From 3db1e8157ba7d83cb745e12ed27871d83ed045ce Mon Sep 17 00:00:00 2001 From: Jianping Liu Date: Tue, 27 Aug 2024 17:06:46 +0800 Subject: [PATCH] drivers/thirdparty: put release-drivers in tree Only the size of mlnx driver tgz is very big, other drivers source size is not very big. So, remove release-drivers submoule (sub git repo). Signed-off-by: Jianping Liu Reviewed-by: Yongliang Gao --- .gitmodules | 3 - dist/sources/download-and-copy-drivers.sh | 23 +- dist/sources/release-drivers.tgz | Bin 121 -> 0 bytes dist/templates/kernel.template.spec | 14 +- drivers/thirdparty/copy-drivers.sh | 26 +- drivers/thirdparty/release-drivers | 1 - .../thirdparty/release-drivers/bnxt/COPYING | 339 + .../thirdparty/release-drivers/bnxt/ChangeLog | 22467 +++++++ .../thirdparty/release-drivers/bnxt/MANIFEST | 262 + .../thirdparty/release-drivers/bnxt/Makefile | 1558 + .../release-drivers/bnxt/README.TXT | 1997 + .../thirdparty/release-drivers/bnxt/bnxt.c | 20609 ++++++ .../thirdparty/release-drivers/bnxt/bnxt.h | 3737 ++ .../release-drivers/bnxt/bnxt_auxbus_compat.c | 178 + .../release-drivers/bnxt/bnxt_auxbus_compat.h | 111 + .../release-drivers/bnxt/bnxt_compat.h | 2835 + .../bnxt/bnxt_compat_link_modes.c | 149 + .../bnxt/bnxt_compat_link_modes.h | 208 + .../release-drivers/bnxt/bnxt_coredump.c | 732 + .../release-drivers/bnxt/bnxt_coredump.h | 159 + .../release-drivers/bnxt/bnxt_dbr.h | 119 + .../release-drivers/bnxt/bnxt_dcb.c | 938 + .../release-drivers/bnxt/bnxt_dcb.h | 186 + .../release-drivers/bnxt/bnxt_debugfs.c | 603 + .../release-drivers/bnxt/bnxt_debugfs.h | 31 + .../release-drivers/bnxt/bnxt_debugfs_cpt.c | 476 + .../release-drivers/bnxt/bnxt_devlink.c | 1476 + .../release-drivers/bnxt/bnxt_devlink.h | 133 + .../bnxt/bnxt_devlink_compat.h | 101 + .../release-drivers/bnxt/bnxt_dim.c | 68 + .../release-drivers/bnxt/bnxt_dim.h | 354 + .../release-drivers/bnxt/bnxt_ethtool.c | 6292 ++ .../release-drivers/bnxt/bnxt_ethtool.h | 95 + .../bnxt/bnxt_ethtool_compat.c | 458 + 
.../release-drivers/bnxt/bnxt_extra_ver.h | 18 + .../release-drivers/bnxt/bnxt_fw_hdr.h | 120 + .../release-drivers/bnxt/bnxt_hdbr.c | 588 + .../release-drivers/bnxt/bnxt_hdbr.h | 141 + .../release-drivers/bnxt/bnxt_hsi.h | 21048 ++++++ .../release-drivers/bnxt/bnxt_hwmon.c | 245 + .../release-drivers/bnxt/bnxt_hwmon.h | 30 + .../release-drivers/bnxt/bnxt_hwrm.c | 836 + .../release-drivers/bnxt/bnxt_hwrm.h | 157 + .../release-drivers/bnxt/bnxt_ktls.c | 1329 + .../release-drivers/bnxt/bnxt_ktls.h | 267 + .../release-drivers/bnxt/bnxt_lfc.c | 806 + .../release-drivers/bnxt/bnxt_lfc.h | 98 + .../release-drivers/bnxt/bnxt_lfc_ioctl.h | 111 + .../release-drivers/bnxt/bnxt_log.c | 570 + .../release-drivers/bnxt/bnxt_log.h | 55 + .../release-drivers/bnxt/bnxt_log_data.c | 84 + .../release-drivers/bnxt/bnxt_log_data.h | 17 + .../release-drivers/bnxt/bnxt_mpc.c | 543 + .../release-drivers/bnxt/bnxt_mpc.h | 143 + .../release-drivers/bnxt/bnxt_netmap_linux.h | 985 + .../release-drivers/bnxt/bnxt_nic_flow.c | 313 + .../release-drivers/bnxt/bnxt_nic_flow.h | 18 + .../release-drivers/bnxt/bnxt_nvm_defs.h | 73 + .../release-drivers/bnxt/bnxt_ptp.c | 1524 + .../release-drivers/bnxt/bnxt_ptp.h | 201 + .../release-drivers/bnxt/bnxt_sriov.c | 2125 + .../release-drivers/bnxt/bnxt_sriov.h | 82 + .../release-drivers/bnxt/bnxt_sriov_sysfs.c | 266 + .../release-drivers/bnxt/bnxt_sriov_sysfs.h | 20 + .../thirdparty/release-drivers/bnxt/bnxt_tc.c | 3904 ++ .../thirdparty/release-drivers/bnxt/bnxt_tc.h | 384 + .../release-drivers/bnxt/bnxt_tc_compat.h | 310 + .../release-drivers/bnxt/bnxt_tfc.c | 267 + .../release-drivers/bnxt/bnxt_tfc.h | 150 + .../release-drivers/bnxt/bnxt_udcc.c | 1131 + .../release-drivers/bnxt/bnxt_udcc.h | 84 + .../release-drivers/bnxt/bnxt_ulp.c | 646 + .../release-drivers/bnxt/bnxt_ulp.h | 162 + .../release-drivers/bnxt/bnxt_vfr.c | 1394 + .../release-drivers/bnxt/bnxt_vfr.h | 266 + .../release-drivers/bnxt/bnxt_xdp.c | 659 + .../release-drivers/bnxt/bnxt_xdp.h | 57 
+ .../release-drivers/bnxt/bnxt_xsk.c | 490 + .../release-drivers/bnxt/bnxt_xsk.h | 21 + .../release-drivers/bnxt/find_src.awk | 37 + .../release-drivers/bnxt/hcapi/bitalloc.c | 258 + .../release-drivers/bnxt/hcapi/bitalloc.h | 57 + .../bnxt/hcapi/cfa/cfa_p40_hw.h | 652 + .../bnxt/hcapi/cfa/cfa_p58_hw.h | 1236 + .../bnxt/hcapi/cfa/hcapi_cfa.h | 89 + .../bnxt/hcapi/cfa/hcapi_cfa_defs.h | 794 + .../bnxt/hcapi/cfa/hcapi_cfa_p4.c | 137 + .../bnxt/hcapi/cfa/hcapi_cfa_p4.h | 452 + .../bnxt/hcapi/cfa/hcapi_cfa_p58.c | 116 + .../bnxt/hcapi/cfa/hcapi_cfa_p58.h | 411 + .../bnxt/hcapi/cfa_v3/include/cfa_resources.h | 180 + .../bnxt/hcapi/cfa_v3/include/cfa_types.h | 107 + .../bnxt/hcapi/cfa_v3/include/cfa_util.h | 19 + .../bnxt/hcapi/cfa_v3/include/sys_util.h | 57 + .../bnxt/hcapi/cfa_v3/mm/cfa_mm.c | 673 + .../bnxt/hcapi/cfa_v3/mm/include/cfa_mm.h | 156 + .../bnxt/hcapi/cfa_v3/mm/include/sys_util.h | 13 + .../bnxt/hcapi/cfa_v3/mpc/cfa_bld_mpc.c | 33 + .../cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c | 1110 + .../bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c | 883 + .../hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c | 43 + .../hcapi/cfa_v3/mpc/include/cfa_bld_defs.h | 399 + .../mpc/include/cfa_bld_mpc_field_ids.h | 1268 + .../hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h | 589 + .../include/cfa_bld_p70_host_mpc_wrapper.h | 75 + .../cfa_v3/mpc/include/cfa_bld_p70_mpc.h | 610 + .../cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h | 373 + .../cfa_v3/mpc/include/cfa_bld_p70_mpcops.h | 14 + .../cfa_v3/mpc/include/cfa_p70_mpc_cmds.h | 1528 + .../cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h | 3294 + .../cfa_v3/mpc/include/cfa_p70_mpc_common.h | 75 + .../mpc/include/cfa_p70_mpc_field_ids.h | 1170 + .../mpc/include/cfa_p70_mpc_field_mapping.h | 768 + .../bnxt/hcapi/cfa_v3/tim/cfa_tim.c | 114 + .../bnxt/hcapi/cfa_v3/tim/include/cfa_tim.h | 191 + .../bnxt/hcapi/cfa_v3/tpm/cfa_tpm.c | 256 + .../bnxt/hcapi/cfa_v3/tpm/include/cfa_tpm.h | 234 + .../bnxt/tf_core/cfa_resource_types.h | 265 + .../bnxt/tf_core/cfa_tcam_mgr.c | 
1855 + .../bnxt/tf_core/cfa_tcam_mgr.h | 297 + .../bnxt/tf_core/cfa_tcam_mgr_device.h | 122 + .../bnxt/tf_core/cfa_tcam_mgr_hwop_msg.c | 227 + .../bnxt/tf_core/cfa_tcam_mgr_hwop_msg.h | 25 + .../bnxt/tf_core/cfa_tcam_mgr_p4.c | 857 + .../bnxt/tf_core/cfa_tcam_mgr_p4.h | 16 + .../bnxt/tf_core/cfa_tcam_mgr_p58.c | 858 + .../bnxt/tf_core/cfa_tcam_mgr_p58.h | 16 + .../release-drivers/bnxt/tf_core/dpool.c | 596 + .../release-drivers/bnxt/tf_core/dpool.h | 248 + .../release-drivers/bnxt/tf_core/rand.c | 44 + .../release-drivers/bnxt/tf_core/rand.h | 25 + .../release-drivers/bnxt/tf_core/tf_core.c | 1600 + .../release-drivers/bnxt/tf_core/tf_core.h | 1598 + .../release-drivers/bnxt/tf_core/tf_device.c | 567 + .../release-drivers/bnxt/tf_core/tf_device.h | 881 + .../bnxt/tf_core/tf_device_p4.c | 455 + .../bnxt/tf_core/tf_device_p4.h | 190 + .../bnxt/tf_core/tf_device_p58.c | 787 + .../bnxt/tf_core/tf_device_p58.h | 193 + .../release-drivers/bnxt/tf_core/tf_em.h | 233 + .../bnxt/tf_core/tf_em_hash_internal.c | 180 + .../bnxt/tf_core/tf_em_internal.c | 365 + .../bnxt/tf_core/tf_ext_flow_handle.h | 185 + .../bnxt/tf_core/tf_global_cfg.c | 181 + .../bnxt/tf_core/tf_global_cfg.h | 104 + .../bnxt/tf_core/tf_identifier.c | 240 + .../bnxt/tf_core/tf_identifier.h | 140 + .../release-drivers/bnxt/tf_core/tf_if_tbl.c | 158 + .../release-drivers/bnxt/tf_core/tf_if_tbl.h | 159 + .../release-drivers/bnxt/tf_core/tf_msg.c | 1218 + .../release-drivers/bnxt/tf_core/tf_msg.h | 386 + .../release-drivers/bnxt/tf_core/tf_rm.c | 1399 + .../release-drivers/bnxt/tf_core/tf_rm.h | 453 + .../release-drivers/bnxt/tf_core/tf_session.c | 952 + .../release-drivers/bnxt/tf_core/tf_session.h | 501 + .../bnxt/tf_core/tf_sram_mgr.c | 775 + .../bnxt/tf_core/tf_sram_mgr.h | 201 + .../release-drivers/bnxt/tf_core/tf_tbl.c | 600 + .../release-drivers/bnxt/tf_core/tf_tbl.h | 219 + .../bnxt/tf_core/tf_tbl_sram.c | 659 + .../bnxt/tf_core/tf_tbl_sram.h | 104 + .../release-drivers/bnxt/tf_core/tf_tcam.c | 762 + 
.../release-drivers/bnxt/tf_core/tf_tcam.h | 266 + .../bnxt/tf_core/tf_tcam_mgr_msg.c | 211 + .../bnxt/tf_core/tf_tcam_mgr_msg.h | 28 + .../release-drivers/bnxt/tf_core/tf_util.c | 164 + .../release-drivers/bnxt/tf_core/tf_util.h | 93 + .../bnxt/tf_ulp/bnxt_tf_common.h | 79 + .../bnxt/tf_ulp/bnxt_tf_tc_shim.c | 295 + .../bnxt/tf_ulp/bnxt_tf_tc_shim.h | 45 + .../release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.c | 1576 + .../release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.h | 619 + .../bnxt/tf_ulp/bnxt_tf_ulp_p5.c | 1474 + .../bnxt/tf_ulp/bnxt_tf_ulp_p5.h | 25 + .../bnxt/tf_ulp/bnxt_tf_ulp_p7.c | 1149 + .../bnxt/tf_ulp/bnxt_tf_ulp_p7.h | 85 + .../bnxt/tf_ulp/bnxt_ulp_flow.h | 117 + .../bnxt/tf_ulp/bnxt_ulp_linux_flow.c | 593 + .../bnxt/tf_ulp/bnxt_ulp_meter.c | 502 + .../generic_templates/ulp_template_db_act.c | 262 + .../generic_templates/ulp_template_db_class.c | 5183 ++ .../generic_templates/ulp_template_db_enum.h | 2120 + .../generic_templates/ulp_template_db_field.h | 147 + .../generic_templates/ulp_template_db_tbl.c | 3957 ++ .../generic_templates/ulp_template_db_tbl.h | 184 + .../ulp_template_db_thor2_act.c | 9864 +++ .../ulp_template_db_thor2_class.c | 53192 ++++++++++++++++ .../ulp_template_db_thor_act.c | 10166 +++ .../ulp_template_db_thor_class.c | 50716 +++++++++++++++ .../ulp_template_db_wh_plus_act.c | 6772 ++ .../ulp_template_db_wh_plus_class.c | 14567 +++++ .../bnxt/tf_ulp/ulp_alloc_tbl.c | 210 + .../bnxt/tf_ulp/ulp_alloc_tbl.h | 72 + .../bnxt/tf_ulp/ulp_def_rules.c | 745 + .../release-drivers/bnxt/tf_ulp/ulp_fc_mgr.c | 589 + .../release-drivers/bnxt/tf_ulp/ulp_fc_mgr.h | 195 + .../bnxt/tf_ulp/ulp_fc_mgr_p5.c | 142 + .../bnxt/tf_ulp/ulp_fc_mgr_p7.c | 108 + .../release-drivers/bnxt/tf_ulp/ulp_flow_db.c | 1951 + .../release-drivers/bnxt/tf_ulp/ulp_flow_db.h | 454 + .../release-drivers/bnxt/tf_ulp/ulp_gen_tbl.c | 586 + .../release-drivers/bnxt/tf_ulp/ulp_gen_tbl.h | 222 + .../bnxt/tf_ulp/ulp_generic_flow_offload.c | 1465 + .../bnxt/tf_ulp/ulp_generic_flow_offload.h | 
167 + .../release-drivers/bnxt/tf_ulp/ulp_linux.h | 19 + .../release-drivers/bnxt/tf_ulp/ulp_mapper.c | 4544 ++ .../release-drivers/bnxt/tf_ulp/ulp_mapper.h | 291 + .../bnxt/tf_ulp/ulp_mapper_p5.c | 1349 + .../bnxt/tf_ulp/ulp_mapper_p7.c | 1590 + .../bnxt/tf_ulp/ulp_mark_mgr.c | 318 + .../bnxt/tf_ulp/ulp_mark_mgr.h | 107 + .../release-drivers/bnxt/tf_ulp/ulp_matcher.c | 509 + .../release-drivers/bnxt/tf_ulp/ulp_matcher.h | 70 + .../bnxt/tf_ulp/ulp_nic_flow.c | 280 + .../bnxt/tf_ulp/ulp_nic_flow.h | 32 + .../release-drivers/bnxt/tf_ulp/ulp_port_db.c | 1047 + .../release-drivers/bnxt/tf_ulp/ulp_port_db.h | 196 + .../bnxt/tf_ulp/ulp_tc_custom_offload.c | 1953 + .../bnxt/tf_ulp/ulp_tc_custom_offload.h | 80 + .../bnxt/tf_ulp/ulp_tc_handler_tbl.c | 142 + .../bnxt/tf_ulp/ulp_tc_parser.c | 3623 ++ .../bnxt/tf_ulp/ulp_tc_parser.h | 205 + .../bnxt/tf_ulp/ulp_tc_rte_flow.h | 1608 + .../bnxt/tf_ulp/ulp_tc_rte_flow_gen.c | 1134 + .../bnxt/tf_ulp/ulp_template_debug.c | 1000 + .../bnxt/tf_ulp/ulp_template_debug.h | 449 + .../bnxt/tf_ulp/ulp_template_debug_proto.h | 91 + .../bnxt/tf_ulp/ulp_template_struct.h | 517 + .../bnxt/tf_ulp/ulp_tf_debug.c | 111 + .../bnxt/tf_ulp/ulp_tf_debug.h | 21 + .../release-drivers/bnxt/tf_ulp/ulp_udcc.c | 274 + .../release-drivers/bnxt/tf_ulp/ulp_udcc.h | 67 + .../release-drivers/bnxt/tf_ulp/ulp_utils.c | 1061 + .../release-drivers/bnxt/tf_ulp/ulp_utils.h | 234 + .../release-drivers/bnxt/tfc_v3/tfc.h | 978 + .../release-drivers/bnxt/tfc_v3/tfc_act.c | 756 + .../bnxt/tfc_v3/tfc_action_handle.h | 64 + .../release-drivers/bnxt/tfc_v3/tfc_cpm.c | 408 + .../release-drivers/bnxt/tfc_v3/tfc_cpm.h | 163 + .../release-drivers/bnxt/tfc_v3/tfc_debug.h | 18 + .../release-drivers/bnxt/tfc_v3/tfc_em.c | 854 + .../release-drivers/bnxt/tfc_v3/tfc_em.h | 151 + .../bnxt/tfc_v3/tfc_flow_handle.h | 65 + .../bnxt/tfc_v3/tfc_global_id.c | 48 + .../release-drivers/bnxt/tfc_v3/tfc_ident.c | 74 + .../release-drivers/bnxt/tfc_v3/tfc_idx_tbl.c | 338 + 
.../release-drivers/bnxt/tfc_v3/tfc_if_tbl.c | 104 + .../release-drivers/bnxt/tfc_v3/tfc_init.c | 72 + .../bnxt/tfc_v3/tfc_mpc_table.c | 888 + .../release-drivers/bnxt/tfc_v3/tfc_msg.c | 1416 + .../release-drivers/bnxt/tfc_v3/tfc_msg.h | 136 + .../release-drivers/bnxt/tfc_v3/tfc_priv.c | 84 + .../release-drivers/bnxt/tfc_v3/tfc_priv.h | 55 + .../release-drivers/bnxt/tfc_v3/tfc_session.c | 167 + .../bnxt/tfc_v3/tfc_tbl_scope.c | 1838 + .../release-drivers/bnxt/tfc_v3/tfc_tcam.c | 236 + .../release-drivers/bnxt/tfc_v3/tfc_util.c | 156 + .../release-drivers/bnxt/tfc_v3/tfc_util.h | 87 + .../bnxt/tfc_v3/tfc_vf2pf_msg.c | 322 + .../bnxt/tfc_v3/tfc_vf2pf_msg.h | 149 + .../release-drivers/bnxt/tfc_v3/tfo.c | 501 + .../release-drivers/bnxt/tfc_v3/tfo.h | 271 + .../release-drivers/mlnx/get_mlnx_info.sh | 20 + 263 files changed, 354491 insertions(+), 50 deletions(-) delete mode 100644 dist/sources/release-drivers.tgz delete mode 160000 drivers/thirdparty/release-drivers create mode 100644 drivers/thirdparty/release-drivers/bnxt/COPYING create mode 100644 drivers/thirdparty/release-drivers/bnxt/ChangeLog create mode 100644 drivers/thirdparty/release-drivers/bnxt/MANIFEST create mode 100644 drivers/thirdparty/release-drivers/bnxt/Makefile create mode 100644 drivers/thirdparty/release-drivers/bnxt/README.TXT create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_compat.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.h 
create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_dbr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs_cpt.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_devlink_compat.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_dim.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_dim.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool_compat.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_extra_ver.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_fw_hdr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hsi.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.h create mode 100644 
drivers/thirdparty/release-drivers/bnxt/bnxt_lfc_ioctl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_log.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_log.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_netmap_linux.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_nvm_defs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_tc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_tc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_tc_compat.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.c create mode 100644 
drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.h create mode 100755 drivers/thirdparty/release-drivers/bnxt/find_src.awk create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p40_hw.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p58_hw.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_defs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_resources.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_types.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_util.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/sys_util.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/cfa_mm.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/cfa_mm.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/sys_util.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_mpc.c create mode 100644 
drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/cfa_tim.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/include/cfa_tim.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/cfa_tpm.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/include/cfa_tpm.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_resource_types.h 
create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_device.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/rand.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/rand.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_hash_internal.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_internal.c create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tf_core/tf_ext_flow_handle.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.h create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_common.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_flow.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_linux_flow.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_meter.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_act.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_class.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_enum.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_field.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_act.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_class.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_act.c create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_class.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_def_rules.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p5.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p7.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_linux.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p5.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p7.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.h create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_handler_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow_gen.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug_proto.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_struct.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc.h create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_act.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_action_handle.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_debug.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_flow_handle.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_global_id.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_ident.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_idx_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_if_tbl.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_init.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_mpc_table.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_session.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tbl_scope.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tcam.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.h create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.h create mode 100644 
drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.c create mode 100644 drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.h create mode 100755 drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh diff --git a/.gitmodules b/.gitmodules index 712a6c107f94..2965005b98ec 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ [submodule "kernel/tkernel/outtree/emm"] path = kernel/tkernel/outtree/emm url = git@git.woa.com:tlinux/EMM/kmod-emm.git -[submodule "drivers/thirdparty/release-drivers"] - path = drivers/thirdparty/release-drivers - url = https://gitee.com/OpenCloudOS/release-drivers.git diff --git a/dist/sources/download-and-copy-drivers.sh b/dist/sources/download-and-copy-drivers.sh index bd0f1d82c5a1..00179af0d13b 100755 --- a/dist/sources/download-and-copy-drivers.sh +++ b/dist/sources/download-and-copy-drivers.sh @@ -3,26 +3,11 @@ check_url_reachable() { curl -I https://gitee.com 1>/dev/null 2>&1 || exit 0 - curl -I https://content.mellanox.com 1>/dev/null 2>&1 || exit 0 -} - -thirdparty_clone_git(){ - if [ $(stat -c%s release-drivers.tgz) -gt 1024 ]; then - tar -zxf release-drivers.tgz ; return 0 - fi - ## If clone git fail, using the kernel native drivers to compile. 
- timeout 600 git clone -q https://gitee.com/OpenCloudOS/release-drivers.git || exit 0 - - rm -f release-drivers.tgz ; rm -rf release-drivers/.git ; tar -zcf release-drivers.tgz release-drivers -} - -thirdparty_rm_git(){ - rm -rf release-drivers } thirdparty_mlnx(){ - mlnx_tgz_url=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_url) - mlnx_tgz_name=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name) + mlnx_tgz_url=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_url) + mlnx_tgz_name=$(../../drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name) get_mlnx_tgz_ok=1 if [ $(stat -c%s ${mlnx_tgz_name}) -gt 1024 ]; then return 0; fi @@ -41,10 +26,6 @@ thirdparty_mlnx(){ ## check_url_reachable -thirdparty_clone_git - thirdparty_mlnx echo "Having downloaded thirdparty drivers." - -thirdparty_rm_git diff --git a/dist/sources/release-drivers.tgz b/dist/sources/release-drivers.tgz deleted file mode 100644 index f8d287511d43227348e589f3b30755aae923f2e6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 121 zcmb2|=3oE==C|hzxehsquw1Y`73XkAqP%!sLo`dv0kx_+=Tjn4M_=ttvpXZQ|7yDI z^s34&FJo^P@v3{wR(x)2e*Dh+cX3{)YVD`!EqNE_C7)#Xc;@2koBDK*U3gacciZo) W%n%zG7!JtSGKBSIZ)MP6U;qHy^D?FY diff --git a/dist/templates/kernel.template.spec b/dist/templates/kernel.template.spec index b58f62160456..52ccbd8e9fc8 100644 --- a/dist/templates/kernel.template.spec +++ b/dist/templates/kernel.template.spec @@ -194,9 +194,8 @@ Source2001: cpupower.config ### Used for download thirdparty drivers # Start from Source3000 to Source3099, for thirdparty release drivers Source3000: download-and-copy-drivers.sh -Source3001: release-drivers.tgz -Source3002: MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz -Source3003: install.sh +Source3001: MLNX_OFED_LINUX-23.10-3.2.2.0-rhel9.4-x86_64.tgz +Source3002: install.sh ###### Kernel package definations ############################################## ### Main meta package @@ -635,8 +634,7 @@ 
BuildConfig() { if [ -e ../../dist/sources ]; then ./copy-drivers.sh else - cp -a %{SOURCE3001} ./ ; tar -zxf release-drivers.tgz ; rm -f release-drivers.tgz - cp -a %{SOURCE3002} release-drivers/mlnx/ + cp -a %{SOURCE3001} release-drivers/mlnx/ fi popd @@ -1264,7 +1262,7 @@ BuildInstMLNXOFED() { # Compress it into a new tgz file. if [[ "${DISTRO}" != "tl3" ]]; then ## "${DISTRO}" == "tl4" or "${DISTRO}" == "oc9" - mlnxfulname=$(basename %{SOURCE3002}) + mlnxfulname=$(basename %{SOURCE3001}) mlnxrelease=${mlnxfulname%.*} else ## "${DISTRO}" == "tl3" @@ -1274,8 +1272,8 @@ BuildInstMLNXOFED() { # Turn it back to the original file sed -i 's/! -z $JUMP_ROOT/$UID -ne 0/g' $mlnxrelease-ext.$KernUnameR/mlnx_add_kernel_support.sh cp -r $signed $mlnxrelease-ext.$KernUnameR/ko_files.signed - sed -i "s/KERNELMODULE_REPLACE/$KernUnameR/g" %{SOURCE3003} - cp -r ko.location %{SOURCE3003} $mlnxrelease-ext.$KernUnameR/ + sed -i "s/KERNELMODULE_REPLACE/$KernUnameR/g" %{SOURCE3002} + cp -r ko.location %{SOURCE3002} $mlnxrelease-ext.$KernUnameR/ tar -zcvf $mlnxrelease-ext.$KernUnameR.tgz $mlnxrelease-ext.$KernUnameR mkdir %{buildroot}/mlnx/ install -m 755 $mlnxrelease-ext.$KernUnameR.tgz %{buildroot}/mlnx/ diff --git a/drivers/thirdparty/copy-drivers.sh b/drivers/thirdparty/copy-drivers.sh index f4d856e5c075..9180b7876204 100755 --- a/drivers/thirdparty/copy-drivers.sh +++ b/drivers/thirdparty/copy-drivers.sh @@ -1,26 +1,18 @@ #!/bin/bash thirdparty_prepare_source_code(){ - if [ ! -e release-drivers/.git ] ; then - ## Real release-drivers.tgz will more than 1024 bytes. - ## Real release-drivers.tgz will less than 1024bytes. - if [ $(stat -c%s ../../dist/sources/release-drivers.tgz) -gt 1024 ]; then - cp -a ../../dist/sources/release-drivers.tgz ./ ; rm -rf release-drivers - tar -zxf release-drivers.tgz ; rm -f release-drivers.tgz - else - ../../dist/sources/download-and-copy-drivers.sh - fi - fi - mlnx_tgz_name=$(release-drivers/mlnx/get_mlnx_info.sh mlnx_tgz_name) + if [ ! 
-e release-drivers/mlnx/${mlnx_tgz_name} ] ; then - if [ $(stat -c%s ../../dist/sources/${mlnx_tgz_name}) -gt 1024 ]; then - cp -a ../../dist/sources/${mlnx_tgz_name} release-drivers/mlnx/ ; return 0 + ## This script will only be called when existing ../../dist/sources dir + if [ $(stat -c%s ../../dist/sources/${mlnx_tgz_name}) -lt 1024 ]; then + ## Real MLNX_OFED_LINUX-*.tgz will more than 1024 bytes. + ## Real MLNX_OFED_LINUX-*.tgz will more than 1024 bytes. + pushd ../../dist/sources + ./download-and-copy-drivers.sh + popd fi - if [ -e ${mlnx_tgz_name} ]; then - mv -f ${mlnx_tgz_name} release-drivers/mlnx/ ; return 0 - fi - ../../dist/sources/download-and-copy-drivers.sh ; mv -f ${mlnx_tgz_name} release-drivers/mlnx/ + cp -a ../../dist/sources/${mlnx_tgz_name} release-drivers/mlnx/ fi } diff --git a/drivers/thirdparty/release-drivers b/drivers/thirdparty/release-drivers deleted file mode 160000 index b61dece794c0..000000000000 --- a/drivers/thirdparty/release-drivers +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b61dece794c04d9d00421929d0dda87cee8512a3 diff --git a/drivers/thirdparty/release-drivers/bnxt/COPYING b/drivers/thirdparty/release-drivers/bnxt/COPYING new file mode 100644 index 000000000000..d159169d1050 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. 
This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. 
We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. 
But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) 
+ +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. 
Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. 
Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. 
It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/drivers/thirdparty/release-drivers/bnxt/ChangeLog b/drivers/thirdparty/release-drivers/bnxt/ChangeLog new file mode 100644 index 000000000000..4319179831f7 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/ChangeLog @@ -0,0 +1,22467 @@ +commit 1eb4ef12591348c440ac9d6efcf7521e73cf2b10 +Author: Somnath Kotur +Date: Wed Jun 7 00:54:09 2023 -0700 + + bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks + + As per the new udp tunnel framework, drivers which need to know the + details of a port entry (i.e. port type) when it gets deleted should + use the .set_port / .unset_port callbacks. + + Implementing the current .udp_tunnel_sync callback would mean that the + deleted tunnel port entry would be all zeros. This used to work on + older firmware because it would not check the input when deleting a + tunnel port. With newer firmware, the delete will now fail and + subsequent tunnel port allocation will fail as a result. 
+ + Fixes: 442a35a5a7aa ("bnxt: convert to new udp_tunnel_nic infra") + Reviewed-by: Kalesh Anakkur Purayil + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 319a7827df9784048abe072afe6b4fb4501d8de4 +Author: Pavan Chebbi +Date: Wed Jun 7 00:54:08 2023 -0700 + + bnxt_en: Prevent kernel panic when receiving unexpected PHC_UPDATE event + + The firmware can send PHC_RTC_UPDATE async event on a PF that may not + have PTP registered. In such a case, there will be a null pointer + deference for bp->ptp_cfg when we try to handle the event. + + Fix it by not registering for this event with the firmware if !bp->ptp_cfg. + Also, check that bp->ptp_cfg is valid before proceeding when we receive + the event. + + Fixes: 8bcf6f04d4a5 ("bnxt_en: Handle async event when the PHC is updated in RTC mode") + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 83474a9b252ab23e6003865c2775024344cb9c09 +Author: Vikas Gupta +Date: Wed Jun 7 00:54:07 2023 -0700 + + bnxt_en: Skip firmware fatal error recovery if chip is not accessible + + Driver starts firmware fatal error recovery by detecting + heartbeat failure or fw reset count register changing. But + these checks are not reliable if the device is not accessible. + This can happen while DPC (Downstream Port containment) is in + progress. Skip firmware fatal recovery if pci_device_is_present() + returns false. + + Fixes: acfb50e4e773 ("bnxt_en: Add FW fatal devlink_health_reporter.") + Reviewed-by: Somnath Kotur + Reviewed-by: Pavan Chebbi + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 1a9e4f501bc6ff1b6ecb60df54fbf2b54db43bfe +Author: Somnath Kotur +Date: Wed Jun 7 00:54:06 2023 -0700 + + bnxt_en: Query default VLAN before VNIC setup on a VF + + We need to call bnxt_hwrm_func_qcfg() on a VF to query the default + VLAN that may be setup by the PF. 
If a default VLAN is enabled, + the VF cannot support VLAN acceleration on the receive side and + the VNIC must be setup to strip out the default VLAN tag. If a + default VLAN is not enabled, the VF can support VLAN acceleration + on the receive side. The VNIC should be set up to strip or not + strip the VLAN based on the RX VLAN acceleration setting. + + Without this call to determine the default VLAN before calling + bnxt_setup_vnic(), the VNIC may not be set up correctly. For + example, bnxt_setup_vnic() may set up to strip the VLAN tag based + on stale default VLAN information. If RX VLAN acceleration is + not enabled, the VLAN tag will be incorrectly stripped and the + RX data path will not work correctly. + + Fixes: cf6645f8ebc6 ("bnxt_en: Add function for VF driver to query default VLAN.") + Reviewed-by: Pavan Chebbi + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 1d997801c7cc6a7f542e46d5a6bf16f893ad3fe9 +Author: Sreekanth Reddy +Date: Wed Jun 7 00:54:05 2023 -0700 + + bnxt_en: Don't issue AP reset during ethtool's reset operation + + Only older NIC controller's firmware uses the PROC AP reset type. + Firmware on 5731X/5741X and newer chips does not support this reset + type. When bnxt_reset() issues a series of resets, this PROC AP + reset may actually fail on these newer chips because the firmware + is not ready to accept this unsupported command yet. Avoid this + unnecessary error by skipping this reset type on chips that don't + support it. + + Fixes: 7a13240e3718 ("bnxt_en: fix ethtool_reset_flags ABI violations") + Reviewed-by: Pavan Chebbi + Signed-off-by: Sreekanth Reddy + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 095d5dc0c1d9f3284e3c575ccf4c0e8b04b548f8 +Author: Pavan Chebbi +Date: Wed Jun 7 00:54:04 2023 -0700 + + bnxt_en: Fix bnxt_hwrm_update_rss_hash_cfg() + + We must specify the vnic id of the vnic in the input structure of this + firmware message. 
Otherwise we will get an error from the firmware. + + Fixes: 98a4322b70e8 ("bnxt_en: update RSS config using difference algorithm") + Reviewed-by: Kalesh Anakkur Purayil + Reviewed-by: Somnath Kotur + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 649c3fed36730a53447d8f479c14e431363563b6 +Author: Jakub Kicinski +Date: Tue Jun 6 18:08:25 2023 -0700 + + eth: bnxt: fix the wake condition + + The down condition should be the negation of the wake condition, + IOW when I moved it from: + + if (cond && wake()) + to + if (__netif_txq_completed_wake(cond)) + + Cond should have been negated. Flip it now. + + This bug leads to occasional crashes with netconsole. + It may also lead to queue never waking up in case BQL is not enabled. + + Reported-by: David Wei + Fixes: 08a096780d92 ("bnxt: use new queue try_stop/try_wake macros") + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20230607010826.960226-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit 278fda0d52f67244044384abd7dd5b3a5b3a5604 +Author: Yunsheng Lin +Date: Thu May 11 09:12:13 2023 +0800 + + net: remove __skb_frag_set_page() + + The remaining users calling __skb_frag_set_page() with + page being NULL seems to be doing defensive programming, + as shinfo->nr_frags is already decremented, so remove + them. + + Signed-off-by: Yunsheng Lin + Reviewed-by: Leon Romanovsky + Reviewed-by: Michael Chan + Reviewed-by: Jesse Brandeburg + Reviewed-by: Simon Horman + Signed-off-by: David S. Miller + +commit b51f4113ebb02011f0ca86abc3134b28d2071b6a +Author: Yunsheng Lin +Date: Thu May 11 09:12:12 2023 +0800 + + net: introduce and use skb_frag_fill_page_desc() + + Most users use __skb_frag_set_page()/skb_frag_off_set()/ + skb_frag_size_set() to fill the page desc for a skb frag. + + Introduce skb_frag_fill_page_desc() to do that. 
+ + net/bpf/test_run.c does not call skb_frag_off_set() to + set the offset, "copy_from_user(page_address(page), ...)" + and 'shinfo' being part of the 'data' kzalloced in + bpf_test_init() suggest that it is assuming offset to be + initialized as zero, so call skb_frag_fill_page_desc() + with offset being zero for this case. + + Also, skb_frag_set_page() is not used anymore, so remove + it. + + Signed-off-by: Yunsheng Lin + Reviewed-by: Leon Romanovsky + Reviewed-by: Simon Horman + Signed-off-by: David S. Miller + +commit 8c154d272c3e03b100baaf1df473f22a78fa403e +Author: Vadim Fedorenko +Date: Tue Apr 18 13:25:11 2023 -0700 + + bnxt_en: fix free-runnig PHC mode + + The patch in fixes changed the way real-time mode is chosen for PHC on + the NIC. Apparently there is one more use case of the check outside of + ptp part of the driver which was not converted to the new macro and is + making a lot of noise in free-running mode. + + Fixes: 131db4991622 ("bnxt_en: reset PHC frequency in free-running mode") + Signed-off-by: Vadim Fedorenko + Reviewed-by: Michael Chan + Reviewed-by: Pavan Chebbi + Link: https://lore.kernel.org/r/20230418202511.1544735-1-vadfed@meta.com + Signed-off-by: Jakub Kicinski + +commit 4f4e54b1041e60694117893cd986831153a3e719 +Author: Kalesh AP +Date: Sun Apr 16 23:58:19 2023 -0700 + + bnxt_en: Fix a possible NULL pointer dereference in unload path + + In the driver unload path, the driver currently checks the valid + BNXT_FLAG_ROCE_CAP flag in bnxt_rdma_aux_device_uninit() before + proceeding. This is flawed because the flag may not be set initially + during driver load. It may be set later after the NVRAM setting is + changed followed by a firmware reset. 
Relying on the + BNXT_FLAG_ROCE_CAP flag may crash in bnxt_rdma_aux_device_uninit() if + the aux device was never initialized: + + BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 + PGD 8ae6aa067 P4D 0 + Oops: 0000 [#1] SMP NOPTI + CPU: 39 PID: 42558 Comm: rmmod Kdump: loaded Tainted: G OE --------- - - 4.18.0-348.el8.x86_64 #1 + Hardware name: Dell Inc. PowerEdge R750/0WT8Y6, BIOS 1.5.4 12/17/2021 + RIP: 0010:device_del+0x1b/0x410 + Code: 89 a5 50 03 00 00 4c 89 a5 58 03 00 00 eb 89 0f 1f 44 00 00 41 56 41 55 41 54 4c 8d a7 80 00 00 00 55 53 48 89 fb 48 83 ec 18 <48> 8b 2f 4c 89 e7 65 48 8b 04 25 28 00 00 00 48 89 44 24 10 31 c0 + RSP: 0018:ff7f82bf469a7dc8 EFLAGS: 00010292 + RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 + RDX: 0000000000000000 RSI: 0000000000000206 RDI: 0000000000000000 + RBP: ff31b7cd114b0ac0 R08: 0000000000000000 R09: ffffffff935c3400 + R10: ff31b7cd45bc3440 R11: 0000000000000001 R12: 0000000000000080 + R13: ffffffffc1069f40 R14: 0000000000000000 R15: 0000000000000000 + FS: 00007fc9903ce740(0000) GS:ff31b7d4ffac0000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000000 CR3: 0000000992fee004 CR4: 0000000000773ee0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + PKRU: 55555554 + Call Trace: + bnxt_rdma_aux_device_uninit+0x1f/0x30 [bnxt_en] + bnxt_remove_one+0x2f/0x1f0 [bnxt_en] + pci_device_remove+0x3b/0xc0 + device_release_driver_internal+0x103/0x1f0 + driver_detach+0x54/0x88 + bus_remove_driver+0x77/0xc9 + pci_unregister_driver+0x2d/0xb0 + bnxt_exit+0x16/0x2c [bnxt_en] + __x64_sys_delete_module+0x139/0x280 + do_syscall_64+0x5b/0x1a0 + entry_SYSCALL_64_after_hwframe+0x65/0xca + RIP: 0033:0x7fc98f3af71b + + Fix this by modifying the check inside bnxt_rdma_aux_device_uninit() + to check for bp->aux_priv instead. 
We also need to make some changes + in bnxt_rdma_aux_device_init() to make sure that bp->aux_priv is set + only when the aux device is fully initialized. + + Fixes: d80d88b0dfff ("bnxt_en: Add auxiliary driver support") + Reviewed-by: Ajit Khaparde + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit 294e39e0d03449f32b0723d3fd99ab5c23881c05 +Author: Jakub Kicinski +Date: Wed Apr 12 21:26:05 2023 -0700 + + bnxt: hook NAPIs to page pools + + bnxt has 1:1 mapping of page pools and NAPIs, so it's safe + to hoook them up together. + + Reviewed-by: Tariq Toukan + Tested-by: Dragos Tatulea + Signed-off-by: Jakub Kicinski + +commit 36647b206c014a0bf3ab17bc88f2c840eefd796c +Author: Jakub Kicinski +Date: Tue Apr 11 18:50:37 2023 -0700 + + bnxt: use READ_ONCE/WRITE_ONCE for ring indexes + + Eric points out that we should make sure that ring index updates + are wrapped in the appropriate READ_ONCE/WRITE_ONCE macros. + + Suggested-by: Eric Dumazet + Reviewed-by: Eric Dumazet + Reviewed-by: Jesse Brandeburg + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: Paolo Abeni + +commit f032d8a9c8b353806366cb3786512b80f478efab +Author: Ivan Vecera +Date: Tue Apr 11 14:04:42 2023 +0200 + + bnxt_en: Allow to set switchdev mode without existing VFs + + Remove an inability of bnxt_en driver to set eswitch to switchdev + mode without existing VFs by: + + 1. Allow to set switchdev mode in bnxt_dl_eswitch_mode_set() so + representors are created only when num_vfs > 0 otherwise just + set bp->eswitch_mode + 2. Do not automatically change bp->eswitch_mode during + bnxt_vf_reps_create() and bnxt_vf_reps_destroy() calls so + the eswitch mode is managed only by an user by devlink. + Just set temporarily bp->eswitch_mode to legacy to avoid + re-opening of representors during destroy. + 3. Create representors in bnxt_sriov_enable() if current eswitch + mode is switchdev one + + Tested by this sequence: + 1. Set PF interface up + 2. 
Set PF's eswitch mode to switchdev + 3. Created N VFs + 4. Checked that N representors were created + 5. Set eswitch mode to legacy + 6. Checked that representors were deleted + 7. Set eswitch mode back to switchdev + 8. Checked that representors exist again for VFs + 9. Deleted all VFs + 10. Checked that all representors were deleted as well + 11. Checked that current eswitch mode is still switchdev + + Signed-off-by: Ivan Vecera + Acked-by: Venkat Duvvuru + Link: https://lore.kernel.org/r/20230411120443.126055-1-ivecera@redhat.com + Signed-off-by: Paolo Abeni + +commit 301f227fc860624d37ba5dae9da57dcf371268db +Author: Jakub Kicinski +Date: Thu Apr 6 18:25:36 2023 -0700 + + net: piggy back on the memory barrier in bql when waking queues + + Drivers call netdev_tx_completed_queue() right before + netif_txq_maybe_wake(). If BQL is enabled netdev_tx_completed_queue() + should issue a memory barrier, so we can depend on that separating + the stop check from the consumer index update, instead of adding + another barrier in netif_txq_maybe_wake(). + + This matters more than the barriers on the xmit path, because + the wake condition is almost always true. So we issue the + consumer side barrier often. + + Wrap netdev_tx_completed_queue() in a local helper to issue + the barrier even if BQL is disabled. Keep the same semantics + as netdev_tx_completed_queue() (barrier only if bytes != 0) + to make it clear that the barrier is conditional. + + Plus since macro gets pkt/byte counts as arguments now - + we can skip waking if there were no packets completed. + + Signed-off-by: Jakub Kicinski + +commit 08a096780d9239e69909c48f4b1fcd99c860b2ef +Author: Jakub Kicinski +Date: Thu Apr 6 18:25:35 2023 -0700 + + bnxt: use new queue try_stop/try_wake macros + + Convert bnxt to use new macros rather than open code the logic. + Two differences: + (1) bnxt_tx_int() will now only issue a memory barrier if it sees + enough space on the ring to wake the queue. 
This should be fine, + the mb() is between the writes to the ring pointers and checking + queue state. + (2) we'll start the queue instead of waking on race, this should + be safe inside the xmit handler. + + Reviewed-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit a9a457f338e7711af391f618b60d8a4b15ba8050 +Author: Selvin Xavier +Date: Thu Mar 30 02:45:34 2023 -0700 + + RDMA/bnxt_re: Update HW interface headers + + Updating the HW structures to the latest version. + This is copied from the code maintained internally. No functionality + changes in this patch. Code is re-organized to match the file maintained + in the internal tree. Also, New HW interface structures are added, which + will be used by the drivers in future. + + CC: Michael Chan + Signed-off-by: Selvin Xavier + Link: https://lore.kernel.org/r/1680169540-10029-2-git-send-email-selvin.xavier@broadcom.com + Signed-off-by: Leon Romanovsky + +commit 581bce7bcb7e7f100908728e7b292e266c76895b +Author: Michael Chan +Date: Tue Mar 28 18:30:21 2023 -0700 + + bnxt_en: Add missing 200G link speed reporting + + bnxt_fw_to_ethtool_speed() is missing the case statement for 200G + link speed reported by firmware. As a result, ethtool will report + unknown speed when the firmware reports 200G link speed. + + Fixes: 532262ba3b84 ("bnxt_en: ethtool: support PAM4 link speeds up to 200G") + Signed-off-by: Michael Chan + Reviewed-by: Simon Horman + Signed-off-by: Jakub Kicinski + +commit 62aad36ed31abc80f35db11e187e690448a79f7d +Author: Kalesh AP +Date: Tue Mar 28 18:30:20 2023 -0700 + + bnxt_en: Fix typo in PCI id to device description string mapping + + Fix 57502 and 57508 NPAR description string entries. The typos + caused these devices to not match up with lspci output. 
+ + Fixes: 49c98421e6ab ("bnxt_en: Add PCI IDs for 57500 series NPAR devices.") + Reviewed-by: Pavan Chebbi + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Reviewed-by: Simon Horman + Signed-off-by: Jakub Kicinski + +commit 83714dc3db0e4a088673601bc8099b079bc1a077 +Author: Kalesh AP +Date: Tue Mar 28 18:30:19 2023 -0700 + + bnxt_en: Fix reporting of test result in ethtool selftest + + When the selftest command fails, driver is not reporting the failure + by updating the "test->flags" when bnxt_close_nic() fails. + + Fixes: eb51365846bc ("bnxt_en: Add basic ethtool -t selftest support.") + Reviewed-by: Pavan Chebbi + Reviewed-by: Somnath Kotur + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Reviewed-by: Simon Horman + Signed-off-by: Jakub Kicinski + +commit a02c33130709736f5a770a55e5bfeda871a8c1cc +Author: Pavan Chebbi +Date: Tue Mar 21 07:44:49 2023 -0700 + + bnxt: Enforce PTP software freq adjustments only when in non-RTC mode + + Currently driver performs software based frequency adjustments + when RTC capability is not discovered or when in shared PHC mode. + But there may be some old firmware versions that still support + hardware freq adjustments without RTC capability being exposed. + In this situation driver will use non-realtime mode even on single + host NICs. + + Hence enforce software frequency adjustments only when running in + shared PHC mode. Make suitable changes for cyclecounter for the + same. + + Signed-off-by: Pavan Chebbi + Reviewed-by: Michael Chan + Acked-by: Vadim Fedorenko + Signed-off-by: Jakub Kicinski + +commit edc5287315489ccc6d049efe46227cedc9ede673 +Author: Pavan Chebbi +Date: Tue Mar 21 07:44:48 2023 -0700 + + bnxt: Defer PTP initialization to after querying function caps + + Driver uses the flag BNXT_FLAG_MULTI_HOST to determine whether + to use non-realtime mode PHC when running on a multi host NIC. 
+ However when ptp initializes on a NIC with shared PHC, we still + don't have this flag set yet because HWRM_FUNC_QCFG is issued + much later. + + Move the ptp initialization code after we have issued func_qcfg. + The next patch will use the BNXT_FLAG_MULTI_HOST flag during PTP + initialization. + + Signed-off-by: Pavan Chebbi + Reviewed-by: Michael Chan + Acked-by: Vadim Fedorenko + Signed-off-by: Jakub Kicinski + +commit a3a4e300439bde3517edd61d982d747000a9f212 +Author: Pavan Chebbi +Date: Tue Mar 21 07:44:47 2023 -0700 + + bnxt: Change fw_cap to u64 to accommodate more capability bits + + The current fw_cap field (u32) has run out of bits to save any + new capability. + + Change the field to u64. + + Signed-off-by: Pavan Chebbi + Reviewed-by: Edwin Peer + Reviewed-by: Michael Chan + Acked-by: Vadim Fedorenko + Signed-off-by: Jakub Kicinski + +commit 7c6dddc239abe660598c49ec95ea0ed6399a4b2a +Author: Maxim Korotkov +Date: Thu Mar 9 20:43:47 2023 +0300 + + bnxt: avoid overflow in bnxt_get_nvram_directory() + + The value of an arithmetic expression is subject + of possible overflow due to a failure to cast operands to a larger data + type before performing arithmetic. Used macro for multiplication instead + operator for avoiding overflow. + + Found by Security Code and Linux Verification + Center (linuxtesting.org) with SVACE. + + Signed-off-by: Maxim Korotkov + Reviewed-by: Pavan Chebbi + Link: https://lore.kernel.org/r/20230309174347.3515-1-korotkov.maxim.s@gmail.com + Signed-off-by: Jakub Kicinski + +commit 131db499162274858bdbd7b5323a639da4aab86c +Author: Vadim Fedorenko +Date: Fri Mar 10 07:13:56 2023 -0800 + + bnxt_en: reset PHC frequency in free-running mode + + When using a PHC in shared between multiple hosts, the previous + frequency value may not be reset and could lead to host being unable to + compensate the offset with timecounter adjustments. To avoid such state + reset the hardware frequency of PHC to zero on init. 
Some refactoring is + needed to make code readable. + + Fixes: 85036aee1938 ("bnxt_en: Add a non-real time mode to access NIC clock") + Signed-off-by: Vadim Fedorenko + Reviewed-by: Pavan Chebbi + Link: https://lore.kernel.org/r/20230310151356.678059-1-vadfed@meta.com + Signed-off-by: Jakub Kicinski + +commit 5f29b73d4eba2926ab99d7bf5f2028810af3c66c +Author: Bjorn Helgaas +Date: Tue Mar 7 12:19:16 2023 -0600 + + bnxt: Drop redundant pci_enable_pcie_error_reporting() + + pci_enable_pcie_error_reporting() enables the device to send ERR_* + Messages. Since f26e58bf6f54 ("PCI/AER: Enable error reporting when AER is + native"), the PCI core does this for all devices during enumeration. + + Remove the redundant pci_enable_pcie_error_reporting() call from the + driver. Also remove the corresponding pci_disable_pcie_error_reporting() + from the driver .remove() path. + + Note that this only controls ERR_* Messages from the device. An ERR_* + Message may cause the Root Port to generate an interrupt, depending on the + AER Root Error Command register managed by the AER service driver. + + Signed-off-by: Bjorn Helgaas + Reviewed-by: Michael Chan + Acked-by: Jesse Brandeburg + Signed-off-by: Jakub Kicinski + +commit 89b59a84cb166f1ab5b6de9830e61324937c661e +Author: Selvin Xavier +Date: Fri Mar 3 18:43:58 2023 -0800 + + bnxt_en: Fix the double free during device removal + + Following warning reported by KASAN during driver unload + + ================================================================== + BUG: KASAN: double-free in bnxt_remove_one+0x103/0x200 [bnxt_en] + Free of addr ffff88814e8dd4c0 by task rmmod/17469 + CPU: 47 PID: 17469 Comm: rmmod Kdump: loaded Tainted: G S 6.2.0-rc7+ #2 + Hardware name: Dell Inc. PowerEdge R740/01YM03, BIOS 2.3.10 08/15/2019 + Call Trace: + + dump_stack_lvl+0x33/0x46 + print_report+0x17b/0x4b3 + ? __call_rcu_common.constprop.79+0x27e/0x8c0 + ? __pfx_free_object_rcu+0x10/0x10 + ? __virt_addr_valid+0xe3/0x160 + ? 
bnxt_remove_one+0x103/0x200 [bnxt_en] + kasan_report_invalid_free+0x64/0xd0 + ? bnxt_remove_one+0x103/0x200 [bnxt_en] + ? bnxt_remove_one+0x103/0x200 [bnxt_en] + __kasan_slab_free+0x179/0x1c0 + ? bnxt_remove_one+0x103/0x200 [bnxt_en] + __kmem_cache_free+0x194/0x350 + bnxt_remove_one+0x103/0x200 [bnxt_en] + pci_device_remove+0x62/0x110 + device_release_driver_internal+0xf6/0x1c0 + driver_detach+0x76/0xe0 + bus_remove_driver+0x89/0x160 + pci_unregister_driver+0x26/0x110 + ? strncpy_from_user+0x188/0x1c0 + bnxt_exit+0xc/0x24 [bnxt_en] + __x64_sys_delete_module+0x21f/0x390 + ? __pfx___x64_sys_delete_module+0x10/0x10 + ? __pfx_mem_cgroup_handle_over_high+0x10/0x10 + ? _raw_spin_lock+0x87/0xe0 + ? __pfx__raw_spin_lock+0x10/0x10 + ? __audit_syscall_entry+0x185/0x210 + ? ktime_get_coarse_real_ts64+0x51/0x80 + ? syscall_trace_enter.isra.18+0x126/0x1a0 + do_syscall_64+0x37/0x90 + entry_SYSCALL_64_after_hwframe+0x72/0xdc + RIP: 0033:0x7effcb6fd71b + Code: 73 01 c3 48 8b 0d 6d 17 2c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa b8 b0 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 3d 17 2c 00 f7 d8 64 89 01 48 + RSP: 002b:00007ffeada270b8 EFLAGS: 00000206 ORIG_RAX: 00000000000000b0 + RAX: ffffffffffffffda RBX: 00005623660e0750 RCX: 00007effcb6fd71b + RDX: 000000000000000a RSI: 0000000000000800 RDI: 00005623660e07b8 + RBP: 0000000000000000 R08: 00007ffeada26031 R09: 0000000000000000 + R10: 00007effcb771280 R11: 0000000000000206 R12: 00007ffeada272e0 + R13: 00007ffeada28bc4 R14: 00005623660e02a0 R15: 00005623660e0750 + + + Auxiliary device structures are freed in bnxt_aux_dev_release. So avoid + calling kfree from bnxt_remove_one. + + Also, set bp->edev to NULL before freeing the auxilary private structure. + + Fixes: d80d88b0dfff ("bnxt_en: Add auxiliary driver support") + Reviewed-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Signed-off-by: Selvin Xavier + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit accd7e23693aaaa9aa0d3e9eca0ae77d1be80ab3 +Author: Michael Chan +Date: Fri Mar 3 18:43:57 2023 -0800 + + bnxt_en: Avoid order-5 memory allocation for TPA data + + The driver needs to keep track of all the possible concurrent TPA (GRO/LRO) + completions on the aggregation ring. On P5 chips, the maximum number + of concurrent TPA is 256 and the amount of memory we allocate is order-5 + on systems using 4K pages. Memory allocation failure has been reported: + + NetworkManager: page allocation failure: order:5, mode:0x40dc0(GFP_KERNEL|__GFP_COMP|__GFP_ZERO), nodemask=(null),cpuset=/,mems_allowed=0-1 + CPU: 15 PID: 2995 Comm: NetworkManager Kdump: loaded Not tainted 5.10.156 #1 + Hardware name: Dell Inc. PowerEdge R660/0M1CC5, BIOS 0.2.25 08/12/2022 + Call Trace: + dump_stack+0x57/0x6e + warn_alloc.cold.120+0x7b/0xdd + ? _cond_resched+0x15/0x30 + ? __alloc_pages_direct_compact+0x15f/0x170 + __alloc_pages_slowpath.constprop.108+0xc58/0xc70 + __alloc_pages_nodemask+0x2d0/0x300 + kmalloc_order+0x24/0xe0 + kmalloc_order_trace+0x19/0x80 + bnxt_alloc_mem+0x1150/0x15c0 [bnxt_en] + ? bnxt_get_func_stat_ctxs+0x13/0x60 [bnxt_en] + __bnxt_open_nic+0x12e/0x780 [bnxt_en] + bnxt_open+0x10b/0x240 [bnxt_en] + __dev_open+0xe9/0x180 + __dev_change_flags+0x1af/0x220 + dev_change_flags+0x21/0x60 + do_setlink+0x35c/0x1100 + + Instead of allocating this big chunk of memory and dividing it up for the + concurrent TPA instances, allocate each small chunk separately for each + TPA instance. This will reduce it to order-0 allocations. + + Fixes: 79632e9ba386 ("bnxt_en: Expand bnxt_tpa_info struct to support 57500 chips.") + Reviewed-by: Somnath Kotur + Reviewed-by: Damodharam Ammepalli + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2038cc592811209de20c4e094ca08bfb1e6fbc6c +Author: Michael Chan +Date: Fri Feb 10 12:31:55 2023 -0500 + + bnxt_en: Fix mqprio and XDP ring checking logic + + In bnxt_reserve_rings(), there is logic to check that the number of TX + rings reserved is enough to cover all the mqprio TCs, but it fails to + account for the TX XDP rings. So the check will always fail if there + are mqprio TCs and TX XDP rings. As a result, the driver always fails + to initialize after the XDP program is attached and the device will be + brought down. A subsequent ifconfig up will also fail because the + number of TX rings is set to an inconsistent number. Fix the check to + properly account for TX XDP rings. If the check fails, set the number + of TX rings back to a consistent number after calling netdev_reset_tc(). + + Fixes: 674f50a5b026 ("bnxt_en: Implement new method to reserve rings.") + Reviewed-by: Hongguang Gao + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 66c0e13ad236c74ea88c7c1518f3cef7f372e3da +Author: Marek Majtyka +Date: Wed Feb 1 11:24:18 2023 +0100 + + drivers: net (bnxt_en): turn on XDP features + + A summary of the flags being set for various drivers is given below. + Note that XDP_F_REDIRECT_TARGET and XDP_F_FRAG_TARGET are features + that can be turned off and on at runtime. This means that these flags + may be set and unset under RTNL lock protection by the driver. Hence, + READ_ONCE must be used by code loading the flag value. + + Also, these flags are not used for synchronization against the availability + of XDP resources on a device. It is merely a hint, and hence the read + may race with the actual teardown of XDP resources on the device. This + may change in the future, e.g. operations taking a reference on the XDP + resources of the driver, and in turn inhibiting turning off this flag. + However, for now, it can only be used as a hint to check whether device + supports becoming a redirection target. 
+ + Turn 'hw-offload' feature flag on for: + - netronome (nfp) + - netdevsim. + + Turn 'native' and 'zerocopy' features flags on for: + - intel (i40e, ice, ixgbe, igc) + - mellanox (mlx5). + - stmmac + - netronome (nfp) + + Turn 'native' features flags on for: + - amazon (ena) + - broadcom (bnxt) + ... + + Reviewed-by: Gerhard Engleder + Reviewed-by: Simon Horman + Acked-by: Stanislav Fomichev + Acked-by: Jakub Kicinski + Co-developed-by: Kumar Kartikeya Dwivedi + Signed-off-by: Kumar Kartikeya Dwivedi + Co-developed-by: Lorenzo Bianconi + Signed-off-by: Lorenzo Bianconi + Signed-off-by: Marek Majtyka + Link: https://lore.kernel.org/r/3eca9fafb308462f7edb1f58e451d59209aa07eb.1675245258.git.lorenzo@kernel.org + Signed-off-by: Alexei Starovoitov + +commit 30343221132430c24b468493c861f71e2bad131f +Author: Ajit Khaparde +Date: Mon Dec 12 10:36:29 2022 -0800 + + bnxt_en: Remove runtime interrupt vector allocation + + Modified the bnxt_en code to create and pre-configure RDMA devices + with the right MSI-X vector count for the ROCE driver to use. + This is to align the ROCE driver to the auxiliary device model which + will simply bind the driver without getting into PCI-related handling. + All PCI-related logic will now be in the bnxt_en driver. + + Suggested-by: Leon Romanovsky + Signed-off-by: Ajit Khaparde + Reviewed-by: Leon Romanovsky + +commit a43c26fa2e6ca724360927856c326ebd3247b843 +Author: Ajit Khaparde +Date: Tue Dec 6 13:29:46 2022 -0800 + + RDMA/bnxt_re: Remove the sriov config callback + + Remove the SRIOV config callback which the bnxt_en was calling + to reconfigure the chip resources for a PF device when VFs are + created. The code is now modified to provision the VF resources + based on the total VF count instead of the actual VF count. + This allows the SRIOV config callback to be removed from the + list of ulp_ops. 
+ + Suggested-by: Leon Romanovsky + Signed-off-by: Ajit Khaparde + Reviewed-by: Leon Romanovsky + +commit 848dc857c8dee61972abdb05ce81f12f0d0e05e4 +Author: Hongguang Gao +Date: Fri Oct 14 16:31:31 2022 -0700 + + bnxt_en: Remove struct bnxt access from RoCE driver + + Decouple RoCE driver from directly accessing L2's private bnxt + structure. Move the fields needed by RoCE driver into bnxt_en_dev. + They'll be passed to RoCE driver by bnxt_rdma_aux_device_add() + function. + + Signed-off-by: Hongguang Gao + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit 3b65e9456c29217429158203bfdce4361f45e0be +Author: Ajit Khaparde +Date: Wed Sep 7 13:22:42 2022 -0700 + + bnxt_en: Use auxiliary bus calls over proprietary calls + + Wherever possible use the function ops provided by auxiliary bus + instead of using proprietary ops. + + Defined bnxt_re_suspend and bnxt_re_resume calls which can be + invoked by the bnxt_en driver instead of the ULP stop/start calls. + + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit 63669ab384eadebefd1e2a60a15a5431ee874fab +Author: Ajit Khaparde +Date: Mon Sep 5 17:16:51 2022 -0700 + + bnxt_en: Use direct API instead of indirection + + For a single ULP user there is no need for complicating function + indirection calls. Remove all this complexity in favour of direct + function calls exported by the bnxt_en driver. This allows to + simplify the code greatly. Also remove unused ulp_async_notifier. 
+ + Suggested-by: Leon Romanovsky + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit dafcdf5e2bd0bba594a51fe335694d4b44d8b8da +Author: Ajit Khaparde +Date: Thu Sep 1 16:43:46 2022 -0700 + + bnxt_en: Remove usage of ulp_id + + Since the driver continues to use the single ULP model, + the extra complexity and indirection is unnecessary. + Remove the usage of ulp_id from the code. + + Suggested-by: Leon Romanovsky + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit 6d758147c7b80a46465f72e9e6294d244ee98a21 +Author: Ajit Khaparde +Date: Fri Oct 14 14:18:04 2022 -0700 + + RDMA/bnxt_re: Use auxiliary driver interface + + Use auxiliary driver interface for driver load, unload ROCE driver. + The driver does not need to register the interface using the netdev + notifier anymore. Removed the bnxt_re_dev_list which is not needed. + Currently probe, remove and shutdown ops have been implemented for + the auxiliary device. + Also remove exccessve validation checks for rdev. + + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit d80d88b0dfff5829ab31030692672ba6fe9cde48 +Author: Ajit Khaparde +Date: Sun Mar 6 20:01:30 2022 -0800 + + bnxt_en: Add auxiliary driver support + + Add auxiliary driver support. + An auxiliary device will be created if the hardware indicates + support for RDMA. + The bnxt_ulp_probe() function has been removed and a new + bnxt_rdma_aux_device_add() function has been added. + The bnxt_free_msix_vecs() and bnxt_req_msix_vecs() will now hold + the RTNL lock when they call the bnxt_close_nic()and bnxt_open_nic() + since the device close and open need to be protected under RTNL lock. + The operations between the bnxt_en and bnxt_re will be protected + using the en_ops_lock. 
+ This will be used by the bnxt_re driver in a follow-on patch + to create ROCE interfaces. + + Signed-off-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Reviewed-by: Selvin Xavier + Reviewed-by: Leon Romanovsky + +commit fb8421a94c5613fee86e192bab0892ecb1d56e4c +Author: Jiri Pirko +Date: Fri Jan 27 16:50:42 2023 +0100 + + devlink: remove devlink features + + Devlink features were introduced to disallow devlink reload calls of + userspace before the devlink was fully initialized. The reason for this + workaround was the fact that devlink reload was originally called + without devlink instance lock held. + + However, with recent changes that converted devlink reload to be + performed under devlink instance lock, this is redundant so remove + devlink features entirely. + + Note that mlx5 used this to enable devlink reload conditionally only + when device didn't act as multi port slave. Move the multi port check + into mlx5_devlink_reload_down() callback alongside with the other + checks preventing the device from reload in certain states. + + Signed-off-by: Jiri Pirko + Reviewed-by: Jacob Keller + Signed-off-by: David S. Miller + +commit d3e599c090fc6977331150c5f0a69ab8ce87da21 +Author: Kees Cook +Date: Wed Jan 18 12:35:01 2023 -0800 + + bnxt: Do not read past the end of test names + + Test names were being concatenated based on a offset beyond the end of + the first name, which tripped the buffer overflow detection logic: + + detected buffer overflow in strnlen + [...] + Call Trace: + bnxt_ethtool_init.cold+0x18/0x18 + + Refactor struct hwrm_selftest_qlist_output to use an actual array, + and adjust the concatenation to use snprintf() rather than a series of + strncat() calls. + + Reported-by: Niklas Cassel + Link: https://lore.kernel.org/lkml/Y8F%2F1w1AZTvLglFX@x1-carbon/ + Tested-by: Niklas Cassel + Fixes: eb51365846bc ("bnxt_en: Add basic ethtool -t selftest support.") + Cc: Michael Chan + Cc: "David S. 
Miller" + Cc: Eric Dumazet + Cc: Jakub Kicinski + Cc: Paolo Abeni + Cc: netdev@vger.kernel.org + Signed-off-by: Kees Cook + Reviewed-by: Michael Chan + Reviewed-by: Niklas Cassel + Signed-off-by: David S. Miller + +commit 97f5e03a4a27d27ee4fed0cdb1658c81cf2784db +Author: Jakub Kicinski +Date: Tue Jan 10 20:25:47 2023 -0800 + + bnxt: make sure we return pages to the pool + + Before the commit under Fixes the page would have been released + from the pool before the napi_alloc_skb() call, so normal page + freeing was fine (released page == no longer in the pool). + + After the change we just mark the page for recycling so it's still + in the pool if the skb alloc fails, we need to recycle. + + Same commit added the same bug in the new bnxt_rx_multi_page_skb(). + + Fixes: 1dc4c557bfed ("bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff") + Reviewed-by: Andy Gospodarek + Link: https://lore.kernel.org/r/20230111042547.987749-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit a056ebcc30e2f78451d66f615d2f6bdada3e6438 +Author: Michael Chan +Date: Mon Dec 26 22:19:40 2022 -0500 + + bnxt_en: Fix HDS and jumbo thresholds for RX packets + + The recent XDP multi-buffer feature has introduced regressions in the + setting of HDS and jumbo thresholds. HDS was accidentally disabled in + the nornmal mode without XDP. This patch restores jumbo HDS placement + when not in XDP mode. In XDP multi-buffer mode, HDS should be disabled + and the jumbo threshold should be set to the usable page size in the + first page buffer. + + Fixes: 32861236190b ("bnxt: change receive ring space parameters") + Reviewed-by: Mohammad Shuab Siddique + Reviewed-by: Ajit Khaparde + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 1abeacc1979fa4a756695f5030791d8f0fa934b9 +Author: Michael Chan +Date: Mon Dec 26 22:19:39 2022 -0500 + + bnxt_en: Fix first buffer size calculations for XDP multi-buffer + + The size of the first buffer is always page size, and the useable + space is the page size minus the offset and the skb_shared_info size. + Make sure SKB and XDP buf sizes match so that the skb_shared_info + is at the same offset seen from the SKB and XDP_BUF. + + build_skb() should be passed PAGE_SIZE. xdp_init_buff() should + be passed PAGE_SIZE as well. xdp_get_shared_info_from_buff() will + automatically deduct the skb_shared_info size if the XDP buffer + has frags. There is no need to keep bp->xdp_has_frags. + + Change BNXT_PAGE_MODE_BUF_SIZE to BNXT_MAX_PAGE_MODE_MTU_SBUF + since this constant is really the MTU with ethernet header size + subtracted. + + Also fix the BNXT_MAX_PAGE_MODE_MTU macro with proper parentheses. + + Fixes: 32861236190b ("bnxt: change receive ring space parameters") + Reviewed-by: Somnath Kotur + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9b3e607871ea5ee90f10f5be3965fc07f2aa3ef7 +Author: Michael Chan +Date: Mon Dec 26 22:19:38 2022 -0500 + + bnxt_en: Fix XDP RX path + + The XDP program can change the starting address of the RX data buffer and + this information needs to be passed back from bnxt_rx_xdp() to + bnxt_rx_pkt() for the XDP_PASS case so that the SKB can point correctly + to the modified buffer address. Add back the data_ptr parameter to + bnxt_rx_xdp() to make this work. + + Fixes: b231c3f3414c ("bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff") + Reviewed-by: Andy Gospodarek + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit bbfc17e50ba2ed18dfef46b1c433d50a58566bf1 +Author: Michael Chan +Date: Mon Dec 26 22:19:37 2022 -0500 + + bnxt_en: Simplify bnxt_xdp_buff_init() + + bnxt_xdp_buff_init() does not modify the data_ptr or the len parameters, + so no need to pass in the addresses of these parameters. + + Fixes: b231c3f3414c ("bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff") + Reviewed-by: Andy Gospodarek + Reviewed-by: Somnath Kotur + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0020ae2a4aa81becd182231bf48acd66c86c86dd +Author: Vikas Gupta +Date: Mon Dec 26 22:19:36 2022 -0500 + + bnxt_en: fix devlink port registration to netdev + + We don't register a devlink port in case of a VF so + avoid setting the devlink pointer to netdev. + Also, SET_NETDEV_DEVLINK_PORT has to be moved + so that we determine whether the device is PF/VF first. + + This fixes the NULL pointer dereference of devlink_port->devlink + when creating VFs: + + BUG: kernel NULL pointer dereference, address: 0000000000000160 + PGD 0 + Oops: 0000 [#1] PREEMPT SMP NOPTI + CPU: 14 PID: 388 Comm: kworker/14:1 Kdump: loaded Not tainted 6.1.0-rc8 #5 + Hardware name: Dell Inc. 
PowerEdge R750/06V45N, BIOS 1.3.8 08/31/2021 + Workqueue: events work_for_cpu_fn + RIP: 0010:devlink_nl_port_handle_size+0xb/0x50 + Code: 83 c4 10 5b 5d c3 cc cc cc cc b8 a6 ff ff ff eb de e8 c9 59 21 00 66 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 53 48 8b 47 20 <48> 8b a8 60 01 00 00 48 8b 45 60 48 8b 38 e8 92 90 1a 00 48 8b 7d + RSP: 0018:ff4fe5394846fcd8 EFLAGS: 00010286 + RAX: 0000000000000000 RBX: 0000000000000794 RCX: 0000000000000000 + RDX: ff1f129683a30a40 RSI: 0000000000000008 RDI: ff1f1296bb496188 + RBP: 0000000000000334 R08: 0000000000000cc0 R09: 0000000000000000 + R10: ff1f1296bb494298 R11: ffffffffffffffc0 R12: 0000000000000000 + R13: 0000000000000000 R14: ff1f1296bb494000 R15: 0000000000000000 + FS: 0000000000000000(0000) GS:ff1f129e5fa00000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000160 CR3: 000000131f610006 CR4: 0000000000771ee0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + PKRU: 55555554 + Call Trace: + + if_nlmsg_size+0x14a/0x220 + rtmsg_ifinfo_build_skb+0x3c/0x100 + rtmsg_ifinfo+0x9c/0xc0 + register_netdevice+0x59d/0x670 + register_netdev+0x1c/0x40 + bnxt_init_one+0x674/0xa60 [bnxt_en] + local_pci_probe+0x42/0x80 + work_for_cpu_fn+0x13/0x20 + process_one_work+0x1e2/0x3b0 + ? rescuer_thread+0x390/0x390 + worker_thread+0x1c4/0x3a0 + ? rescuer_thread+0x390/0x390 + kthread+0xd6/0x100 + ? kthread_complete_and_exit+0x20/0x20 + + Fixes: ac73d4bf2cda ("net: make drivers to use SET_NETDEV_DEVLINK_PORT to set devlink_port") + Cc: Jiri Pirko + Signed-off-by: Vikas Gupta + Reviewed-by: Andy Gospodarek + Reviewed-by: Kalesh Anakkur Purayil + Reviewed-by: Damodharam Ammepalli + Signed-off-by: Michael Chan + Signed-off-by: David S. 
This reduces the number of packets traversing the networking stack and + should usually improve performance. However, it also inserts a + temporary Hop-by-hop IPv6 extension header. + + Using the HBH header removal method in the previous patch, the extra header + can be removed in bnxt drivers to allow it to send big TCP packets (bigger + TSO packets) as well.
+ + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20221130013108.90062-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit 226bf980550627c88549b112ac6c8fb40873afb4 +Author: Vincent Mailhol +Date: Tue Nov 29 18:51:38 2022 +0900 + + net: devlink: let the core report the driver name instead of the drivers + + The driver name is available in device_driver::name. Right now, + drivers still have to report this piece of information themselves in + their devlink_ops::info_get callback function. + + In order to factorize code, make devlink_nl_info_fill() add the driver + name attribute. + + Now that the core sets the driver name attribute, drivers are not + supposed to call devlink_info_driver_name_put() anymore. Remove + devlink_info_driver_name_put() and clean-up all the drivers using this + function in their callback. + + Signed-off-by: Vincent Mailhol + Tested-by: Ido Schimmel # mlxsw + Reviewed-by: Jacob Keller + Reviewed-by: Jiri Pirko + Signed-off-by: Jakub Kicinski + +commit 991aef4ee4f6eb999924f429b943441a32835c8f +Author: Gaosheng Cui +Date: Fri Nov 11 15:04:33 2022 +0800 + + bnxt_en: Remove debugfs when pci_register_driver failed + + When pci_register_driver failed, we need to remove debugfs, + which will caused a resource leak, fix it. + + Resource leak logs as follows: + [ 52.184456] debugfs: Directory 'bnxt_en' with parent '/' already present! + + Fixes: cabfb09d87bd ("bnxt_en: add debugfs support for DIM") + Signed-off-by: Gaosheng Cui + Reviewed-by: Leon Romanovsky + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit a29c132f92ed5af6e7116966b7e9899d4c22783c +Author: Jacob Keller +Date: Wed Nov 9 15:09:43 2022 -0800 + + ptp: bnxt: convert .adjfreq to .adjfine + + When the BNXT_FW_CAP_PTP_RTC flag is not set, the bnxt driver implements + .adjfreq on a cyclecounter in terms of the straightforward "base * ppb / 1 + billion" calculation. 
With this new mechanism, we can now report the actual configured hash + back to the user.
Add bnxt_hwrm_update_rss_hash_cfg() to report the + actual hash after user configuration. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Reviewed-by: Leon Romanovsky + Signed-off-by: Paolo Abeni + +commit 41d2dd42bfa17035c3b0429b6b0e46305607fcc7 +Author: Edwin Peer +Date: Sun Nov 6 19:16:30 2022 -0500 + + bnxt_en: refactor VNIC RSS update functions + + Extract common code into a new function. This will avoid duplication + in the next patch, which changes the update algorithm for both the P5 + and legacy code paths. + + No functional changes. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Reviewed-by: Leon Romanovsky + Signed-off-by: Paolo Abeni + +commit 9a0f830f80265bd1ef816e1541ac24bee80e9a3c +Author: Jakub Kicinski +Date: Fri Nov 4 12:01:25 2022 -0700 + + ethtool: linkstate: add a statistic for PHY down events + + The previous attempt to augment carrier_down (see Link) + was not met with much enthusiasm so let's do the simple + thing of exposing what some devices already maintain. + Add a common ethtool statistic for link going down. + Currently users have to maintain per-driver mapping + to extract the right stat from the vendor-specific ethtool -S + stats. carrier_down does not fit the bill because it counts + a lot of software related false positives. + + Add the statistic to the extended link state API to steer + vendors towards implementing all of it. + + Implement for bnxt and all Linux-controlled PHYs. mlx5 and (possibly) + enic also have a counter for this but I leave the implementation + to their maintainers. 
+ + Link: https://lore.kernel.org/r/20220520004500.2250674-1-kuba@kernel.org + Reviewed-by: Florian Fainelli + Reviewed-by: Michael Chan + Reviewed-by: Andrew Lunn + Signed-off-by: Jakub Kicinski + Link: https://lore.kernel.org/r/20221104190125.684910-1-kuba@kernel.org + Signed-off-by: Paolo Abeni + +commit 02597d39145bb0aa81d04bf39b6a913ce9a9d465 +Author: Alex Barba +Date: Thu Nov 3 19:33:27 2022 -0400 + + bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer + + In the bnxt_en driver ndo_rx_flow_steer returns '0' whenever an entry + that we are attempting to steer is already found. This is not the + correct behavior. The return code should be the value/index that + corresponds to the entry. Returning zero all the time causes the + RFS records to be incorrect unless entry '0' is the correct one. As + flows migrate to different cores this can create entries that are not + correct. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reported-by: Akshay Navgire + Signed-off-by: Alex Barba + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 6d81ea3765dfa6c8a20822613c81edad1c4a16a0 +Author: Michael Chan +Date: Thu Nov 3 19:33:26 2022 -0400 + + bnxt_en: Fix possible crash in bnxt_hwrm_set_coal() + + During the error recovery sequence, the rtnl_lock is not held for the + entire duration and some datastructures may be freed during the sequence. + Check for the BNXT_STATE_OPEN flag instead of netif_running() to ensure + that the device is fully operational before proceeding to reconfigure + the coalescing settings. + + This will fix a possible crash like this: + + BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 + PGD 0 P4D 0 + Oops: 0000 [#1] SMP NOPTI + CPU: 10 PID: 181276 Comm: ethtool Kdump: loaded Tainted: G IOE --------- - - 4.18.0-348.el8.x86_64 #1 + Hardware name: Dell Inc. 
PowerEdge R740/0F9N89, BIOS 2.3.10 08/15/2019 + RIP: 0010:bnxt_hwrm_set_coal+0x1fb/0x2a0 [bnxt_en] + Code: c2 66 83 4e 22 08 66 89 46 1c e8 10 cb 00 00 41 83 c6 01 44 39 b3 68 01 00 00 0f 8e a3 00 00 00 48 8b 93 c8 00 00 00 49 63 c6 <48> 8b 2c c2 48 8b 85 b8 02 00 00 48 85 c0 74 2e 48 8b 74 24 08 f6 + RSP: 0018:ffffb11c8dcaba50 EFLAGS: 00010246 + RAX: 0000000000000000 RBX: ffff8d168a8b0ac0 RCX: 00000000000000c5 + RDX: 0000000000000000 RSI: ffff8d162f72c000 RDI: ffff8d168a8b0b28 + RBP: 0000000000000000 R08: b6e1f68a12e9a7eb R09: 0000000000000000 + R10: 0000000000000001 R11: 0000000000000037 R12: ffff8d168a8b109c + R13: ffff8d168a8b10aa R14: 0000000000000000 R15: ffffffffc01ac4e0 + FS: 00007f3852e4c740(0000) GS:ffff8d24c0080000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000000 CR3: 000000041b3ee003 CR4: 00000000007706e0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + PKRU: 55555554 + Call Trace: + ethnl_set_coalesce+0x3ce/0x4c0 + genl_family_rcv_msg_doit.isra.15+0x10f/0x150 + genl_family_rcv_msg+0xb3/0x160 + ? coalesce_fill_reply+0x480/0x480 + genl_rcv_msg+0x47/0x90 + ? genl_family_rcv_msg+0x160/0x160 + netlink_rcv_skb+0x4c/0x120 + genl_rcv+0x24/0x40 + netlink_unicast+0x196/0x230 + netlink_sendmsg+0x204/0x3d0 + sock_sendmsg+0x4c/0x50 + __sys_sendto+0xee/0x160 + ? syscall_trace_enter+0x1d3/0x2c0 + ? __audit_syscall_exit+0x249/0x2a0 + __x64_sys_sendto+0x24/0x30 + do_syscall_64+0x5b/0x1a0 + entry_SYSCALL_64_after_hwframe+0x65/0xca + RIP: 0033:0x7f38524163bb + + Fixes: 2151fe0830fd ("bnxt_en: Handle RESET_NOTIFY async event from firmware.") + Reviewed-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 0cf736a18a1e804037839bd8df9e36f0efdb8745 +Author: Vikas Gupta +Date: Thu Nov 3 19:33:25 2022 -0400 + + bnxt_en: fix the handling of PCIE-AER + + Fix the sequence required for PCIE-AER. 
While slot reset occurs, firmware + might not be ready and the driver needs to check for its recovery. We + also need to remap the health registers for some chips and clear the + resource reservations. The resources will be allocated again during + bnxt_io_resume(). + + Fixes: fb1e6e562b37 ("bnxt_en: Fix AER recovery.") + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit b4c66425771ddb910316c7b4cd7fa0614098ec45 +Author: Vikas Gupta +Date: Thu Nov 3 19:33:24 2022 -0400 + + bnxt_en: refactor bnxt_cancel_reservations() + + Introduce bnxt_clear_reservations() to clear the reserved attributes only. + This will be used in the next patch to fix PCI AER handling. + + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 77df1db80da384c565106321f5934967690da7dd +Author: Jiri Pirko +Date: Wed Nov 2 17:02:10 2022 +0100 + + net: remove unused ndo_get_devlink_port + + Remove ndo_get_devlink_port which is no longer used alongside with the + implementations in drivers. + + Signed-off-by: Jiri Pirko + Signed-off-by: Jakub Kicinski + +commit ac73d4bf2cdaf2cb8a43df8ee4a5c066d2c5d7b4 +Author: Jiri Pirko +Date: Wed Nov 2 17:02:04 2022 +0100 + + net: make drivers to use SET_NETDEV_DEVLINK_PORT to set devlink_port + + Benefit from the previously implemented tracking of netdev events in + devlink code and instead of calling devlink_port_type_eth_set() and + devlink_port_type_clear() to set devlink port type and link to related + netdev, use SET_NETDEV_DEVLINK_PORT() macro to assign devlink_port + pointer to netdevice which is about to be registered. + + Signed-off-by: Jiri Pirko + Signed-off-by: Jakub Kicinski + +commit 45034224623a5634e4ccc57b497ac825c260170f +Author: Vikas Gupta +Date: Fri Oct 21 02:37:23 2022 -0400 + + bnxt_en: check and resize NVRAM UPDATE entry before flashing + + Resize of the UPDATE entry is required if the image to + be flashed is larger than the available space. 
Add this step, + otherwise flashing larger firmware images by ethtool or devlink + may fail. + + Reviewed-by: Andy Gospodarek + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 7ef3d3901b99d9715840c9860082bec124beed83 +Author: Vikas Gupta +Date: Fri Oct 21 02:37:22 2022 -0400 + + bnxt_en: add .get_module_eeprom_by_page() support + + Add support for .get_module_eeprom_by_page() callback which + implements generic solution for module`s eeprom access. + + v3: Add bnxt_get_module_status() to get a more specific extack error + string. + Return -EINVAL from bnxt_get_module_eeprom_by_page() when we + don't want to fallback to old method. + v2: Simplification suggested by Ido Schimmel + + Link: https://lore.kernel.org/netdev/YzVJ%2FvKJugoz15yV@shredder/ + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Reviewed-by: Ido Schimmel + Signed-off-by: Jakub Kicinski + +commit 84a911db83055e2d4c9d0171f116a47711014374 +Author: Michael Chan +Date: Fri Oct 21 02:37:21 2022 -0400 + + bnxt_en: Update firmware interface to 1.10.2.118 + + The main changes are PTM timestamp support, CMIS EEPROM support, and + asymmetric CoS queues support. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit ba077d683d45190afc993c1ce45bcdbfda741a40 +Author: Vikas Gupta +Date: Mon Oct 17 11:32:22 2022 -0400 + + bnxt_en: fix memory leak in bnxt_nvm_test() + + Free the kzalloc'ed buffer before returning in the success path. + + Fixes: 5b6ff128fdf6 ("bnxt_en: implement callbacks for devlink selftests") + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1666020742-25834-1-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 197173db990cad244221ba73c43b1df6170ae278 +Author: Jason A. 
Any change to the hardware timestamps configuration triggers nic restart, + which breaks transmission and reception of network packets for a while. + But there is no need to fully restart the device because while configuring + hardware timestamps. The code for changing configuration runs after all + of the initialisation, when the NIC is actually up and running. This patch + changes the code that ioctl will only update configuration registers and + will not trigger carrier status change, but in case of timestamps for + all rx packets it falls back to close()/open() sequence because of + synchronization issues in the hardware. Tested on BCM57504.
+ + Cc: Richard Cochran + Signed-off-by: Vadim Fedorenko + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20220922191038.29921-1-vfedorenko@novek.ru + Signed-off-by: Jakub Kicinski + +commit c31f26c8f69f776759cbbdfb38e40ea91aa0dd65 +Author: Jakub Kicinski +Date: Wed Sep 21 13:10:05 2022 -0700 + + bnxt: prevent skb UAF after handing over to PTP worker + + When reading the timestamp is required bnxt_tx_int() hands + over the ownership of the completed skb to the PTP worker. + The skb should not be used afterwards, as the worker may + run before the rest of our code and free the skb, leading + to a use-after-free. + + Since dev_kfree_skb_any() accepts NULL make the loss of + ownership more obvious and set skb to NULL. + + Fixes: 83bb623c968e ("bnxt_en: Transmit and retrieve packet timestamps") + Reviewed-by: Andy Gospodarek + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20220921201005.335390-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit ae8ffba8baad651af706538e8c47d0a049d406c6 +Author: Vadim Fedorenko +Date: Fri Sep 16 02:49:32 2022 +0300 + + bnxt_en: fix flags to check for supported fw version + + The warning message of unsupported FW appears every time RX timestamps + are disabled on the interface. The patch fixes the flags to correct set + for the check. + + Fixes: 66ed81dcedc6 ("bnxt_en: Enable packet timestamping for all RX packets") + Cc: Richard Cochran + Signed-off-by: Vadim Fedorenko + Reviewed-by: Andy Gospodarek + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20220915234932.25497-1-vfedorenko@novek.ru + Signed-off-by: Jakub Kicinski + +commit 366c304741729e64d778c80555d9eb422cf5cc89 +Author: Vikas Gupta +Date: Mon Aug 22 11:06:54 2022 -0400 + + bnxt_en: fix LRO/GRO_HW features in ndo_fix_features callback + + LRO/GRO_HW should be disabled if there is an attached XDP program. + BNXT_FLAG_TPA is the current setting of the LRO/GRO_HW. 
Using + BNXT_FLAG_TPA to disable LRO/GRO_HW will cause these features to be + permanently disabled once they are disabled. + + Fixes: 1dc4c557bfed ("bnxt: adding bnxt_xdp_build_skb to build skb from multibuffer xdp_buff") + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 09a89cc59ad67794a11e1d3dd13c5b3172adcc51 +Author: Vikas Gupta +Date: Mon Aug 22 11:06:53 2022 -0400 + + bnxt_en: fix NQ resource accounting during vf creation on 57500 chips + + There are 2 issues: + + 1. We should decrement hw_resc->max_nqs instead of hw_resc->max_irqs + with the number of NQs assigned to the VFs. The IRQs are fixed + on each function and cannot be re-assigned. Only the NQs are being + assigned to the VFs. + + 2. vf_msix is the total number of NQs to be assigned to the VFs. So + we should decrement vf_msix from hw_resc->max_nqs. + + Fixes: b16b68918674 ("bnxt_en: Add SR-IOV support for 57500 chips.") + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 574b2bb9692fd3d45ed631ac447176d4679f3010 +Author: Vikas Gupta +Date: Mon Aug 22 11:06:52 2022 -0400 + + bnxt_en: set missing reload flag in devlink features + + Add missing devlink_set_features() API for callbacks reload_down + and reload_up to function. + + Fixes: 228ea8c187d8 ("bnxt_en: implement devlink dev reload driver_reinit") + Reviewed-by: Somnath Kotur + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 7dd3de7cb1d657a918c6b2bc673c71e318aa0c05 +Author: Pavan Chebbi +Date: Mon Aug 22 11:06:51 2022 -0400 + + bnxt_en: Use PAGE_SIZE to init buffer when multi buffer XDP is not in use + + Using BNXT_PAGE_MODE_BUF_SIZE + offset as buffer length value is not + sufficient when running single buffer XDP programs doing redirect + operations. The stack will complain on missing skb tail room. Fix it + by using PAGE_SIZE when calling xdp_init_buff() for single buffer + programs. 
+ + Fixes: b231c3f3414c ("bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff") + Reviewed-by: Somnath Kotur + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit ddde5412fdaa5048bbca31529d46cb8da882870c +Author: Pavan Chebbi +Date: Mon Jul 11 22:26:18 2022 -0400 + + bnxt_en: Fix bnxt_refclk_read() + + The upper 32-bit PHC register is not latched when reading the lower + 32-bit PHC register. Current code leaves a small window where we may + not read correct higher order bits if the lower order bits are just about + to wrap around. + + This patch fixes this by reading higher order bits twice and makes + sure that final value is correctly paired with its lower 32 bits. + + Fixes: 30e96f487f64 ("bnxt_en: Do not read the PTP PHC during chip reset") + Cc: Richard Cochran + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 53f8c2d37efb5b03b9527ad04332df3bb889f0fa +Author: Michael Chan +Date: Mon Jul 11 22:26:17 2022 -0400 + + bnxt_en: Fix and simplify XDP transmit path + + Fix the missing length hint in the TX BD for the XDP transmit path. The + length hint is required on legacy chips. + + Also, simplify the code by eliminating the first_buf local variable. + tx_buf contains the same value. The opaque value only needs to be set + on the first BD. Fix this also for correctness. + + Fixes: a7559bc8c17c ("bnxt: support transmit and free of aggregation buffers") + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 619b9b1622c283cc5ca86f4c487db266a8f55dab +Author: Vikas Gupta +Date: Mon Jul 11 22:26:16 2022 -0400 + + bnxt_en: fix livepatch query + + In the livepatch query fw_target BNXT_FW_SRT_PATCH is + applicable for P5 chips only. 
+ + Fixes: 3c4153394e2c ("bnxt_en: implement firmware live patching") + Reviewed-by: Saravanan Vajravel + Reviewed-by: Somnath Kotur + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 4279414bff8af9898e8c53ae6c5bc17f68ad67b7 +Author: Michael Chan +Date: Mon Jul 11 22:26:15 2022 -0400 + + bnxt_en: Fix bnxt_reinit_after_abort() code path + + bnxt_reinit_after_abort() is called during ifup when a previous + FW reset sequence has aborted or a previous ifup has failed after + detecting FW reset. In all cases, it is safe to assume that a + previous FW reset has completed and the driver may not have fully + reinitialized. + + Prior to this patch, it is assumed that the + FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE flag will always be + set by the firmware in bnxt_hwrm_if_change(). This may not be true if + the driver has already attempted to register with the firmware. The + firmware may not set the RESET_DONE flag again after the driver has + registered, assuming that the driver has seen the flag already. + + Fix it to always go through the FW reset initialization path if + the BNXT_STATE_FW_RESET_DET flag is set. This flag is always set + by the driver after successfully going through bnxt_reinit_after_abort(). + + Fixes: 6882c36cf82e ("bnxt_en: attempt to reinitialize after aborted reset") + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit c5b744d38c36a407a41e918602eec4d89730787b +Author: Kashyap Desai +Date: Mon Jul 11 22:26:14 2022 -0400 + + bnxt_en: reclaim max resources if sriov enable fails + + If bnxt_sriov_enable() fails after some resources have been reserved + for the VFs, the current code is not unwinding properly and the + reserved resources become unavailable afterwards. Fix it by + properly unwinding with a call to bnxt_hwrm_func_qcaps() to + reset all maximum resources. 
+ + Also, add the missing bnxt_ulp_sriov_cfg() call to let the RDMA + driver know to abort. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Kashyap Desai + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 45262522d0027269dbece119f1cb89e25f5de965 +Author: Christophe JAILLET +Date: Tue Jul 5 22:22:59 2022 +0200 + + bnxt: Use the bitmap API to allocate bitmaps + + Use bitmap_zalloc()/bitmap_free() instead of hand-writing them. + + It is less verbose and it improves the semantic. + + Signed-off-by: Christophe JAILLET + Link: https://lore.kernel.org/r/d508f3adf7e2804f4d3793271b82b196a2ccb940.1657052562.git.christophe.jaillet@wanadoo.fr + Signed-off-by: Jakub Kicinski + +commit 504148fedb854299972d164b001357b888a9193e +Author: Eric Dumazet +Date: Thu Jun 30 15:07:50 2022 +0000 + + net: add skb_[inner_]tcp_all_headers helpers + + Most drivers use "skb_transport_offset(skb) + tcp_hdrlen(skb)" + to compute headers length for a TCP packet, but others + use more convoluted (but equivalent) ways. + + Add skb_tcp_all_headers() and skb_inner_tcp_all_headers() + helpers to harmonize this a bit. + + Signed-off-by: Eric Dumazet + Signed-off-by: David S. Miller + +commit c909e7ca494f397f51648048252d00d3dd61cefd +Author: Jiang Jian +Date: Wed Jun 22 22:45:26 2022 +0800 + + bnxt: Fix typo in comments + + Remove the repeated word 'and' from comments + + Signed-off-by: Jiang Jian + Link: https://lore.kernel.org/r/20220622144526.20659-1-jiangjian@cdjrlc.com + Signed-off-by: Jakub Kicinski + +commit dbb2f362c7835660a4e39eadd7481563e2e176b7 +Author: Jakub Kicinski +Date: Thu May 19 23:19:55 2022 -0700 + + eth: bnxt: make ulp_id unsigned to make GCC 12 happy + + GCC array bounds checking complains that ulp_id is validated + only against upper bound. Make it unsigned. 
+ + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20220520061955.2312968-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit ab0bed4bf6fae8a42cf3b08b38e1fffb1a79193a +Author: Kalesh AP +Date: Thu May 12 22:40:24 2022 -0400 + + bnxt_en: parse and report result field when NVRAM package install fails + + Instead of always returning -ENOPKG, decode the firmware error + code further when the HWRM_NVM_INSTALL_UPDATE firmware call fails. + Return a more suitable error code to userspace and log an error + in dmesg. + + This is version 2 of the earlier patch that was reverted: + + 02acd399533e ("bnxt_en: parse result field when NVRAM package install fails") + + In this new version, if the call is made through devlink instead of + ethtool, we'll also set the error message in extack. + + Link: https://lore.kernel.org/netdev/20220307141358.4d52462e@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com/ + Reviewed-by: Somnath Kotur + Reviewed-by: Pavan Chebbi + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 66ed81dcedc665bf8c7dfc3867d425f50eba219e +Author: Pavan Chebbi +Date: Thu May 12 22:40:23 2022 -0400 + + bnxt_en: Enable packet timestamping for all RX packets + + Add driver support to enable timestamping on all RX packets + that are received by the NIC. This capability can be requested + by the applications using SIOCSHWTSTAMP ioctl with filter type + HWTSTAMP_FILTER_ALL. + + Cc: Richard Cochran + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 11862689e8f117e4702f55000790d7bce6859e84 +Author: Pavan Chebbi +Date: Thu May 12 22:40:22 2022 -0400 + + bnxt_en: Configure ptp filters during bnxt open + + For correctness, we need to configure the packet filters for timestamping + during bnxt_open. This way they are always configured after firmware + reset or chip reset. We should not assume that the filters will always + be retained across resets. 
It is possible to exhaust the budget again when + __bnxt_poll_cqs() returns.
+ + Fixes: 24ac1ecd5240 ("bnxt_en: Add driver support to use Real Time Counter for PTP") + Reviewed-by: Pavan Chebbi + Reviewed-by: Saravanan Vajravel + Reviewed-by: Andy Gospodarek + Reviewed-by: Somnath Kotur + Reviewed-by: Damodharam Ammepalli + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 13ba794397e45e52893cfc21d7a69cb5f341b407 +Author: Somnath Kotur +Date: Mon May 2 21:13:10 2022 -0400 + + bnxt_en: Fix possible bnxt_open() failure caused by wrong RFS flag + + bnxt_open() can fail in this code path, especially on a VF when + it fails to reserve default rings: + + bnxt_open() + __bnxt_open_nic() + bnxt_clear_int_mode() + bnxt_init_dflt_ring_mode() + + RX rings would be set to 0 when we hit this error path. + + It is possible for a subsequent bnxt_open() call to potentially succeed + with a code path like this: + + bnxt_open() + bnxt_hwrm_if_change() + bnxt_fw_init_one() + bnxt_fw_init_one_p3() + bnxt_set_dflt_rfs() + bnxt_rfs_capable() + bnxt_hwrm_reserve_rings() + + On older chips, RFS is capable if we can reserve the number of vnics that + is equal to RX rings + 1. But since RX rings is still set to 0 in this + code path, we may mistakenly think that RFS is supported for 0 RX rings. + + Later, when the default RX rings are reserved and we try to enable + RFS, it would fail and cause bnxt_open() to fail unnecessarily. + + We fix this in 2 places. bnxt_rfs_capable() will always return false if + RX rings is not yet set. bnxt_init_dflt_ring_mode() will call + bnxt_set_dflt_rfs() which will always clear the RFS flags if RFS is not + supported. 
v3: Add a check to make sure XDP program supports multipage packets.
+ + v3: Use BNXT_PAGE_MODE_BUF_SIZE for build_skb + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9a6aa3504885331a2fbf843c8cb7fa6be49a3d40 +Author: Andy Gospodarek +Date: Fri Apr 8 03:59:03 2022 -0400 + + bnxt: add page_pool support for aggregation ring when using xdp + + If we are using aggregation rings with XDP enabled, allocate page + buffers for the aggregation rings from the page_pool. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 32861236190bf1247d18e245cee0814603d2c29f +Author: Andy Gospodarek +Date: Fri Apr 8 03:59:02 2022 -0400 + + bnxt: change receive ring space parameters + + Modify ring header data split and jumbo parameters to account + for the fact that the design for XDP multibuffer puts close to + the first 4k of data in a page and the remaining portions of + the packet go in the aggregation ring. + + v3: Simplified code around initial buffer size calculation + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 31b9998bf225eca51f0d9f8d694d807495bf80a8 +Author: Andy Gospodarek +Date: Fri Apr 8 03:59:01 2022 -0400 + + bnxt: set xdp_buff pfmemalloc flag if needed + + Set the pfmemaloc flag in the xdp buff so that this can be + copied to the skb if needed for an XDP_PASS action. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4c6c123c9af9c94be4726134ca72ba5a0be0ebd0 +Author: Andy Gospodarek +Date: Fri Apr 8 03:59:00 2022 -0400 + + bnxt: adding bnxt_rx_agg_pages_xdp for aggregated xdp + + This patch adds a new function that will read pages from the + aggregation ring and create an xdp_buff with frags based on + the entries in the aggregation ring. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 23e4c0469ad03f695993cceccb50cbddf9ef8963 +Author: Andy Gospodarek +Date: Fri Apr 8 03:58:59 2022 -0400 + + bnxt: rename bnxt_rx_pages to bnxt_rx_agg_pages_skb + + Clarify that this is reading buffers from the aggregation ring. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ca1df2dd8e2f2c18a90d21e59ad56d43c2e9322e +Author: Andy Gospodarek +Date: Fri Apr 8 03:58:58 2022 -0400 + + bnxt: refactor bnxt_rx_pages operate on skb_shared_info + + Rather than operating on an sk_buff, add frags from the aggregation + ring into the frags of an skb_shared_info. This will allow the + caller to use either an sk_buff or xdp_buff. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ee536dcbdce4966009b4ea15f03cba045161249a +Author: Andy Gospodarek +Date: Fri Apr 8 03:58:57 2022 -0400 + + bnxt: add flag to denote that an xdp program is currently attached + + This will be used to determine if bnxt_rx_xdp should be called + rather than calling it every time. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b231c3f3414cfc7bf8fb1e246ed5a3d523616520 +Author: Andy Gospodarek +Date: Fri Apr 8 03:58:56 2022 -0400 + + bnxt: refactor bnxt_rx_xdp to separate xdp_init_buff/xdp_prepare_buff + + Move initialization of xdp_buff outside of bnxt_rx_xdp to prepare + for allowing bnxt_rx_xdp to operate on multibuffer xdp_buffs. + + v2: Fix uninitalized variables warning in bnxt_xdp.c. + v3: Add new define BNXT_PAGE_MODE_BUF_SIZE + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 27d4073f8d9af0340362554414f4961643a4f4de +Author: Ray Jui +Date: Fri Apr 1 20:21:12 2022 -0400 + + bnxt_en: Prevent XDP redirect from running when stopping TX queue + + Add checks in the XDP redirect callback to prevent XDP from running when + the TX ring is undergoing shutdown. 
+ + Also remove redundant checks in the XDP redirect callback to validate the + txr and the flag that indicates the ring supports XDP. The modulo + arithmetic on 'tx_nr_rings_xdp' already guarantees the derived TX + ring is an XDP ring. txr is also guaranteed to be valid after checking + BNXT_STATE_OPEN and within RCU grace period. + + Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support") + Reviewed-by: Vladimir Olovyannikov + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit facc173cf700e55b2ad249ecbd3a7537f7315691 +Author: Andy Gospodarek +Date: Fri Apr 1 20:21:11 2022 -0400 + + bnxt_en: reserve space inside receive page for skb_shared_info + + Insufficient space was being reserved in the page used for packet + reception, so the interface MTU could be set too large to still have + room for the contents of the packet when doing XDP redirect. This + resulted in the following message when redirecting a packet between + 3520 and 3822 bytes with an MTU of 3822: + + [311815.561880] XDP_WARN: xdp_update_frame_from_buff(line:200): Driver BUG: missing reserved tailroom + + Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support") + Reviewed-by: Somnath Kotur + Reviewed-by: Pavan Chebbi + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4f81def272de17dc4bbd89ac38f49b2676c9b3d2 +Author: Pavan Chebbi +Date: Fri Apr 1 20:21:10 2022 -0400 + + bnxt_en: Synchronize tx when xdp redirects happen on same ring + + If there are more CPUs than the number of TX XDP rings, multiple XDP + redirects can select the same TX ring based on the CPU on which + XDP redirect is called. Add locking when needed and use static + key to decide whether to take the lock. + + Fixes: f18c2b77b2e4 ("bnxt_en: optimized XDP_REDIRECT support") + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit dcf500065fabe27676dfe7b4ba521a4f1e0fc8ac +Author: Damien Le Moal +Date: Mon Mar 28 15:27:08 2022 +0900 + + net: bnxt_ptp: fix compilation error + + The Broadcom bnxt_ptp driver does not compile with GCC 11.2.2 when + CONFIG_WERROR is enabled. The following error is generated: + + drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c: In function ‘bnxt_ptp_enable’: + drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c:400:43: error: array + subscript 255 is above array bounds of ‘struct pps_pin[4]’ + [-Werror=array-bounds] + 400 | ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL; + | ~~~~~~~~~~~~~~~~~~^~~~~~~~ + In file included from drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c:20: + drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h:75:24: note: while + referencing ‘pins’ + 75 | struct pps_pin pins[BNXT_MAX_TSIO_PINS]; + | ^~~~ + cc1: all warnings being treated as errors + + This is due to the function ptp_find_pin() returning a pin ID of -1 when + a valid pin is not found and this error never being checked. + Change the TSIO_PIN_VALID() function to also check that a pin ID is not + negative and use this macro in bnxt_ptp_enable() to check the result of + the calls to ptp_find_pin() to return an error early for invalid pins. + This fixes the compilation error. 
+ + Cc: + Fixes: 9e518f25802c ("bnxt_en: 1PPS functions to configure TSIO pins") + Signed-off-by: Damien Le Moal + Reviewed-by: Michael Chan + Link: https://lore.kernel.org/r/20220328062708.207079-1-damien.lemoal@opensource.wdc.com + Signed-off-by: Jakub Kicinski + +commit 625788b5844511cf4c30cffa7fa0bc3a69cebc82 +Author: Eric Dumazet +Date: Thu Mar 10 21:14:20 2022 -0800 + + net: add per-cpu storage and net->core_stats + + Before adding yet another possibly contended atomic_long_t, + it is time to add per-cpu storage for existing ones: + dev->tx_dropped, dev->rx_dropped, and dev->rx_nohandler + + Because many devices do not have to increment such counters, + allocate the per-cpu storage on demand, so that dev_get_stats() + does not have to spend considerable time folding zero counters. + + Note that some drivers have abused these counters which + were supposed to be only used by core networking stack. + + v4: should use per_cpu_ptr() in dev_get_stats() (Jakub) + v3: added a READ_ONCE() in netdev_core_stats_alloc() (Paolo) + v2: add a missing include (reported by kernel test robot ) + Change in netdev_core_stats_alloc() (Jakub) + + Signed-off-by: Eric Dumazet + Cc: jeffreyji + Reviewed-by: Brian Vazquez + Reviewed-by: Jakub Kicinski + Acked-by: Paolo Abeni + Link: https://lore.kernel.org/r/20220311051420.2608812-1-eric.dumazet@gmail.com + Signed-off-by: Jakub Kicinski + +commit f16a9169286691d23906a1bb1c8e07e53113586c +Author: Edwin Peer +Date: Sat Mar 5 03:54:40 2022 -0500 + + bnxt_en: Do not destroy health reporters during reset + + Health reporter state should be maintained over resets. Previously + reporters were destroyed if the device capabilities changed, but + since none of the reporters depend on capabilities anymore, this + logic should be removed. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 7c492a2530c1f05441da541307c2534230dfd59b +Author: Michael Chan +Date: Sat Mar 5 03:54:39 2022 -0500 + + bnxt_en: Eliminate unintended link toggle during FW reset + + If the flow control settings have been changed, a subsequent FW reset + may cause the ethernet link to toggle unnecessarily. This link toggle + will increase the down time by a few seconds. + + The problem is caused by bnxt_update_phy_setting() detecting a false + mismatch in the flow control settings between the stored software + settings and the current FW settings after the FW reset. This mismatch + is caused by the AUTONEG bit added to link_info->req_flow_ctrl in an + inconsistent way in bnxt_set_pauseparam() in autoneg mode. The AUTONEG + bit should not be added to link_info->req_flow_ctrl. + + Reviewed-by: Colin Winegarden + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9a3bc77ec65efa3d58e4da0d0e64cefdd9c1692e +Author: Michael Chan +Date: Sat Mar 5 03:54:38 2022 -0500 + + bnxt_en: Properly report no pause support on some cards + + Some cards are configured to never support link pause or PFC. Discover + these cards and properly report no pause support to ethtool. Disable + PFC settings from DCBNL if PFC is unsupported. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0f5a4841f2ec504fba753c86f824a23ed8d0df1f +Author: Edwin Peer +Date: Sat Mar 5 03:54:37 2022 -0500 + + bnxt_en: introduce initial link state of unknown + + This will force link state to always be logged for initial NIC open. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 54ff1e3e8fc3aad09d5dfc426bb462e261c37a1b +Author: Kalesh AP +Date: Sat Mar 5 03:54:35 2022 -0500 + + bnxt_en: add more error checks to HWRM_NVM_INSTALL_UPDATE + + FW returns error code "NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK" + in the response to indicate that HWRM_NVM_INSTALL_UPDATE command has + failed due to Anti-rollback feature. Parse the error and return an + appropriate error code to the user. + + Reviewed-by: Somnath Kotur + Reviewed-by: Edwin Peer + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8e42aef0b7307c024bedf8716628392977f27f55 +Author: Kalesh AP +Date: Sat Mar 5 03:54:34 2022 -0500 + + bnxt_en: refactor error handling of HWRM_NVM_INSTALL_UPDATE + + This is in anticipation of handling more "cmd_err" from FW in the next + patch. + + Reviewed-by: Somnath Kotur + Reviewed-by: Edwin Peer + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1278d17a1fb860e7eab4bc3ff4b026a87cbf5105 +Author: Kalesh AP +Date: Sun Feb 20 04:05:53 2022 -0500 + + bnxt_en: Fix devlink fw_activate + + To install a livepatch, first flash the package to NVM, and then + activate the patch through the "HWRM_FW_LIVEPATCH" fw command. + To uninstall a patch from NVM, flash the removal package and then + activate it through the "HWRM_FW_LIVEPATCH" fw command. + + The "HWRM_FW_LIVEPATCH" fw command has to consider following scenarios: + + 1. no patch in NVM and no patch active. Do nothing. + 2. patch in NVM, but not active. Activate the patch currently in NVM. + 3. patch is not in NVM, but active. Deactivate the patch. + 4. patch in NVM and the patch active. Do nothing. + + Fix the code to handle these scenarios during devlink "fw_activate". 
+ + To install and activate a live patch: + devlink dev flash pci/0000:c1:00.0 file thor_patch.pkg + devlink -f dev reload pci/0000:c1:00.0 action fw_activate limit no_reset + + To remove and deactivate a live patch: + devlink dev flash pci/0000:c1:00.0 file thor_patch_rem.pkg + devlink -f dev reload pci/0000:c1:00.0 action fw_activate limit no_reset + + Fixes: 3c4153394e2c ("bnxt_en: implement firmware live patching") + Reviewed-by: Vikas Gupta + Reviewed-by: Somnath Kotur + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b891106da52b2c12dbaf73400f6d225b06a38d80 +Author: Michael Chan +Date: Sun Feb 20 04:05:52 2022 -0500 + + bnxt_en: Increase firmware message response DMA wait time + + When polling for the firmware message response, we first poll for the + response message header. Once the valid length is detected in the + header, we poll for the valid bit at the end of the message which + signals DMA completion. Normally, this poll time for DMA completion + is extremely short (0 to a few usec). But on some devices under some + rare conditions, it can be up to about 20 msec. + + Increase this delay to 50 msec and use udelay() for the first 10 usec + for the common case, and usleep_range() beyond that. + + Also, change the error message to include the above delay time when + printing the timeout value. + + Fixes: 3c8c20db769c ("bnxt_en: move HWRM API implementation into separate file") + Reviewed-by: Vladimir Olovyannikov + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0e0e3c5358470cbad10bd7ca29f84a44d179d286 +Author: Kalesh AP +Date: Sun Feb 20 04:05:51 2022 -0500 + + bnxt_en: Restore the resets_reliable flag in bnxt_open() + + During ifdown, we call bnxt_inv_fw_health_reg() which will clear + both the status_reliable and resets_reliable flags if these + registers are mapped. This is correct because a FW reset during + ifdown will clear these register mappings. 
If we detect that FW + has gone through reset during the next ifup, we will remap these + registers. + + But during normal ifup with no FW reset, we need to restore the + resets_reliable flag otherwise we will not show the reset counter + during devlink diagnose. + + Fixes: 8cc95ceb7087 ("bnxt_en: improve fw diagnose devlink health messages") + Reviewed-by: Vikas Gupta + Reviewed-by: Pavan Chebbi + Reviewed-by: Somnath Kotur + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8cdb15924252e27af16c4a8fe0fc606ce5fd04dc +Author: Pavan Chebbi +Date: Sun Feb 20 04:05:50 2022 -0500 + + bnxt_en: Fix incorrect multicast rx mask setting when not requested + + We should setup multicast only when net_device flags explicitly + has IFF_MULTICAST set. Otherwise we will incorrectly turn it on + even when not asked. Fix it by only passing the multicast table + to the firmware if IFF_MULTICAST is set. + + Fixes: 7d2837dd7a32 ("bnxt_en: Setup multicast properly after resetting device.") + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cfcab3b3b61584a02bb523ffa99564eafa761dfe +Author: Michael Chan +Date: Sun Feb 20 04:05:49 2022 -0500 + + bnxt_en: Fix occasional ethtool -t loopback test failures + + In the current code, we setup the port to PHY or MAC loopback mode + and then transmit a test broadcast packet for the loopback test. This + scheme fails sometime if the port is shared with management firmware + that can also send packets. The driver may receive the management + firmware's packet and the test will fail when the contents don't + match the test packet. + + Change the test packet to use it's own MAC address as the destination + and setup the port to only receive it's own MAC address. This should + filter out other packets sent by management firmware. 
+ + Fixes: 91725d89b97a ("bnxt_en: Add PHY loopback to ethtool self-test.") + Reviewed-by: Pavan Chebbi + Reviewed-by: Edwin Peer + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6758f937669dba14c6aac7ca004edda42ec1b18d +Author: Michael Chan +Date: Sun Feb 20 04:05:48 2022 -0500 + + bnxt_en: Fix offline ethtool selftest with RDMA enabled + + For offline (destructive) self tests, we need to stop the RDMA driver + first. Otherwise, the RDMA driver will run into unrecoverable errors + when destructive firmware tests are being performed. + + The irq_re_init parameter used in the half close and half open + sequence when preparing the NIC for offline tests should be set to + true because the RDMA driver will free all IRQs before the offline + tests begin. + + Fixes: 55fd0cf320c3 ("bnxt_en: Add external loopback test to ethtool selftest.") + Reviewed-by: Edwin Peer + Reviewed-by: Ben Li + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 84d3c83e6ea7d46cf3de3a54578af73eb24a64f2 +Author: Somnath Kotur +Date: Sun Feb 20 04:05:47 2022 -0500 + + bnxt_en: Fix active FEC reporting to ethtool + + ethtool --show-fec does not show anything when the Active + FEC setting in the chip is set to None. Fix it to properly return + ETHTOOL_FEC_OFF in that case. + + Fixes: 8b2775890ad8 ("bnxt_en: Report FEC settings to ethtool.") + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b370517e5233c42925911d1ac805e512c47dbc01 +Author: Jakub Kicinski +Date: Thu Jan 27 10:43:00 2022 -0800 + + bnxt: report header-data split state + + Aggregation rings imply header-data split. + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. 
Miller + +commit 8bcf6f04d4a531d2efd9b51f13d903aa03985ac6 +Author: Pavan Chebbi +Date: Tue Jan 25 23:40:13 2022 -0500 + + bnxt_en: Handle async event when the PHC is updated in RTC mode + + In Multi-host environment, when the PHC is updated by one host, + an async message from firmware will be sent to other hosts. + Re-initialize the timecounter when the driver receives this + async message. + + Cc: Richard Cochran + Reviewed-by: Somnath Kotur + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e7b0afb69083ff7199dec51e7b9b1646e799943b +Author: Pavan Chebbi +Date: Tue Jan 25 23:40:12 2022 -0500 + + bnxt_en: Implement .adjtime() for PTP RTC mode + + The adjusted time is set in the PHC in RTC mode. We also need to + update the snapshots ptp->current_time and ptp->old_time when the + time is adjusted. + + Cc: Richard Cochran + Reviewed-by: Somnath Kotur + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 24ac1ecd524065cdcf8c27dc85ae37eccce8f2f6 +Author: Pavan Chebbi +Date: Tue Jan 25 23:40:11 2022 -0500 + + bnxt_en: Add driver support to use Real Time Counter for PTP + + Add support for RTC mode if it is supported by firmware. In RTC + mode, the PHC is set to the 64-bit clock. Because the legacy interface + is 48-bit, the driver still has to keep track of the upper 16 bits and + handle the rollover. + + Cc: Richard Cochran + Reviewed-by: Somnath Kotur + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 740c342e399981babdd62d0d5beb7c8ec9503a9a +Author: Pavan Chebbi +Date: Tue Jan 25 23:40:10 2022 -0500 + + bnxt_en: PTP: Refactor PTP initialization functions + + Making the ptp free and timecounter initialization code into separate + functions so that later patches can use them. + + Cc: Richard Cochran + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2895c1531056903a8e22df565664ade106e29426 +Author: Michael Chan +Date: Tue Jan 25 23:40:09 2022 -0500 + + bnxt_en: Update firmware interface to 1.10.2.73 + + The main changes are PTP support for RTC, additional NVM error codes, + backing store v2 firmware APIs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8c6f36d93449e8de7b49a67727e3d09b67c73126 +Author: Edwin Peer +Date: Sun Jan 9 18:54:45 2022 -0500 + + bnxt_en: improve firmware timeout messaging + + While it has always been possible to infer that an HWRM command was + abandoned due to an unhealthy firmware status by the shortened timeout + reported, this change improves the log messaging to account for this + case explicitly. In the interests of further clarity, the firmware + status is now also reported in these new messages. + + v2: Remove inline keyword for hwrm_wait_must_abort() in .c file. + + Reviewed-by: Andy Gospodarek + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit bce9a0b7900836df223ab638090df0cb8430d9e8 +Author: Edwin Peer +Date: Sun Jan 9 18:54:44 2022 -0500 + + bnxt_en: use firmware provided max timeout for messages + + Some older devices cannot accommodate the 40 seconds timeout + cap for long running commands (such as NVRAM commands) due to + hardware limitations. Allow these devices to request more time for + these long running commands, but print a warning, since the longer + timeout may cause the hung task watchdog to trigger. In the case of a + firmware update operation, this is preferable to failing outright. + + v2: Use bp->hwrm_cmd_max_timeout directly without the constants. 
+ + Fixes: 881d8353b05e ("bnxt_en: Add an upper bound for all firmware command timeouts.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 662c9b22f5b568fe79e69b06f3c926cad965bfd2 +Author: Edwin Peer +Date: Sun Jan 9 18:54:43 2022 -0500 + + bnxt_en: improve VF error messages when PF is unavailable + + The current driver design relies on the PF netdev being open in order + to intercept the following HWRM commands from a VF: + - HWRM_FUNC_VF_CFG + - HWRM_CFA_L2_FILTER_ALLOC + - HWRM_PORT_PHY_QCFG (only if FW_CAP_LINK_ADMIN is not supported) + + If the PF is closed, then VFs are subjected to rather inscrutable error + messages in response to any configuration requests involving the above + command types. Recent firmware distinguishes this problem case from + other errors by returning HWRM_ERR_CODE_PF_UNAVAILABLE. In most cases, + the appropriate course of action is still to fail, but this can now be + accomplished with the aid of more user informative log messages. For L2 + filter allocations that are already asynchronous, an automatic retry + seems more appropriate. + + v2: Delete extra newline. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 8fa4219dba8e621aa1e78dfa7eeab10f55acb3c0 +Author: Edwin Peer +Date: Sun Jan 9 18:54:42 2022 -0500 + + bnxt_en: add dynamic debug support for HWRM messages + + Add logging of firmware messages. These can be useful for diagnosing + issues in the field, but due to their verbosity are only appropriate + at a debug message level. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit b6459415b384cb829f0b2a4268f211c789f6cf0b +Author: Jakub Kicinski +Date: Tue Dec 28 16:49:13 2021 -0800 + + net: Don't include filter.h from net/sock.h + + sock.h is pretty heavily used (5k objects rebuilt on x86 after + it's touched). 
We can drop the include of filter.h from it and + add a forward declaration of struct sk_filter instead. + This decreases the number of rebuilt objects when bpf.h + is touched from ~5k to ~1k. + + There's a lot of missing includes this was masking. Primarily + in networking tho, this time. + + Signed-off-by: Jakub Kicinski + Signed-off-by: Alexei Starovoitov + Acked-by: Marc Kleine-Budde + Acked-by: Florian Fainelli + Acked-by: Nikolay Aleksandrov + Acked-by: Stefano Garzarella + Link: https://lore.kernel.org/bpf/20211229004913.513372-1-kuba@kernel.org + +commit 720908e5f816d56579e098e32bd3b56bad2be8f0 +Author: Jakub Kicinski +Date: Mon Dec 27 03:00:32 2021 -0500 + + bnxt_en: Use page frag RX buffers for better software GRO performance + + If NETIF_F_GRO_HW is disabled, the existing driver code uses kmalloc'ed + data for RX buffers. This causes inefficient SW GRO performance + because the GRO data is merged using the less efficient frag_list. + Use netdev_alloc_frag() and friends instead so that GRO data can be + merged into skb_shinfo(skb)->frags for better performance. + + [Use skb_free_frag() - Vikas Gupta] + + Signed-off-by: Jakub Kicinski + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b976969bed83e90fffb9e750e1d1562956500cd9 +Author: Edwin Peer +Date: Mon Dec 27 03:00:31 2021 -0500 + + bnxt_en: convert to xdp_do_flush + + The xdp_do_flush_map function has been replaced with the more general + xdp_do_flush(). + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3fcbdbd5d8d51d14ad5687a253990e89da9b7661 +Author: Michael Chan +Date: Mon Dec 27 03:00:30 2021 -0500 + + bnxt_en: Support CQE coalescing mode in ethtool + + Support showing and setting the CQE mode in ethtool. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit df78ea22460b6c625bd2079686bec0d834e56946 +Author: Michael Chan +Date: Mon Dec 27 03:00:29 2021 -0500 + + bnxt_en: Support configurable CQE coalescing mode + + CQE coalescing mode is the same as the timer reset coalescing mode + on Broadcom devices. Currently this mode is always enabled if it + is supported by the device. Restructure the code slightly to support + dynamically changing this mode. + + Add a flags field to struct bnxt_coal. Initially, the CQE flag will + be set for the RX and TX side if the device supports it. We need to + move bnxt_init_dflt_coal() to set up default coalescing until the + capability is determined. + + Reviewed-by: Andy Gospodarek + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dc1f5d1ebc5c7f70cd352b2b71097f972b244c94 +Author: Andy Gospodarek +Date: Mon Dec 27 03:00:28 2021 -0500 + + bnxt_en: enable interrupt sampling on 5750X for DIM + + 5750X (P5) chips handle receiving packets on the NQ rather than the main + completion queue so we need to get and set stats from the correct spots + for dynamic interrupt moderation. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0fb8582ae5b9bf0dc5a4fededabe7db16a8b430a +Author: Michael Chan +Date: Mon Dec 27 03:00:27 2021 -0500 + + bnxt_en: Log error report for dropped doorbell + + Log the unrecognized error report type value as well. + + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5a717f4a8e00f563962b736961a12b6798c839a0 +Author: Somnath Kotur +Date: Mon Dec 27 03:00:26 2021 -0500 + + bnxt_en: Add event handler for PAUSE Storm event + + FW has been modified to send a new async event when it detects + a pause storm. Register for this new event and log it upon receipt. + + Reviewed-by: Andy Gospodarek + Reviewed-by: Edwin Peer + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 144d4c9e800da1230d817bbd50068a22e4cc688e +Author: Baowen Zheng +Date: Fri Dec 17 19:16:18 2021 +0100 + + flow_offload: reject to offload tc actions in offload drivers + + A follow-up patch will allow users to offload tc actions independent of + classifier in the software datapath. + + In preparation for this, teach all drivers that support offload of the flow + tables to reject such configuration as currently none of them support it. + + Signed-off-by: Baowen Zheng + Signed-off-by: Simon Horman + Acked-by: Jamal Hadi Salim + Signed-off-by: David S. Miller + +commit 9c9211a3fc7aa41b2952765b62000443b3bb6f23 +Author: Hangbin Liu +Date: Fri Dec 10 16:59:58 2021 +0800 + + net_tstamp: add new flag HWTSTAMP_FLAG_BONDED_PHC_INDEX + + Since commit 94dd016ae538 ("bond: pass get_ts_info and SIOC[SG]HWTSTAMP + ioctl to active device") the user could get bond active interface's + PHC index directly. But when there is a failover, the bond active + interface will change, thus the PHC index is also changed. This may + break the user's program if they did not update the PHC timely. + + This patch adds a new hwtstamp_config flag HWTSTAMP_FLAG_BONDED_PHC_INDEX. + When the user wants to get the bond active interface's PHC, they need to + add this flag and be aware the PHC index may be changed. + + With the new flag. All flag checks in current drivers are removed. Only + the checking in net_hwtstamp_validate() is kept. + + Suggested-by: Jakub Kicinski + Signed-off-by: Hangbin Liu + Signed-off-by: David S. Miller + +commit c8064e5b4adac5e1255cf4f3b374e75b5376e7ca +Author: Paolo Abeni +Date: Tue Nov 30 11:08:07 2021 +0100 + + bpf: Let bpf_warn_invalid_xdp_action() report more info + + In non trivial scenarios, the action id alone is not sufficient to + identify the program causing the warning. Before the previous patch, + the generated stack-trace pointed out at least the involved device + driver. 
+ + Let's additionally include the program name and id, and the relevant + device name. + + If the user needs additional infos, he can fetch them via a kernel + probe, leveraging the arguments added here. + + Signed-off-by: Paolo Abeni + Signed-off-by: Daniel Borkmann + Acked-by: Toke Høiland-Jørgensen + Link: https://lore.kernel.org/bpf/ddb96bb975cbfddb1546cf5da60e77d5100b533c.1638189075.git.pabeni@redhat.com + +commit 7462494408cd3de8b0bc1e79670bf213288501d0 +Author: Hao Chen +Date: Thu Nov 18 20:12:43 2021 +0800 + + ethtool: extend ringparam setting/getting API with rx_buf_len + + Add two new parameters kernel_ringparam and extack for + .get_ringparam and .set_ringparam to extend more ring params + through netlink. + + Signed-off-by: Hao Chen + Signed-off-by: Guangbin Huang + Signed-off-by: David S. Miller + +commit 9f5363916a5099e618e6e40606e91b8ce0833754 +Author: Michael Chan +Date: Tue Nov 16 14:26:10 2021 -0500 + + bnxt_en: Fix compile error regression when CONFIG_BNXT_SRIOV is not set + + bp->sriov_cfg is not defined when CONFIG_BNXT_SRIOV is not set. Fix + it by adding a helper function bnxt_sriov_cfg() to handle the logic + with or without the config option. + + Fixes: 46d08f55d24e ("bnxt_en: extend RTNL to VF check in devlink driver_reinit") + Reported-by: kernel test robot + Reviewed-by: Edwin Peer + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1637090770-22835-1-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 4721031c3559db8eae61df305f10c00099a7c1d0 +Author: Eric Dumazet +Date: Mon Nov 15 09:05:51 2021 -0800 + + net: move gro definitions to include/net/gro.h + + include/linux/netdevice.h became too big, move gro stuff + into include/net/gro.h + + Signed-off-by: Eric Dumazet + Signed-off-by: David S. 
Miller + +commit b0757491a118ae5727cf9f1c3a11544397d46596 +Author: Sriharsha Basavapatna +Date: Mon Nov 15 02:38:01 2021 -0500 + + bnxt_en: reject indirect blk offload when hw-tc-offload is off + + The driver does not check if hw-tc-offload is enabled for the device + before offloading a flow in the context of indirect block callback. + Fix this by checking NETIF_F_HW_TC in the features flag and rejecting + the offload request. This will avoid unnecessary dmesg error logs when + hw-tc-offload is disabled, such as these: + + bnxt_en 0000:19:00.1 eno2np1: dev(ifindex=294) not on same switch + bnxt_en 0000:19:00.1 eno2np1: Error: bnxt_tc_add_flow: cookie=0xffff8dace1c88000 error=-22 + bnxt_en 0000:19:00.0 eno1np0: dev(ifindex=294) not on same switch + bnxt_en 0000:19:00.0 eno1np0: Error: bnxt_tc_add_flow: cookie=0xffff8dace1c88000 error=-22 + + Reported-by: Marcelo Ricardo Leitner + Fixes: 627c89d00fb9 ("bnxt_en: flow_offload: offload tunnel decap rules via indirect callbacks") + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b68a1a933fe4a52a8316d214e3421f2a89bc113e +Author: Edwin Peer +Date: Mon Nov 15 02:38:00 2021 -0500 + + bnxt_en: fix format specifier in live patch error message + + This fixes type mismatch warning. + + Reported-by: kernel test robot + Fixes: 3c4153394e2c ("bnxt_en: implement firmware live patching") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 46d08f55d24e69e921456b5a40717da09199267b +Author: Edwin Peer +Date: Mon Nov 15 02:37:59 2021 -0500 + + bnxt_en: extend RTNL to VF check in devlink driver_reinit + + The fixes the race condition between configuring SR-IOV and devlink + reload. The SR-IOV configure logic already takes the RTNL lock, + setting sriov_cfg under the lock while changes are underway. Extend + the lock scope in devlink driver_reinit to cover the VF check and + don't run concurrently with SR-IOV configure. 
+ + Reported-by: Leon Romanovsky + Fixes: 228ea8c187d8 ("bnxt_en: implement devlink dev reload driver_reinit") + Cc: Leon Romanovsky + Reviewed-by: Somnath Kotur + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6ab9f57a648953e2326b9ad000783c122d133c9d +Author: Wan Jiabing +Date: Mon Nov 1 22:03:12 2021 -0400 + + bnxt_en: avoid newline at end of message in NL_SET_ERR_MSG_MOD + + Fix following coccicheck warning: + ./drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c:446:8-56: WARNING + avoid newline at end of message in NL_SET_ERR_MSG_MOD. + + Signed-off-by: Wan Jiabing + Link: https://lore.kernel.org/r/20211102020312.16567-1-wanjiabing@vivo.com + Signed-off-by: Jakub Kicinski + +commit 63185eb3aa267f2844580bbd8c9c1c97516f5dbb +Author: Vikas Gupta +Date: Fri Oct 29 03:47:55 2021 -0400 + + bnxt_en: Provide stored devlink "fw" version on older firmware + + On older firmware that doesn't support the HWRM_NVM_GET_DEV_INFO + command that returns detailed stored firmware versions, fallback + to use the same firmware package version that is reported to ethtool. + Refactor bnxt_get_pkgver() in bnxt_ethtool.c so that devlink can call + and get the package version. + + Signed-off-by: Vikas Gupta + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c4153394e2c749b415947b86eb560114ec0f64d +Author: Edwin Peer +Date: Fri Oct 29 03:47:54 2021 -0400 + + bnxt_en: implement firmware live patching + + Live patches are activated by using the 'limit no_reset' option when + performing a devlink dev reload fw_activate operation. These packages + must first be installed on the device in the usual way. For example, + via devlink dev flash or ethtool -f. + + The devlink device info has also been enhanced to render stored and + running live patch versions. 
+ + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 21e70778d0d4e677bf4b1882a3280cd05c80d559 +Author: Michael Chan +Date: Fri Oct 29 03:47:53 2021 -0400 + + bnxt_en: Update firmware interface to 1.10.2.63 + + The main changes are firmware live patch support and 2 additional FEC + standard counters. + + Add the matching FEC counters to ethtool counter array. Firmware older + than 220 does not return the proper size of the extended RX counters so + we need to cap it at the smaller legacy size. Otherwise the new FEC + counters may show up with garbage values. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 188876db04a3524aa81ced7475686e7c44ca1a5e +Author: Edwin Peer +Date: Fri Oct 29 03:47:52 2021 -0400 + + bnxt_en: implement dump callback for fw health reporter + + Populate the dump with firmware 'live' coredump data. This includes + the information stored in NVRAM by the firmware exception handler + prior to recovery. Thus, the live dump includes the desired crash + context. + + Firmware does not support HWRM calls after RESET_NOTIFY, so there is + no supported way to capture a coredump during the auto dump phase. + Detect this and abort when called from devlink_health_report(). + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4e59f0600790cc205192203570a677375671d1d7 +Author: Edwin Peer +Date: Fri Oct 29 03:47:51 2021 -0400 + + bnxt_en: extract coredump command line from current task + + Tools other than 'ethtool -w' may be used to produce a coredump. For + devlink health, such dumps could even be driver initiated in response + to a health event. In these cases, the kernel thread information will + be placed in the coredump record instead. + + v2: use min_t() instead of min() to fix the mismatched type warning + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 80194db9f53bc8877468f96734133b7a8d28aa4c +Author: Vasundhara Volam +Date: Fri Oct 29 03:47:50 2021 -0400 + + bnxt_en: Retrieve coredump and crashdump size via FW command + + Recent firmware provides coredump and crashdump size info via + DBG_QCFG command. Read the dump sizes from firmware, instead of + computing in the driver. This patch reduces the time taken + to collect the dump via ethtool. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 80f62ba9d53d40e7a71b79543026e8e20afe4ec1 +Author: Vasundhara Volam +Date: Fri Oct 29 03:47:49 2021 -0400 + + bnxt_en: Add compression flags information in coredump segment header + + Firmware sets compression flags for each segment, add this information + while filling segment header. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b032228e58ea2477955058ad4d70a636ce1dec51 +Author: Edwin Peer +Date: Fri Oct 29 03:47:48 2021 -0400 + + bnxt_en: move coredump functions into dedicated file + + Change bnxt_get_coredump() and bnxt_get_coredump_length() to non-static + functions. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9a575c8c25ae2372112db6d6b3e553cd90e9f02b +Author: Edwin Peer +Date: Fri Oct 29 03:47:47 2021 -0400 + + bnxt_en: Refactor coredump functions + + The coredump functionality will be used by devlink health. Refactor + these functions that get coredump and coredump length. There is no + functional change, but the following checkpatch warnings were + addressed: + + - strscpy is preferred over strlcpy. + - sscanf results should be checked, with an additional warning. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 8cc95ceb7087d6910050286301d05f4824a0bf59 +Author: Edwin Peer +Date: Fri Oct 29 03:47:46 2021 -0400 + + bnxt_en: improve fw diagnose devlink health messages + + Add firmware event counters as well as health state severity. In + the unhealthy state, recommend a remedy and inform the user as to + its impact. + + Readability of the devlink tool's output is negatively impacted by + adding these fields to the diagnosis. The single line of text, as + rendered by devlink health diagnose, benefits from more terse + descriptions, which can be substituted without loss of clarity, even + in pretty printed JSON mode. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2bb21b8db5c0e515549d7d1d0de5dc905a32a338 +Author: Edwin Peer +Date: Fri Oct 29 03:47:45 2021 -0400 + + bnxt_en: consolidate fw devlink health reporters + + Merge 'fw' and 'fw_fatal' health reporters. There is no longer a need + to distinguish between firmware reporters. Only bonafide errors are + reported now and no reports were being generated for the 'fw' reporter. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit aadb0b1a0b3628291dff2dab8c8af1b63df1cae9 +Author: Edwin Peer +Date: Fri Oct 29 03:47:44 2021 -0400 + + bnxt_en: remove fw_reset devlink health reporter + + Firmware resets initiated by the user are not errors and should not + be reported via devlink. Once only unsolicited resets remain, it is no + longer sensible to maintain a separate fw_reset reporter. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1596847d0f7b00147c4cb01158325d72c096cdde +Author: Edwin Peer +Date: Fri Oct 29 03:47:43 2021 -0400 + + bnxt_en: improve error recovery information messages + + The recovery election messages are often mistaken for errors. Improve + the wording to clarify the meaning of these frequent and expected + events. 
Also, take the first step towards more inclusive language. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 892a662f04736ba40e241c794b15f1b2ee489dc3 +Author: Edwin Peer +Date: Fri Oct 29 03:47:42 2021 -0400 + + bnxt_en: add enable_remote_dev_reset devlink parameter + + The reported parameter value should not take into account the state + of remote drivers. Firmware will reject remote resets as appropriate, + thus it is not strictly necessary to check HOT_RESET_ALLOWED before + attempting to initiate a reset. But we add the check so that we can + provide more intuitive messages when reset is not permitted. + + This firmware setting needs to be restored from all functions after + a firmware reset. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8f6c5e4d1470499b8feff98353eb2920bd81635a +Author: Edwin Peer +Date: Fri Oct 29 03:47:41 2021 -0400 + + bnxt_en: implement devlink dev reload fw_activate + + Similar to reload driver_reinit, the RTNL lock is held across reload + down and up to prevent interleaving state changes. But we need to + subsequently release the RTNL lock while waiting for firmware reset + to complete. + + Also keep a statistic on fw_activate resets initiated remotely from + other functions. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 228ea8c187d814e1b8e369086e640dfc1d42974f +Author: Edwin Peer +Date: Fri Oct 29 03:47:40 2021 -0400 + + bnxt_en: implement devlink dev reload driver_reinit + + The RTNL lock must be held between down and up to prevent interleaving + state changes, especially since external state changes might release + and allocate different driver resource subsets that would otherwise + need to be tracked and carefully handled. If the down function fails, + then devlink will not call the corresponding up function, thus the + lock is released in the down error paths. 
+ + v2: Don't use devlink_reload_disable() and devlink_reload_enable(). + Instead, check that the netdev is not in unregistered state before + proceeding with reload. + + Signed-off-by: Edwin Peer + Signed-Off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d900aadd86b0c9ddb8b78e5fa512fb4133b30559 +Author: Edwin Peer +Date: Fri Oct 29 03:47:39 2021 -0400 + + bnxt_en: refactor cancellation of resource reservations + + Resource reservations will also need to be reset after FUNC_DRV_UNRGTR + in the following devlink driver_reinit patch. Extract this logic into a + reusable function. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c7dd4a5b0a155c4db0ff9758668235651c2ebf22 +Author: Edwin Peer +Date: Fri Oct 29 03:47:38 2021 -0400 + + bnxt_en: refactor printing of device info + + The device info logged during probe will be reused by the devlink + driver_reinit code in a following patch. Extract this logic into + the new bnxt_print_device_info() function. The board index needs + to be saved in the driver context so that the board information + can be retrieved at a later time, outside of the probe function. + + Reviewed-by: Somnath Kotur + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 766607570becbd26cab6d66a544dd8d0d964df5a +Author: Jakub Kicinski +Date: Thu Oct 14 07:24:31 2021 -0700 + + ethernet: constify references to netdev->dev_addr in drivers + + This big patch sprinkles const on local variables and + function arguments which may refer to netdev->dev_addr. + + Commit 406f42fa0d3c ("net-next: When a bond have a massive amount + of VLANs...") introduced a rbtree for faster Ethernet address look + up. To maintain netdev->dev_addr in this tree we need to make all + the writes to it got through appropriate helpers. + + Some of the changes here are not strictly required - const + is sometimes cast off but pointer is not used for writing. 
+ It seems like it's still better to add the const in case + the code changes later or relevant -W flags get enabled + for the build. + + No functional changes. + + Link: https://lore.kernel.org/r/20211014142432.449314-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit 4b70dce2c1b93930fe4728a221a8d8e674c271c5 +Author: Juhee Kang +Date: Sun Oct 10 13:03:27 2021 +0900 + + bnxt: use netif_is_rxfh_configured instead of open code + + The open code which is dev->priv_flags & IFF_RXFH_CONFIGURED is defined as + a helper function on netdevice.h. So use netif_is_rxfh_configured() + function instead of open code. This patch doesn't change logic. + + Signed-off-by: Juhee Kang + Signed-off-by: David S. Miller + +commit a05e4c0af490ca7c22fc77120aafebebdeaaf537 +Author: Jakub Kicinski +Date: Mon Oct 4 09:05:21 2021 -0700 + + ethernet: use eth_hw_addr_set() for dev->addr_len cases + + Convert all Ethernet drivers from memcpy(... dev->addr_len) + to eth_hw_addr_set(): + + @@ + expression dev, np; + @@ + - memcpy(dev->dev_addr, np, dev->addr_len) + + eth_hw_addr_set(dev, np) + + In theory addr_len may not be ETH_ALEN, but we don't expect + non-Ethernet devices to live under this directory, and only + the following cases of setting addr_len exist: + - cxgb4 for mgmt device, + and the drivers which set it to ETH_ALEN: s2io, mlx4, vxge. + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit f3956ebb3bf06ab2266ad5ee2214aed46405810c +Author: Jakub Kicinski +Date: Fri Oct 1 14:32:23 2021 -0700 + + ethernet: use eth_hw_addr_set() instead of ether_addr_copy() + + Convert Ethernet from ether_addr_copy() to eth_hw_addr_set(): + + @@ + expression dev, np; + @@ + - ether_addr_copy(dev->dev_addr, np) + + eth_hw_addr_set(dev, np) + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. 
Miller + +commit a96d317fb1a30b9f323548eb2ff05d4e4600ead9 +Author: Jakub Kicinski +Date: Fri Oct 1 14:32:20 2021 -0700 + + ethernet: use eth_hw_addr_set() + + Convert all Ethernet drivers from memcpy(... ETH_ADDR) + to eth_hw_addr_set(): + + @@ + expression dev, np; + @@ + - memcpy(dev->dev_addr, np, ETH_ALEN) + + eth_hw_addr_set(dev, np) + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit 5df290e7a70367d476406420d19c4dea14d45dd1 +Author: Leon Romanovsky +Date: Sat Sep 25 14:22:42 2021 +0300 + + bnxt_en: Register devlink instance at the end devlink configuration + + Move devlink_register() to be last command in devlink configuration + sequence, so no user space access will be possible till devlink instance + is fully operable. As part of this change, the devlink_params_publish + call is removed as not needed. + + This change fixes forgotten devlink_params_unpublish() too. + + Signed-off-by: Leon Romanovsky + Signed-off-by: David S. Miller + +commit 61415c3db3d98c74bfe8f9e8688b6e40b4c3e1d4 +Author: Leon Romanovsky +Date: Thu Sep 23 21:12:49 2021 +0300 + + bnxt_en: Properly remove port parameter support + + This driver doesn't have any port parameters and registers + devlink port parameters with empty table. Remove the useless + calls to devlink_port_params_register and _unregister. + + Fixes: da203dfa89ce ("Revert "devlink: Add a generic wake_on_lan port parameter"") + Signed-off-by: Leon Romanovsky + Reviewed-by: Edwin Peer + Signed-off-by: David S. Miller + +commit e624c70e1131e145bd0510b8a700b5e2d112e377 +Author: Leon Romanovsky +Date: Thu Sep 23 21:12:48 2021 +0300 + + bnxt_en: Check devlink allocation and registration status + + devlink is a software interface that doesn't depend on any hardware + capabilities. The failure in SW means memory issues, wrong parameters, + programmer error e.t.c. + + Like any other such interface in the kernel, the returned status of + devlink APIs should be checked and propagated further and not ignored. 
+ + Fixes: 4ab0c6a8ffd7 ("bnxt_en: add support to enable VF-representors") + Signed-off-by: Leon Romanovsky + Reviewed-by: Edwin Peer + Signed-off-by: David S. Miller + +commit db4278c55fa53760893266538e86e638330b03bb +Author: Leon Romanovsky +Date: Wed Sep 22 11:58:03 2021 +0300 + + devlink: Make devlink_register to be void + + devlink_register() can't fail and always returns success, but all drivers + are obligated to check returned status anyway. This adds a lot of boilerplate + code to handle impossible flow. + + Make devlink_register() void and simplify the drivers that use that + API call. + + Signed-off-by: Leon Romanovsky + Acked-by: Simon Horman + Acked-by: Vladimir Oltean # dsa + Reviewed-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 5bed8b0704c9ecccc8f4a2c377d7c8e21090a82e +Author: Michael Chan +Date: Mon Sep 20 02:51:52 2021 -0400 + + bnxt_en: Fix TX timeout when TX ring size is set to the smallest + + The smallest TX ring size we support must fit a TX SKB with MAX_SKB_FRAGS + + 1. Because the first TX BD for a packet is always a long TX BD, we + need an extra TX BD to fit this packet. Define BNXT_MIN_TX_DESC_CNT with + this value to make this more clear. The current code uses a minimum + that is off by 1. Fix it using this constant. + + The tx_wake_thresh to determine when to wake up the TX queue is half the + ring size but we must have at least BNXT_MIN_TX_DESC_CNT for the next + packet which may have maximum fragments. So the comparison of the + available TX BDs with tx_wake_thresh should be >= instead of > in the + current code. Otherwise, at the smallest ring size, we will never wake + up the TX queue and will cause TX timeout. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 7c3a0a018e672a9723a79b128227272562300055 +Author: Eli Cohen +Date: Wed Sep 15 07:47:27 2021 +0300 + + net/{mlx5|nfp|bnxt}: Remove unnecessary RTNL lock assert + + Remove the assert from the callback priv lookup function since it does + not require RTNL lock and is already protected by flow_indr_block_lock. + + This will avoid warnings from being emitted to dmesg if the driver + registers its callback after an ingress qdisc was created for a + netdevice. + + The warnings started after the following patch was merged: + commit 74fc4f828769 ("net: Fix offloading indirect devices dependency on qdisc order creation") + + Signed-off-by: Eli Cohen + Signed-off-by: David S. Miller + +commit 985941e1dd5e996311c29688ca0d3aa1ff8eb0b6 +Author: Michael Chan +Date: Sun Sep 12 12:34:49 2021 -0400 + + bnxt_en: Clean up completion ring page arrays completely + + We recently changed the completion ring page arrays to be dynamically + allocated to better support the expanded range of ring depths. The + cleanup path for this was not quite complete. It might cause the + shutdown path to crash if we need to abort before the completion ring + arrays have been allocated and initialized. + + Fix it by initializing the ring_mem->pg_arr to NULL after freeing the + completion ring page array. Add a check in bnxt_free_ring() to skip + referencing the rmem->pg_arr if it is NULL. + + Fixes: 03c7448790b8 ("bnxt_en: Don't use static arrays for completion ring pages") + Reviewed-by: Andy Gospodarek + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1affc01fdc6035189a5ab2a24948c9419ee0ecf2 +Author: Edwin Peer +Date: Sun Sep 12 12:34:48 2021 -0400 + + bnxt_en: make bnxt_free_skbs() safe to call after bnxt_free_mem() + + The call to bnxt_free_mem(..., false) in the bnxt_half_open_nic() error + path will deallocate ring descriptor memory via bnxt_free_?x_rings(), + but because irq_re_init is false, the ring info itself is not freed. 
+ + To simplify error paths, deallocation functions have generally been + written to be safe when called on unallocated memory. It should always + be safe to call dev_close(), which calls bnxt_free_skbs() a second time, + even in this semi- allocated ring state. + + Calling bnxt_free_skbs() a second time with the rings already freed will + cause NULL pointer dereference. Fix it by checking the rings are valid + before proceeding in bnxt_free_tx_skbs() and + bnxt_free_one_rx_ring_skbs(). + + Fixes: 975bc99a4a39 ("bnxt_en: Refactor bnxt_free_rx_skbs().") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit eca4cf12acda306f851f6d2a05b1c9ef62cf0e81 +Author: Michael Chan +Date: Sun Sep 12 12:34:47 2021 -0400 + + bnxt_en: Fix error recovery regression + + The recent patch has introduced a regression by not reading the reset + count in the ERROR_RECOVERY async event handler. We may have just + gone through a reset and the reset count has just incremented. If + we don't update the reset count in the ERROR_RECOVERY event handler, + the health check timer will see that the reset count has changed and + will initiate an unintended reset. + + Restore the unconditional update of the reset count in + bnxt_async_event_process() if error recovery watchdog is enabled. + Also, update the reset count at the end of the reset sequence to + make it even more robust. + + Fixes: 1b2b91831983 ("bnxt_en: Fix possible unintended driver initiated error recovery") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1b2b91831983aeac3adcbb469aa8b0dc71453f89 +Author: Michael Chan +Date: Sun Sep 5 14:10:59 2021 -0400 + + bnxt_en: Fix possible unintended driver initiated error recovery + + If error recovery is already enabled, bnxt_timer() will periodically + check the heartbeat register and the reset counter. If we get an + error recovery async. notification from the firmware (e.g. 
change in + primary/secondary role), we will immediately read and update the + heartbeat register and the reset counter. If the timer for the next + health check expires soon after this, we may read the heartbeat register + again in quick succession and find that it hasn't changed. This will + trigger error recovery unintentionally. + + The likelihood is small because we also reset fw_health->tmr_counter + which will reset the interval for the next health check. But the + update is not protected and bnxt_timer() can miss the update and + perform the health check without waiting for the full interval. + + Fix it by only reading the heartbeat register and reset counter in + bnxt_async_event_process() if error recovery is trasitioning to the + enabled state. Also add proper memory barriers so that when enabling + for the first time, bnxt_timer() will see the tmr_counter interval and + perform the health check after the full interval has elapsed. + + Fixes: 7e914027f757 ("bnxt_en: Enable health monitoring.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7ae9dc356f247ad9f9634b3da61a45eb72968b2e +Author: Michael Chan +Date: Sun Sep 5 14:10:58 2021 -0400 + + bnxt_en: Fix UDP tunnel logic + + The current logic assumes that when the driver sends the message to the + firmware to add the VXLAN or Geneve port, the firmware will never fail + the operation. The UDP ports are always stored and are used to check + the tunnel packets in .ndo_features_check(). These tunnnel packets + will fail to offload on the transmit side if firmware fails the call to + add the UDP ports. + + To fix the problem, bp->vxlan_port and bp->nge_port will only be set to + the offloaded ports when the HWRM_TUNNEL_DST_PORT_ALLOC firmware call + succeeds. When deleting a UDP port, we check that the port was + previously added successfuly first by checking the FW ID. 
+ + Fixes: 1698d600b361 ("bnxt_en: Implement .ndo_features_check().") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fdab8a3ade2adc123bbf5c4fdec3394560b1fb1 +Author: Michael Chan +Date: Sun Sep 5 14:10:57 2021 -0400 + + bnxt_en: Fix asic.rev in devlink dev info command + + The current asic.rev is incomplete and does not include the metal + revision. Add the metal revision and decode the complete asic + revision into the more common and readable form (A0, B0, etc). + + Fixes: 7154917a12b2 ("bnxt_en: Refactor bnxt_dl_info_get().") + Reviewed-by: Edwin Peer + Reviewed-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit beb55fcf950f5454715df05234bb2b2914bc97ac +Author: Edwin Peer +Date: Sun Sep 5 14:10:56 2021 -0400 + + bnxt_en: fix read of stored FW_PSID version on P5 devices + + P5 devices store NVM arrays using a different internal representation. + This implementation detail permeates into the HWRM API, requiring the + caller to explicitly index the array elements in HWRM_NVM_GET_VARIABLE + on these devices. Conversely, older devices do not support the indexed + mode of operation and require reading the raw NVM content. + + Fixes: db28b6c77f40 ("bnxt_en: Fix devlink info's stored fw.psid version format.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1656db67233e4259281d2ac35b25f712edbbc20b +Author: Edwin Peer +Date: Sun Sep 5 14:10:55 2021 -0400 + + bnxt_en: fix stored FW_PSID version masks + + The FW_PSID version components are 8 bits wide, not 4. + + Fixes: db28b6c77f40 ("bnxt_en: Fix devlink info's stored fw.psid version format.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 5240118f08a07669537677be19edbf008682f8bd +Author: Edwin Peer +Date: Wed Sep 1 11:53:15 2021 -0700 + + bnxt_en: fix kernel doc warnings in bnxt_hwrm.c + + Parameter names in the comments did not match the function arguments. + + Fixes: 213808170840 ("bnxt_en: add support for HWRM request slices") + Signed-off-by: Edwin Peer + Reported-by: Jakub Kicinski + Reviewed-by: Michael Chan + Reviewed-by: Florian Fainelli + Link: https://lore.kernel.org/r/20210901185315.57137-1-edwin.peer@broadcom.com + Signed-off-by: Jakub Kicinski + +commit c6132f6f2e682c958f7022ecfd8bec35723a1a9d +Author: Michael Chan +Date: Tue Aug 31 21:15:23 2021 -0400 + + bnxt_en: Fix 64-bit doorbell operation on 32-bit kernels + + The driver requires 64-bit doorbell writes to be atomic on 32-bit + architectures. So we redefined writeq as a new macro with spinlock + protection on 32-bit architectures. This created a new warning when + we added a new file in a recent patchset. writeq is defined on many + 32-bit architectures to do the memory write non-atomically and it + generated a new macro redefined warning. This warning was fixed + incorrectly in the recent patch. + + Fix this properly by adding a new bnxt_writeq() function that will + do the non-atomic write under spinlock on 32-bit systems. All callers + in the driver will now call bnxt_writeq() instead. + + v2: Need to pass in bp to bnxt_writeq() + Use lo_hi_writeq() [suggested by Florian] + + Reported-by: kernel test robot + Fixes: f9ff578251dc ("bnxt_en: introduce new firmware message API based on DMA pools") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Reviewed-by: Florian Fainelli + Signed-off-by: David S. 
Miller + +commit 68f684e257d7f3a6303b0e838bfa982c74f2c8da +Author: Edwin Peer +Date: Sun Aug 29 03:35:06 2021 -0400 + + bnxt_en: support multiple HWRM commands in flight + + Add infrastructure to maintain a pending list of HWRM commands awaiting + completion and reduce the scope of the hwrm_cmd_lock mutex so that it + protects only the request mailbox. The mailbox is free to use for one + or more concurrent commands after receiving deferred response events. + + For uniformity and completeness, use the same pending list for + collecting completions for commands that respond via a completion ring. + These commands are only used for freeing rings and for IRQ test and + we only support one such command in flight. + + Note deferred responses are also only supported on the main channel. + The secondary channel (KONG) does not support deferred responses. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b34695a894b88e50e16dd3dcb1098fe919023f14 +Author: Edwin Peer +Date: Sun Aug 29 03:35:05 2021 -0400 + + bnxt_en: remove legacy HWRM interface + + There are no longer any callers relying on the old API. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bbf33d1d9805fc3a59ded637ab6555fb20edb5d2 +Author: Edwin Peer +Date: Sun Aug 29 03:35:04 2021 -0400 + + bnxt_en: update all firmware calls to use the new APIs + + The conversion follows this general pattern for most of the calls: + + 1. The input message is changed from a stack variable initialized + using bnxt_hwrm_cmd_hdr_init() to a pointer allocated and intialized + using hwrm_req_init(). + + 2. If we don't need to read the firmware response, the hwrm_send_message() + call is replaced with hwrm_req_send(). + + 3. If we need to read the firmware response, the mutex lock is replaced + by hwrm_req_hold() to hold the response. When the response is read, the + mutex unlock is replaced by hwrm_req_drop(). 
+ + If additional DMA buffers are needed for firmware response data, the + hwrm_req_dma_slice() is used instead of calling dma_alloc_coherent(). + + Some minor refactoring is also done while doing these conversions. + + v2: Fix unintialized variable warnings in __bnxt_hwrm_get_tx_rings() + and bnxt_approve_mac() + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c10ed497fa87780a9ee8c31092373e5f1e20f64 +Author: Edwin Peer +Date: Sun Aug 29 03:35:03 2021 -0400 + + bnxt_en: use link_lock instead of hwrm_cmd_lock to protect link_info + + We currently use the hwrm_cmd_lock to serialize the update of the + firmware's link status response data and the copying of link status data + to the VF. This won't work when we update the firmware message APIs, so + we use the link_lock mutex instead. All link_info data should be + updated under the link_lock mutex. Also add link_lock to functions that + touch link_info in __bnxt_open_nic() and bnxt_probe_phy(). The locking + is probably not strictly necessary during probe, but it's more consistent. + + Signed-off-by: Edwin Peer + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2138081708405fb9c16a76a9b6ef46c35d3f17a9 +Author: Edwin Peer +Date: Sun Aug 29 03:35:02 2021 -0400 + + bnxt_en: add support for HWRM request slices + + Slices are a mechanism for suballocating DMA mapped regions from the + request buffer. Such regions can be used for indirect command data + instead of creating new mappings with dma_alloc_coherent(). + + The advantage of using a slice is that the lifetime of the slice is + bound to the request and will be automatically unmapped when the + request is consumed. + + A single external region is also supported. This allows for regions + that will not fit inside the spare request buffer space such that + the same API can be used consistently even for larger mappings. 
+ + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ecddc29d928d0ecccbc8f339b59ed75e5c8e8ecf +Author: Edwin Peer +Date: Sun Aug 29 03:35:01 2021 -0400 + + bnxt_en: add HWRM request assignment API + + hwrm_req_replace() provides an assignment like operation to replace a + managed HWRM request object with data from a pre-built source. This is + useful for handling request data provided by higher layer HWRM clients. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 02b9aa1068682319508f9a1678e993ad958a8a4f +Author: Edwin Peer +Date: Sun Aug 29 03:35:00 2021 -0400 + + bnxt_en: discard out of sequence HWRM responses + + During firmware crash recovery, it is possible for firmware to respond + to stale HWRM commands that have already timed out. Because response + buffers may be reused, any out of sequence responses need to be ignored + and only the matching seq_id should be accepted. + + Also, READ_ONCE should be used for the reads from the DMA buffer to + ensure that the necessary loads are scheduled. + + Reviewed-by: Scott Branden + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f9ff578251dc2f1cf5b9b007e050033d8414829d +Author: Edwin Peer +Date: Sun Aug 29 03:34:59 2021 -0400 + + bnxt_en: introduce new firmware message API based on DMA pools + + This change constitutes a major step towards supporting multiple + firmware commands in flight by maintaining a separate response buffer + for the duration of each request. These firmware commands are also + known as Hardware Resource Manager (HWRM) commands. Using separate + response buffers requires an API change in order for callers to be + able to free the buffer when done. + + It is impossible to keep the existing APIs unchanged. 
The existing + usage for a simple HWRM message request such as the following: + + struct input req = {0}; + bnxt_hwrm_cmd_hdr_init(bp, &req, REQ_TYPE, -1, -1); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + /* error */ + + changes to: + + struct input *req; + rc = hwrm_req_init(bp, req, REQ_TYPE); + if (rc) + /* error */ + rc = hwrm_req_send(bp, req); /* consumes req */ + if (rc) + /* error */ + + The key changes are: + + 1. The req is no longer allocated on the stack. + 2. The caller must call hwrm_req_init() to allocate a req buffer and + check for a valid buffer. + 3. The req buffer is automatically released when hwrm_req_send() returns. + 4. If the caller wants to check the firmware response, the caller must + call hwrm_req_hold() to take ownership of the response buffer and + release it afterwards using hwrm_req_drop(). The caller is no longer + required to explicitly hold the hwrm_cmd_lock mutex to read the + response. + 5. Because the firmware commands and responses all have different sizes, + some safeguards are added to the code. + + This patch maintains legacy API compatibility, implementing the old + API in terms of the new. The follow-on patches will convert all + callers to use the new APIs. + + v2: Fix redefined writeq with parisc .config + Fix "cast from pointer to integer of different size" warning in + hwrm_calc_sentinel() + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c8c20db769cd68e299b487035825e026b1a6ce9 +Author: Edwin Peer +Date: Sun Aug 29 03:34:58 2021 -0400 + + bnxt_en: move HWRM API implementation into separate file + + Move all firmware messaging functions and definitions to new + bnxt_hwrm.[ch]. The follow-on patches will make major modifications + to these APIs. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 7b370ad77392455dccd77c121b48bc9f76a14cbe +Author: Edwin Peer +Date: Sun Aug 29 03:34:57 2021 -0400 + + bnxt_en: Refactor the HWRM_VER_GET firmware calls + + Refactor the code so that __bnxt_hwrm_ver_get() does not call + bnxt_hwrm_do_send_msg() directly. The new APIs will not expose this + internal call. Add a new bnxt_hwrm_poll() to poll the HWRM_VER_GET + firmware call silently. The other bnxt_hwrm_ver_get() function will + send the HWRM_VER_GET message directly with error logs enabled. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6c172d59ad79d3973e393ba49d819ed6f0417202 +Author: Edwin Peer +Date: Sun Aug 29 03:34:56 2021 -0400 + + bnxt_en: remove DMA mapping for KONG response + + The additional response buffer serves no useful purpose. There can + be only one firmware command in flight due to the hwrm_cmd_lock mutex, + which is taken for the entire duration of any command completion, + KONG or otherwise. It is thus safe to share a single DMA buffer. + + Removing the code associated with the additional mapping will simplify + matters in the next patch, which allocates response buffers from DMA + pools on a per request basis. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 907fd4a294dbb9ce12d9e47cb6fcf4dcc7b2a5f3 +Author: Jakub Kicinski +Date: Fri Aug 27 08:27:45 2021 -0700 + + bnxt: count discards due to memory allocation errors + + Count packets dropped due to buffer or skb allocation errors. + Report as part of rx_dropped. + + v2: drop the ethtool -S entry [Vladimir] + + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit 40bedf7cb2ac949052bf9741ccb9d673d782ae2c +Author: Jakub Kicinski +Date: Fri Aug 27 08:27:44 2021 -0700 + + bnxt: count packets discarded because of netpoll + + bnxt may discard packets if Rx completions are consumed + in an attempt to let netpoll make progress. 
It should be + extremely rare in practice but nonetheless such events + should be counted. + + Since completion ring memory is allocated dynamically use + a similar scheme to what is done for HW stats to save them. + + Report the stats in rx_dropped and per-netdev ethtool + counter. Chances that users care which ring dropped are + very low. + + v3: only save the stat to rx_dropped on reset, + rx_total_netpoll_discards will now only show drops since + last reset, similar to other "total_discard" counters. + + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit 0ff25f6a17c76d50e5d4bdd29bb69ad173a3cde1 +Author: Heiner Kallweit +Date: Sun Aug 22 15:56:24 2021 +0200 + + bnxt: Search VPD with pci_vpd_find_ro_info_keyword() + + Use pci_vpd_find_ro_info_keyword() to search for keywords in VPD to + simplify the code. + + Link: https://lore.kernel.org/r/f062921c-ad33-3b3e-8ada-b53427a9cd4a@gmail.com + Signed-off-by: Heiner Kallweit + Signed-off-by: Bjorn Helgaas + +commit 550cd7c1b45b568ccac28fd46663799f1ff8a62d +Author: Heiner Kallweit +Date: Sun Aug 22 15:55:23 2021 +0200 + + bnxt: Read VPD with pci_vpd_alloc() + + Use pci_vpd_alloc() to dynamically allocate a properly sized buffer and + read the full VPD data into it. + + This simplifies the code, and we no longer have to make assumptions about + VPD size. + + Link: https://lore.kernel.org/r/62522a24-f39a-2b35-1577-1fbb41695bed@gmail.com + Reported-by: kernel test robot + Signed-off-by: Heiner Kallweit + Signed-off-by: Bjorn Helgaas + +commit f3ccfda1931977b80267ba54070a1aeafa18f6ca +Author: Yufeng Mo +Date: Fri Aug 20 15:35:18 2021 +0800 + + ethtool: extend coalesce setting uAPI with CQE mode + + In order to support more coalesce parameters through netlink, + add two new parameter kernel_coal and extack for .set_coalesce + and .get_coalesce, then some extra info can return to user with + the netlink API. 
+ + Signed-off-by: Yufeng Mo + Signed-off-by: Huazhong Tan + Signed-off-by: Jakub Kicinski + +commit df70303dd14623829a4acdec539c929accb92e0e +Author: Christophe JAILLET +Date: Sun Aug 22 07:59:44 2021 +0200 + + net: broadcom: switch from 'pci_' to 'dma_' API + + The wrappers in include/linux/pci-dma-compat.h should go away. + + The patch has been generated with the coccinelle script below. + + It has been compile tested. + + @@ + @@ + - PCI_DMA_BIDIRECTIONAL + + DMA_BIDIRECTIONAL + + @@ + @@ + - PCI_DMA_TODEVICE + + DMA_TO_DEVICE + + @@ + @@ + - PCI_DMA_FROMDEVICE + + DMA_FROM_DEVICE + + @@ + @@ + - PCI_DMA_NONE + + DMA_NONE + + @@ + expression e1, e2, e3; + @@ + - pci_alloc_consistent(e1, e2, e3) + + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) + + @@ + expression e1, e2, e3; + @@ + - pci_zalloc_consistent(e1, e2, e3) + + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_free_consistent(e1, e2, e3, e4) + + dma_free_coherent(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_map_single(e1, e2, e3, e4) + + dma_map_single(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_unmap_single(e1, e2, e3, e4) + + dma_unmap_single(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4, e5; + @@ + - pci_map_page(e1, e2, e3, e4, e5) + + dma_map_page(&e1->dev, e2, e3, e4, e5) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_unmap_page(e1, e2, e3, e4) + + dma_unmap_page(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_map_sg(e1, e2, e3, e4) + + dma_map_sg(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_unmap_sg(e1, e2, e3, e4) + + dma_unmap_sg(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_dma_sync_single_for_device(e1, e2, e3, e4) + + dma_sync_single_for_device(&e1->dev, e2, e3, e4) + + @@ + 
expression e1, e2, e3, e4; + @@ + - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2, e3, e4; + @@ + - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) + + @@ + expression e1, e2; + @@ + - pci_dma_mapping_error(e1, e2) + + dma_mapping_error(&e1->dev, e2) + + @@ + expression e1, e2; + @@ + - pci_set_dma_mask(e1, e2) + + dma_set_mask(&e1->dev, e2) + + @@ + expression e1, e2; + @@ + - pci_set_consistent_dma_mask(e1, e2) + + dma_set_coherent_mask(&e1->dev, e2) + + Signed-off-by: Christophe JAILLET + Signed-off-by: David S. Miller + +commit 828affc27ed43441bd1efdaf4e07e96dd43a0362 +Author: Michael Chan +Date: Sun Aug 15 16:15:37 2021 -0400 + + bnxt_en: Add missing DMA memory barriers + + Each completion ring entry has a valid bit to indicate that the entry + contains a valid completion event. The driver's main poll loop + __bnxt_poll_work() has the proper dma_rmb() to make sure the valid + bit of the next entry has been checked before proceeding further. + But when we call bnxt_rx_pkt() to process the RX event, the RX + completion event consists of two completion entries and only the + first entry has been checked to be valid. We need the same barrier + after checking the next completion entry. Add missing dma_rmb() + barriers in bnxt_rx_pkt() and other similar locations. + + Fixes: 67a95e2022c7 ("bnxt_en: Need memory barrier when processing the completion ring.") + Reported-by: Lance Richardson + Reviewed-by: Andy Gospodarek + Reviewed-by: Lance Richardson + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 976e52b718c3de9077fff8f3f674afb159c57fb1 +Author: Michael Chan +Date: Sun Aug 15 16:15:36 2021 -0400 + + bnxt_en: Disable aRFS if running on 212 firmware + + 212 firmware broke aRFS, so disable it. Traffic may stop after ntuple + filters are inserted and deleted by the 212 firmware. 
+ + Fixes: ae10ae740ad2 ("bnxt_en: Add new hardware RFS mode.") + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fb9f7190092d2bbd1f8f0b1cc252732cbe99a87e +Author: Jakub Kicinski +Date: Thu Aug 12 14:42:42 2021 -0700 + + bnxt: count Tx drops + + Drivers should count packets they are dropping. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit e8d8c5d80f5e9d4586c68061b62c642752289095 +Author: Jakub Kicinski +Date: Thu Aug 12 14:42:41 2021 -0700 + + bnxt: make sure xmit_more + errors does not miss doorbells + + skbs are freed on error and not put on the ring. We may, however, + be in a situation where we're freeing the last skb of a batch, + and there is a doorbell ring pending because of xmit_more() being + true earlier. Make sure we ring the door bell in such situations. + + Since errors are rare don't pay attention to xmit_more() and just + always flush the pending frames. + + The busy case should be safe to be left alone because it can + only happen if start_xmit races with completions and they + both enable the queue. In that case the kick can't be pending. + + Noticed while reading the code. + + Fixes: 4d172f21cefe ("bnxt_en: Implement xmit_more.") + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit 01cca6b9330ac7460de44eeeb3a0607f8aae69ff +Author: Jakub Kicinski +Date: Thu Aug 12 14:42:40 2021 -0700 + + bnxt: disable napi before canceling DIM + + napi schedules DIM, napi has to be disabled first, + then DIM canceled. + + Noticed while reading the code. 
+ + Fixes: 0bc0b97fca73 ("bnxt_en: cleanup DIM work on device shutdown") + Fixes: 6a8788f25625 ("bnxt_en: add support for software dynamic interrupt moderation") + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit 3c603136c9f82833813af77185618de5af67676c +Author: Jakub Kicinski +Date: Thu Aug 12 14:42:39 2021 -0700 + + bnxt: don't lock the tx queue from napi poll + + We can't take the tx lock from the napi poll routine, because + netpoll can poll napi at any moment, including with the tx lock + already held. + + The tx lock is protecting against two paths - the disable + path, and (as Michael points out) the NETDEV_TX_BUSY case + which may occur if NAPI completions race with start_xmit + and both decide to re-enable the queue. + + For the disable/ifdown path use synchronize_net() to make sure + closing the device does not race with restarting the queues. + Annotate accesses to dev_state against data races. + + For the NAPI cleanup vs start_xmit path - appropriate barriers + are already in place in the main spot where Tx queue is stopped + but we need to do the same careful dance in the TX_BUSY case. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reviewed-by: Michael Chan + Reviewed-by: Edwin Peer + Signed-off-by: Jakub Kicinski + +commit 919d13a7e455c2e7676042d7a5f94c164e859d8a +Author: Leon Romanovsky +Date: Sun Aug 8 21:57:43 2021 +0300 + + devlink: Set device as early as possible + + All kernel devlink implementations call to devlink_alloc() during + initialization routine for specific device which is used later as + a parent device for devlink_register(). + + Such late device assignment causes to the situation which requires us to + call to device_register() before setting other parameters, but that call + opens devlink to the world and makes accessible for the netlink users. 
+ + Any attempt to move devlink_register() to be the last call generates the + following error due to access to the devlink->dev pointer. + + [ 8.758862] devlink_nl_param_fill+0x2e8/0xe50 + [ 8.760305] devlink_param_notify+0x6d/0x180 + [ 8.760435] __devlink_params_register+0x2f1/0x670 + [ 8.760558] devlink_params_register+0x1e/0x20 + + The simple change of API to set devlink device in the devlink_alloc() + instead of devlink_register() fixes all this above and ensures that + prior to call to devlink_register() everything already set. + + Signed-off-by: Leon Romanovsky + Reviewed-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 92529df76db5ab184b82674cf7a4eef4b665b40e +Author: Michael Chan +Date: Sat Aug 7 15:03:15 2021 -0400 + + bnxt_en: Use register window 6 instead of 5 to read the PHC + + Some older Broadcom debug tools use window 5 and may conflict, so switch + to use window 6 instead. + + Fixes: 118612d519d8 ("bnxt_en: Add PTP clock APIs, ioctls, and ethtool methods") + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9e26680733d5c6538ba2e7a111fb49c9ac2dc16a +Author: Michael Chan +Date: Sat Aug 7 15:03:14 2021 -0400 + + bnxt_en: Update firmware call to retrieve TX PTP timestamp + + New firmware interface requires the PTP sequence ID header offset to + be passed to the firmware to properly find the matching timestamp + for all protocols. + + Fixes: 83bb623c968e ("bnxt_en: Transmit and retrieve packet timestamps") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbfee25796e2688004d58ad4d0673279366b97dd +Author: Michael Chan +Date: Sat Aug 7 15:03:13 2021 -0400 + + bnxt_en: Update firmware interface to 1.10.2.52 + + The key change is the firmware call to retrieve the PTP TX timestamp. + The header offset for the PTP sequence number field is now added. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c1129b51ca0e6d261df7b49388af7962c8e9a19e +Author: Michael Chan +Date: Mon Aug 2 10:52:39 2021 -0400 + + bnxt_en: Increase maximum RX ring size if jumbo ring is not used + + The current maximum RX ring size is defined assuming the RX jumbo ring + (aka aggregation ring) is used. The RX jumbo ring is automatically used + when the MTU exceeds a threshold or when rx-gro-hw/lro is enabled. The RX + jumbo ring is automatically sized up to 4 times the size of the RX ring + size. + + The BNXT_MAX_RX_DESC_CNT constant is the upper limit on the size of the + RX ring whether or not the RX jumbo ring is used. Obviously, the + maximum amount of RX buffer space is significantly less when the RX jumbo + ring is not used. + + To increase flexibility for the user who does not use the RX jumbo ring, + we now define a bigger maximum RX ring size when the RX jumbo ring is not + used. The maximum RX ring size is now up to 8K when the RX jumbo ring + is not used. The maximum completion ring size also needs to be scaled + up to accommodate the larger maximum RX ring size. + + Note that when the RX jumbo ring is re-enabled, the RX ring size will + automatically drop if it exceeds the maximum. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 03c7448790b87cec82a2f1406ff40dd1a8861170 +Author: Michael Chan +Date: Mon Aug 2 10:52:38 2021 -0400 + + bnxt_en: Don't use static arrays for completion ring pages + + We currently store these page addresses and DMA addresses in static + arrays. On systems with 4K pages, we support up to 64 pages per + completion ring. The actual number of pages for each completion ring + may be much less than 64. For example, when the RX ring size is set + to the default 511 entries, only 16 completion ring pages are needed + per ring. + + In the next patch, we'll be doubling the maximum number of completion + pages. 
So we convert to allocate these arrays as needed instead of + declaring them statically. + + Reviewed-by: Pavan Chebbi + Reviewed-by: Somnath Kotur + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit abf90ac2c292410c22bbce4dfedeb49b9b0295ff +Author: Pavan Chebbi +Date: Wed Jul 28 14:11:45 2021 -0400 + + bnxt_en: Log if an invalid signal detected on TSIO pin + + FW can report to driver via ASYNC event if it encountered an + invalid signal on any TSIO PIN. Driver will log this event + for the user to take corrective action. + + Reviewed-by: Somnath Kotur + Reviewed-by: Arvind Susarla + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 099fdeda659d2071a97753a6479d13342ff2b285 +Author: Pavan Chebbi +Date: Wed Jul 28 14:11:44 2021 -0400 + + bnxt_en: Event handler for PPS events + + Once the PPS pins are configured, the FW can report + PPS values using ASYNC event. This patch adds the + ASYNC event handler and subsequent reporting of the + events to kernel. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9e518f25802cc13adcdb26adaaccfc7e9de80d44 +Author: Pavan Chebbi +Date: Wed Jul 28 14:11:43 2021 -0400 + + bnxt_en: 1PPS functions to configure TSIO pins + + Application will send ioctls to set/clear PPS pin functions + based on user input. This patch implements the driver + callbacks that will configure the TSIO pins using firmware + commands. After firmware reset, the TSIO pins will be reconfigured + again. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit caf3eedbcd8d326c9b789cc270b9ddcce254e0ce +Author: Pavan Chebbi +Date: Wed Jul 28 14:11:42 2021 -0400 + + bnxt_en: 1PPS support for 5750X family chips + + 1PPS (One Pulse Per Second) is a signal generated either + by the NIC PHC or an external timing source. 
+ Integrating the support to configure and use 1PPS using + the TSIO pins along with PTP timestamps will add Grand + Master capability to the 5750X family chipsets. + + This patch initializes the driver data structures and + registers the 1PPS with kernel, based on the TSIO pins' + capability in the hardware. This will create a /dev/ppsX + device which applications can use to receive PPS events. + + Later patches will define functions to configure and use + the pins. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 30e96f487f64c60b2884e37b9dabf287c9d048aa +Author: Michael Chan +Date: Wed Jul 28 14:11:41 2021 -0400 + + bnxt_en: Do not read the PTP PHC during chip reset + + During error recovery or hot firmware upgrade, the chip may be under + reset and the PHC register read cycles may cause completion timeouts. + Check that the chip is not under reset condition before proceeding + to read the PHC by checking the flag BNXT_STATE_IN_FW_RESET. We also + need to take the ptp_lock before we set this flag to prevent race + conditions. + + We need this logic because the PHC now will stay registered after + bnxt_close(). + + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a521c8a01d267bc1732ee83fdbf3ad2e02240d63 +Author: Michael Chan +Date: Wed Jul 28 14:11:40 2021 -0400 + + bnxt_en: Move bnxt_ptp_init() from bnxt_open() back to bnxt_init_one() + + It was pointed out by Richard Cochran that registering the PHC during + probe is better than during ifup, so move bnxt_ptp_init() back to + bnxt_init_one(). In order to work correctly after firmware reset which + may result in PTP config. changes, we modify bnxt_ptp_init() to return + if the PHC has been registered earlier. If PTP is no longer supported + by the new firmware, we will unregister the PHC and clean up. 
+ + This partially reverts: + + d7859afb6880 ("bnxt_en: Move bnxt_ptp_init() to bnxt_open()") + + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a76053707dbf0dc020a73b4d90cd952409ef3691 +Author: Arnd Bergmann +Date: Tue Jul 27 15:45:13 2021 +0200 + + dev_ioctl: split out ndo_eth_ioctl + + Most users of ndo_do_ioctl are ethernet drivers that implement + the MII commands SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG, or hardware + timestamping with SIOCSHWTSTAMP/SIOCGHWTSTAMP. + + Separate these from the few drivers that use ndo_do_ioctl to + implement SIOCBOND, SIOCBR and SIOCWANDEV commands. + + This is a purely cosmetic change intended to help readers find + their way through the implementation. + + Cc: Doug Ledford + Cc: Jason Gunthorpe + Cc: Jay Vosburgh + Cc: Veaceslav Falico + Cc: Andy Gospodarek + Cc: Andrew Lunn + Cc: Vivien Didelot + Cc: Florian Fainelli + Cc: Vladimir Oltean + Cc: Leon Romanovsky + Cc: linux-rdma@vger.kernel.org + Signed-off-by: Arnd Bergmann + Acked-by: Jason Gunthorpe + Signed-off-by: David S. Miller + +commit 758684e49f4c7ea2a75e249e486659f0950cd63e +Author: Somnath Kotur +Date: Mon Jul 26 14:52:48 2021 -0400 + + bnxt_en: Fix static checker warning in bnxt_fw_reset_task() + + Now that we return when bnxt_open() fails in bnxt_fw_reset_task(), + there is no need to check for 'rc' value again before invoking + bnxt_reenable_sriov(). + + Fixes: 3958b1da725a ("bnxt_en: fix error path of FW reset") + Reported-by: Dan Carpenter + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 89bc7f456cd40e0be7b94f4fdae9186f22b76a05 +Author: Michael Chan +Date: Fri Jul 23 17:53:48 2021 -0400 + + bnxt_en: Add missing periodic PHC overflow check + + We use the timecounter APIs for the 48-bit PHC and packet timestamps. + We must periodically update the timecounter at roughly half the + overflow interval. 
The overflow interval is about 78 hours, so + update it every 19 hours (1/4 interval) for some extra margins. + + Fixes: 390862f45c85 ("bnxt_en: Get the full 48-bit hardware timestamp periodically") + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Acked-by: Richard Cochran + Signed-off-by: David S. Miller + +commit de5bf19414fec860168f05d00d574562bd9d86d1 +Author: Michael Chan +Date: Sun Jul 18 15:36:33 2021 -0400 + + bnxt_en: Fix PTP capability discovery + + The current PTP initialization logic does not account for firmware + reset that may cause PTP capability to change. The valid pointer + bp->ptp_cfg is used to indicate that the device is capable of PTP + and that it has been initialized. So we must clean up bp->ptp_cfg + and free it if the firmware after reset does not support PTP. + + Fixes: 93cb62d98e9c ("bnxt_en: Enable hardware PTP support") + Cc: Richard Cochran + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d7859afb6880249039b178fdfb1bef94fd954cf2 +Author: Michael Chan +Date: Sun Jul 18 15:36:32 2021 -0400 + + bnxt_en: Move bnxt_ptp_init() to bnxt_open() + + The device needs to be in ifup state for PTP to function, so move + bnxt_ptp_init() to bnxt_open(). This means that the PHC will be + registered during bnxt_open(). + + This also makes firmware reset work correctly. PTP configurations + may change after firmware upgrade or downgrade. bnxt_open() will + be called after firmware reset, so it will work properly. + + bnxt_ptp_start() is now incorporated into bnxt_ptp_init(). We now + also need to call bnxt_ptp_clear() in bnxt_close(). + + Fixes: 93cb62d98e9c ("bnxt_en: Enable hardware PTP support") + Cc: Richard Cochran + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 11a39259ff79b74bc99f8b7c44075a2d6d5e7ab1 +Author: Somnath Kotur +Date: Sun Jul 18 15:36:31 2021 -0400 + + bnxt_en: Check abort error state in bnxt_half_open_nic() + + bnxt_half_open_nic() is called during during ethtool self test and is + protected by rtnl_lock. Firmware reset can be happening at the same + time. Only critical portions of the entire firmware reset sequence + are protected by the rtnl_lock. It is possible that bnxt_half_open_nic() + can be called when the firmware reset sequence is aborting. In that + case, bnxt_half_open_nic() needs to check if the ABORT_ERR flag is set + and abort if it is. The ethtool self test will fail but the NIC will be + brought to a consistent IF_DOWN state. + + Without this patch, if bnxt_half_open_nic() were to continue in this + error state, it may crash like this: + + bnxt_en 0000:82:00.1 enp130s0f1np1: FW reset in progress during close, FW reset will be aborted + Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 + ... + Process ethtool (pid: 333327, stack limit = 0x0000000046476577) + Call trace: + bnxt_alloc_mem+0x444/0xef0 [bnxt_en] + bnxt_half_open_nic+0x24/0xb8 [bnxt_en] + bnxt_self_test+0x2dc/0x390 [bnxt_en] + ethtool_self_test+0xe0/0x1f8 + dev_ethtool+0x1744/0x22d0 + dev_ioctl+0x190/0x3e0 + sock_ioctl+0x238/0x480 + do_vfs_ioctl+0xc4/0x758 + ksys_ioctl+0x84/0xb8 + __arm64_sys_ioctl+0x28/0x38 + el0_svc_handler+0xb0/0x180 + el0_svc+0x8/0xc + + Fixes: a1301f08c5ac ("bnxt_en: Check abort error state in bnxt_open_nic().") + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 96bdd4b9ea7ef9a12db8fdd0ce90e37dffbd3703 +Author: Michael Chan +Date: Sun Jul 18 15:36:30 2021 -0400 + + bnxt_en: Validate vlan protocol ID on RX packets + + Only pass supported VLAN protocol IDs for stripped VLAN tags to the + stack. The stack will hit WARN() if the protocol ID is unsupported. 
+ + Existing firmware sets up the chip to strip 0x8100, 0x88a8, 0x9100. + Only the 1st two protocols are supported by the kernel. + + Fixes: a196e96bb68f ("bnxt_en: clean up VLAN feature bit handling") + Reviewed-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3958b1da725a477b4a222183d16a14d85445d4b6 +Author: Somnath Kotur +Date: Sun Jul 18 15:36:29 2021 -0400 + + bnxt_en: fix error path of FW reset + + When bnxt_open() fails in the firmware reset path, the driver needs to + gracefully abort, but it is executing code that should be invoked only + in the success path. Define a function to abort FW reset and + consolidate all error paths to call this new function. + + Fixes: dab62e7c2de7 ("bnxt_en: Implement faster recovery for firmware fatal error.") + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6cd657cb3ee6f4de57e635b126ffbe0e51d00f1a +Author: Michael Chan +Date: Sun Jul 18 15:36:28 2021 -0400 + + bnxt_en: Add missing check for BNXT_STATE_ABORT_ERR in bnxt_fw_rset_task() + + In the BNXT_FW_RESET_STATE_POLL_VF state in bnxt_fw_reset_task() after all + VFs have unregistered, we need to check for BNXT_STATE_ABORT_ERR after + we acquire the rtnl_lock. If the flag is set, we need to abort. + + Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2c9f046bc377efd1f5e26e74817d5f96e9506c86 +Author: Michael Chan +Date: Sun Jul 18 15:36:27 2021 -0400 + + bnxt_en: Refresh RoCE capabilities in bnxt_ulp_probe() + + The capabilities can change after firmware upgrade/downgrade, so we + should get the up-to-date RoCE capabilities everytime bnxt_ulp_probe() + is called. + + Fixes: 2151fe0830fd ("bnxt_en: Handle RESET_NOTIFY async event from firmware.") + Reviewed-by: Somnath Kotur + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c08c59653415201ac46ab791c936ae804c45a11b +Author: Edwin Peer +Date: Sun Jul 18 15:36:26 2021 -0400 + + bnxt_en: reject ETS settings that will starve a TC + + ETS proportions are presented to HWRM_QUEUE_COS2BW_CFG as minimum + bandwidth constraints. Thus, zero is a legal value for a given TC. + However, if all the other TCs sum up to 100%, then at least one + hardware queue will starve, resulting in guaranteed TX timeouts. + Reject such nonsensical configurations. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c81cfb6256d90ea5ba4a6fb280ea3b171be4e05c +Author: Kalesh AP +Date: Sun Jul 18 15:36:25 2021 -0400 + + bnxt_en: don't disable an already disabled PCI device + + If device is already disabled in reset path and PCI io error is + detected before the device could be enabled, driver could + call pci_disable_device() for already disabled device. Fix this + problem by calling pci_disable_device() only if the device is already + enabled. + + Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") + Signed-off-by: Kalesh AP + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 93cb62d98e9c3d8c94cc09a15b9ab1faf342c392 +Author: Michael Chan +Date: Sun Jun 27 13:19:50 2021 -0400 + + bnxt_en: Enable hardware PTP support + + Call bnxt_ptp_init() to initialize and register with the clock driver + to enable PTP support. Call bnxt_ptp_free() to unregister and clean + up during shutdown. + + Reviewed-by: Edwin Peer + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 83bb623c968e7351aee5111547693f95f330dc5a +Author: Pavan Chebbi +Date: Sun Jun 27 13:19:49 2021 -0400 + + bnxt_en: Transmit and retrieve packet timestamps + + Setup the TXBD to enable TX timestamp if requested. 
At TX packet DMA + completion, if we requested TX timestamp on that packet, we defer to + .do_aux_work() to obtain the TX timestamp from the firmware before we + free the TX SKB. + + v2: Use .do_aux_work() to get the TX timestamp from firmware. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7f5515d19cd7aa02a866fd86622a022f12e06f0f +Author: Pavan Chebbi +Date: Sun Jun 27 13:19:48 2021 -0400 + + bnxt_en: Get the RX packet timestamp + + If the RX packet is timestamped by the hardware, the RX completion + record will contain the lower 32-bit of the timestamp. This needs + to be combined with the upper 16-bit of the periodic timestamp that + we get from the timer. The previous snapshot in ptp->old_timer is + used to make sure that the snapshot is not ahead of the RX timestamp + and we adjust for wrap-around if needed. + + v2: Make ptp->old_time read access safe on 32-bit CPUs. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 390862f45c85b8ebbf9c5c09192bf413a8fb72f8 +Author: Pavan Chebbi +Date: Sun Jun 27 13:19:47 2021 -0400 + + bnxt_en: Get the full 48-bit hardware timestamp periodically + + From the bnxt_timer(), read the 48-bit hardware running clock + periodically and store it in ptp->current_time. The previous snapshot + of the clock will be stored in ptp->old_time. The old_time snapshot + will be used in the next patches to compute the RX packet timestamps. + + v2: Use .do_aux_work() to read the timer periodically. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 118612d519d83b98ead11195a5c818f5e8904654 +Author: Michael Chan +Date: Sun Jun 27 13:19:46 2021 -0400 + + bnxt_en: Add PTP clock APIs, ioctls, and ethtool methods + + Add the clock APIs to set/get/adjust the hw clock, and the related + ioctls and ethtool methods. 
+ + v2: Propagate error code from ptp_clock_register(). + Add spinlock to serialize access to the timecounter. The + timecounter is accessed in process context and the RX datapath. + Read the PHC using direct registers. + + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ae5c42f0b92ca0abefe2e3930a14fc2e716c81a2 +Author: Michael Chan +Date: Sun Jun 27 13:19:45 2021 -0400 + + bnxt_en: Get PTP hardware capability from firmware + + Store PTP hardware info in a structure if hardware and firmware support PTP. + + Reviewed-by: Edwin Peer + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 78eeadb8fea6d1a37d5060fe2ea0a0b45f8d8860 +Author: Michael Chan +Date: Sun Jun 27 13:19:44 2021 -0400 + + bnxt_en: Update firmware interface to 1.10.2.47 + + Adding the PTP related firmware interface is the main change. + + There is also a name change for admin_mtu, requiring code fixup. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 158c1399fc45c5178a3f2b8b68ff2faa2e36a52d +Author: Toke Høiland-Jørgensen +Date: Thu Jun 24 18:05:58 2021 +0200 + + bnxt: Remove rcu_read_lock() around XDP program invocation + + The bnxt driver has rcu_read_lock()/rcu_read_unlock() pairs around XDP + program invocations. However, the actual lifetime of the objects referred + by the XDP program invocation is longer, all the way through to the call to + xdp_do_flush(), making the scope of the rcu_read_lock() too small. This + turns out to be harmless because it all happens in a single NAPI poll + cycle (and thus under local_bh_disable()), but it makes the rcu_read_lock() + misleading. + + Rather than extend the scope of the rcu_read_lock(), just get rid of it + entirely. 
With the addition of RCU annotations to the XDP_REDIRECT map + types that take bh execution into account, lockdep even understands this to + be safe, so there's really no reason to keep it around. + + Signed-off-by: Toke Høiland-Jørgensen + Signed-off-by: Daniel Borkmann + Cc: Michael Chan + Link: https://lore.kernel.org/bpf/20210624160609.292325-9-toke@redhat.com + +commit 03400aaa69f916a376e11526cf591901a96a3a5c +Author: Somnath Kotur +Date: Fri Jun 18 02:07:27 2021 -0400 + + bnxt_en: Call bnxt_ethtool_free() in bnxt_init_one() error path + + bnxt_ethtool_init() may have allocated some memory and we need to + call bnxt_ethtool_free() to properly unwind if bnxt_init_one() + fails. + + Fixes: 7c3809181468 ("bnxt_en: Refactor bnxt_init_one() and turn on TPA support on 57500 chips.") + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c12e1643d2738bcd4e26252ce531878841dd3f38 +Author: Rukhsana Ansari +Date: Fri Jun 18 02:07:26 2021 -0400 + + bnxt_en: Fix TQM fastpath ring backing store computation + + TQM fastpath ring needs to be sized to store both the requester + and responder side of RoCE QPs in TQM for supporting bi-directional + tests. Fix bnxt_alloc_ctx_mem() to multiply the RoCE QPs by a factor of + 2 when computing the number of entries for TQM fastpath ring. This + fixes an RX pipeline stall issue when running bi-directional max + RoCE QP tests. + + Fixes: c7dd7ab4b204 ("bnxt_en: Improve TQM ring context memory sizing formulas.") + Signed-off-by: Rukhsana Ansari + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0afd6a4e8028cc487c240b6cfe04094e45a306e4 +Author: Michael Chan +Date: Fri Jun 18 02:07:25 2021 -0400 + + bnxt_en: Rediscover PHY capabilities after firmware reset + + There is a missing bnxt_probe_phy() call in bnxt_fw_init_one() to + rediscover the PHY capabilities after a firmware reset. This can cause + some PHY related functionalities to fail after a firmware reset. 
For + example, in multi-host, the ability for any host to configure the PHY + settings may be lost after a firmware reset. + + Fixes: ec5d31e3c15d ("bnxt_en: Handle firmware reset status during IF_UP.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cc9fd18032efada6433712f52de8d98dfbd00fd2 +Author: Gustavo A. R. Silva +Date: Fri Nov 20 12:27:50 2020 -0600 + + bnxt_en: Fix fall-through warnings for Clang + + In preparation to enable -Wimplicit-fallthrough for Clang, fix a warning + by explicitly adding a break statement instead of just letting the code + fall through to the next case. + + Link: https://github.com/KSPP/linux/issues/115 + Signed-off-by: Gustavo A. R. Silva + +commit 702279d2ce4650000bb6302013630304e359dc13 +Author: Michael Chan +Date: Sat May 15 03:25:19 2021 -0400 + + bnxt_en: Fix context memory setup for 64K page size. + + There was a typo in the code that checks for 64K BNXT_PAGE_SHIFT in + bnxt_hwrm_set_pg_attr(). Fix it and make the code more understandable + with a new macro BNXT_SET_CTX_PAGE_ATTR(). + + Fixes: 1b9394e5a2ad ("bnxt_en: Configure context memory on new devices.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ab21494be9dc7d62736c5fcd06be65d49df713ee +Author: Andy Gospodarek +Date: Sat May 15 03:25:18 2021 -0400 + + bnxt_en: Include new P5 HV definition in VF check. + + Otherwise, some of the recently added HyperV VF IDs would not be + recognized as VF devices and they would not initialize properly. + + Fixes: 7fbf359bb2c1 ("bnxt_en: Add PCI IDs for Hyper-V VF devices.") + Reviewed-by: Edwin Peer + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit aa473d6ceb821d7c568c64cca7fff3e86ba9d789 +Author: Michael Chan +Date: Tue May 11 19:10:50 2021 -0400 + + bnxt_en: Fix and improve .ndo_features_check(). 
+ + Jakub Kicinski pointed out that we need to handle ipv6 extension headers + and to explicitly check for supported tunnel types in + .ndo_features_check(). + + For ipv6 extension headers, the hardware supports up to 2 ext. headers + and each must be <= 64 bytes. For tunneled packets, the supported + packets are UDP with supported VXLAN and Geneve ports, GRE, and IPIP. + + v3: More improvements based on Alexander Duyck's valuable feedback - + Remove the jump label in bnxt_features_check() and restructure it + so that the TCP/UDP check is consolidated in bnxt_exthdr_check(). + + v2: Add missing step to check inner ipv6 header for UDP and GRE tunnels. + Check TCP/UDP next header after skipping ipv6 ext headers for + non-tunneled packets and for inner ipv6. + (Both feedback from Alexander Duyck) + + Reviewed-by: Edwin Peer + Reviewed-by: Pavan Chebbi + Fixes: 1698d600b361 ("bnxt_en: Implement .ndo_features_check().") + Signed-off-by: Michael Chan + Reviewed-by: Alexander Duyck + Signed-off-by: David S. Miller + +commit 4cf0abbce69bde3d07757dfa9be6420407fdbc45 +Author: Heiner Kallweit +Date: Thu Apr 1 18:43:15 2021 +0200 + + PCI/VPD: Remove pci_vpd_find_tag() 'offset' argument + + All callers pass 0 as offset. Therefore remove the parameter and use a + fixed offset 0 in pci_vpd_find_tag(). + + Link: https://lore.kernel.org/r/f62e6e19-5423-2ead-b2bd-62844b23ef8f@gmail.com + Signed-off-by: Heiner Kallweit + Signed-off-by: Bjorn Helgaas + +commit 1698d600b361915fbe5eda63a613da55c435bd34 +Author: Michael Chan +Date: Sun Apr 25 13:45:27 2021 -0400 + + bnxt_en: Implement .ndo_features_check(). + + For UDP encapsulations, we only support the offloaded Vxlan port and + Geneve port. All other ports, including FOU and GUE, are not supported so + we need to turn off TSO and checksum features. + + v2: Reverse the check for supported UDP ports to be more straight forward. 
+ + Reviewed-by: Sriharsha Basavapatna + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dade5e15fade59a789c30bc47abfe926ddd856d6 +Author: Michael Chan +Date: Sun Apr 25 13:45:26 2021 -0400 + + bnxt_en: Support IFF_SUPP_NOFCS feature to transmit without ethernet FCS. + + If firmware is capable, set the IFF_SUPP_NOFCS flag to support the + sockets option to transmit packets without FCS. This is mainly used + for testing. + + Reviewed-by: Edwin Peer + Signed-off-by: David S. Miller + +commit 7fbf359bb2c19c824cbb1954020680824f6ee5a5 +Author: Michael Chan +Date: Sun Apr 25 13:45:25 2021 -0400 + + bnxt_en: Add PCI IDs for Hyper-V VF devices. + + Support VF device IDs used by the Hyper-V hypervisor. + + Reviewed-by: Vasundhara Volam + Reviewed-by: Andy Gospodarek + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 92923cc71012535cc5d760b1319675ad4c404c08 +Author: Michael Chan +Date: Sun Apr 25 13:45:24 2021 -0400 + + bnxt_en: Call bnxt_approve_mac() after the PF gives up control of the VF MAC. + + When the PF is no longer enforcing an assigned MAC address on a VF, the + VF needs to call bnxt_approve_mac() to tell the PF what MAC address it is + now using. Otherwise it gets out of sync and the PF won't know what + MAC address the VF wants to use. Ultimately the VF will fail when it + tries to setup the L2 MAC filter for the vnic. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7b3c8e27d67e2b04c1ce099261469c12d09c13d4 +Author: Michael Chan +Date: Sun Apr 25 13:45:23 2021 -0400 + + bnxt_en: Move bnxt_approve_mac(). + + Move it before bnxt_update_vf_mac(). In the next patch, we need to call + bnxt_approve_mac() from bnxt_update_mac() under some conditions. This + will avoid forward declaration. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 6b7027689890c590373fc58f362fae43d0517e21 +Author: Edwin Peer +Date: Sun Apr 25 13:45:22 2021 -0400 + + bnxt_en: allow VF config ops when PF is closed + + It is perfectly legal for the stack to query and configure VFs via PF + NDOs while the NIC is administratively down. Remove the unnecessary + check for the PF to be in open state. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dd85fc0ab5b4daa496bd3e2832b51963022182d0 +Author: Edwin Peer +Date: Sun Apr 25 13:45:21 2021 -0400 + + bnxt_en: allow promiscuous mode for trusted VFs + + Firmware previously only allowed promiscuous mode for VFs associated with + a default VLAN. It is now possible to enable promiscuous mode for a VF + having no VLAN configured provided that it is trusted. In such cases the + VF will see all packets received by the PF, irrespective of destination + MAC or VLAN. + + Note, it is necessary to query firmware at the time of bnxt_promisc_ok() + instead of in bnxt_hwrm_func_qcfg() because the trusted status might be + altered by the PF after the VF has been configured. This check must now + also be deferred because the firmware call sleeps. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d5ca99054f8e25384390d41c0123d930eed510b6 +Author: Michael Chan +Date: Sun Apr 25 13:45:20 2021 -0400 + + bnxt_en: Add support for fw managed link down feature. + + In the current code, the driver will not shutdown the link during + IFDOWN if there are still VFs sharing the port. Newer firmware will + manage the link down decision when the port is shared by VFs, so + we can just call firmware to shutdown the port unconditionally and + let firmware make the final decision. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit b0d28207ced88b3909547d8299f679353a87fd35 +Author: Michael Chan +Date: Sun Apr 25 13:45:19 2021 -0400 + + bnxt_en: Add a new phy_flags field to the main driver structure. + + Copy the phy related feature flags from the firmware call + HWRM_PORT_PHY_QCAPS to this new field. We can also remove the flags + field in the bnxt_test_info structure. It's cleaner to have all PHY + related flags in one location, directly copied from the firmware. + + To keep the BNXT_PHY_CFG_ABLE() macro logic the same, we need to make + a slight adjustment to check that it is a PF. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1d2deb61f095a7df231cc394c06d07a2893ac9eb +Author: Edwin Peer +Date: Sun Apr 25 13:45:18 2021 -0400 + + bnxt_en: report signal mode in link up messages + + Firmware reports link signalling mode for certain speeds. In these + cases, print the signalling modes in kernel log link up messages. + + Reviewed-by: Andy Gospodarek + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bbd6f0a948139970f4a615dff189d9a503681a39 +Author: Michael Chan +Date: Fri Apr 23 18:13:19 2021 -0400 + + bnxt_en: Fix RX consumer index logic in the error path. + + In bnxt_rx_pkt(), the RX buffers are expected to complete in order. + If the RX consumer index indicates an out of order buffer completion, + it means we are hitting a hardware bug and the driver will abort all + remaining RX packets and reset the RX ring. The RX consumer index + that we pass to bnxt_discard_rx() is not correct. We should be + passing the current index (tmp_raw_cons) instead of the old index + (raw_cons). This bug can cause us to be at the wrong index when + trying to abort the next RX packet. 
It can crash like this: + + #0 [ffff9bbcdf5c39a8] machine_kexec at ffffffff9b05e007 + #1 [ffff9bbcdf5c3a00] __crash_kexec at ffffffff9b111232 + #2 [ffff9bbcdf5c3ad0] panic at ffffffff9b07d61e + #3 [ffff9bbcdf5c3b50] oops_end at ffffffff9b030978 + #4 [ffff9bbcdf5c3b78] no_context at ffffffff9b06aaf0 + #5 [ffff9bbcdf5c3bd8] __bad_area_nosemaphore at ffffffff9b06ae2e + #6 [ffff9bbcdf5c3c28] bad_area_nosemaphore at ffffffff9b06af24 + #7 [ffff9bbcdf5c3c38] __do_page_fault at ffffffff9b06b67e + #8 [ffff9bbcdf5c3cb0] do_page_fault at ffffffff9b06bb12 + #9 [ffff9bbcdf5c3ce0] page_fault at ffffffff9bc015c5 + [exception RIP: bnxt_rx_pkt+237] + RIP: ffffffffc0259cdd RSP: ffff9bbcdf5c3d98 RFLAGS: 00010213 + RAX: 000000005dd8097f RBX: ffff9ba4cb11b7e0 RCX: ffffa923cf6e9000 + RDX: 0000000000000fff RSI: 0000000000000627 RDI: 0000000000001000 + RBP: ffff9bbcdf5c3e60 R8: 0000000000420003 R9: 000000000000020d + R10: ffffa923cf6ec138 R11: ffff9bbcdf5c3e83 R12: ffff9ba4d6f928c0 + R13: ffff9ba4cac28080 R14: ffff9ba4cb11b7f0 R15: ffff9ba4d5a30000 + ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018 + + Fixes: a1b0e4e684e9 ("bnxt_en: Improve RX consumer index validity check.") + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 27537929f30d3136a71ef29db56127a33c92dad7 +Author: Dan Carpenter +Date: Thu Apr 22 12:10:28 2021 +0300 + + bnxt_en: fix ternary sign extension bug in bnxt_show_temp() + + The problem is that bnxt_show_temp() returns long but "rc" is an int + and "len" is a u32. With ternary operations the type promotion is quite + tricky. The negative "rc" is first promoted to u32 and then to long so + it ends up being a high positive value instead of a a negative as we + intended. + + Fix this by removing the ternary. + + Fixes: d69753fa1ecb ("bnxt_en: return proper error codes in bnxt_show_temp") + Signed-off-by: Dan Carpenter + Signed-off-by: David S. 
Miller + +commit 37434782d63f89de5b9c383a449b6a82dc3fa4fb +Author: Jakub Kicinski +Date: Mon Apr 19 13:02:42 2021 -0700 + + bnxt: add more ethtool standard stats + + Michael suggest a few more stats we can expose. + + $ ethtool -S eth0 --groups eth-mac + Standard stats for eth0: + eth-mac-FramesTransmittedOK: 902623288966 + eth-mac-FramesReceivedOK: 28727667047 + eth-mac-FrameCheckSequenceErrors: 1 + eth-mac-AlignmentErrors: 0 + eth-mac-OutOfRangeLengthField: 0 + $ ethtool -S eth0 | grep '\(fcs\|align\|oor\)' + rx_fcs_err_frames: 1 + rx_align_err_frames: 0 + tx_fcs_err_frames: 0 + + Suggested-by: Michael Chan + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit 782bc00affcd63dacaa34e9ab6da588605423312 +Author: Jakub Kicinski +Date: Fri Apr 16 12:27:44 2021 -0700 + + bnxt: implement ethtool standard stats + + Most of the names seem to strongly correlate with names from + the standard and RFC. Whether ..+good_frames are indeed Frames..OK + I'm the least sure of. + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit c9ca5c3aabafcaa934731b8a841f28f8df990b7f +Author: Jakub Kicinski +Date: Thu Apr 15 15:53:16 2021 -0700 + + bnxt: implement ethtool::get_fec_stats + + Report corrected bits. + + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit ac797ced1fd0faba285c460eb1f64d1296b9cfa4 +Author: Sriharsha Basavapatna +Date: Sun Apr 11 20:18:15 2021 -0400 + + bnxt_en: Free and allocate VF-Reps during error recovery. + + During firmware recovery, VF-Rep configuration in the firmware is lost. + Fix it by freeing and (re)allocating VF-Reps in FW at relevant points + during the error recovery process. + + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 90f4fd02968720bdeb38a16deeff96fa770206e4 +Author: Michael Chan +Date: Sun Apr 11 20:18:14 2021 -0400 + + bnxt_en: Refactor __bnxt_vf_reps_destroy(). 
+ + Add a new helper function __bnxt_free_one_vf_rep() to free one VF rep. + We also reinitialize the VF rep fields to proper initial values so that + the function can be used without freeing the VF rep data structure. This + will be used in subsequent patches to free and recreate VF reps after + error recovery. + + Reviewed-by: Edwin Peer + Reviewed-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ea2d37b2b3079a896bc3f44a1962d3f01aa81b7f +Author: Sriharsha Basavapatna +Date: Sun Apr 11 20:18:13 2021 -0400 + + bnxt_en: Refactor bnxt_vf_reps_create(). + + Add a new function bnxt_alloc_vf_rep() to allocate a VF representor. + This function will be needed in subsequent patches to recreate the + VF reps after error recovery. + + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 190eda1a9dbc47409073bec79b81f362e21973b6 +Author: Vasundhara Volam +Date: Sun Apr 11 20:18:12 2021 -0400 + + bnxt_en: Invalidate health register mapping at the end of probe. + + After probe is successful, interface may not be brought up in all + the cases and health register mapping could be invalid if firmware + undergoes reset. Fix it by invalidating the health register at the + end of probe. It will be remapped during ifup. + + Fixes: 43a440c4007b ("bnxt_en: Improve the status_reliable flag in bp->fw_health.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 17e1be342d46eb0b7c3df4c7e623493483080b63 +Author: Michael Chan +Date: Sun Apr 11 20:18:11 2021 -0400 + + bnxt_en: Treat health register value 0 as valid in bnxt_try_recover_fw(). + + The retry loop in bnxt_try_recover_fw() should not abort when the + health register value is 0. It is a valid value that indicates the + firmware is booting up. 
+ + Fixes: 861aae786f2f ("bnxt_en: Enhance retry of the first message to the firmware.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 861aae786f2f7e1cab7926f7bb7783cb893e7edb +Author: Michael Chan +Date: Mon Mar 22 03:08:45 2021 -0400 + + bnxt_en: Enhance retry of the first message to the firmware. + + Two enhancements: + + 1. Read the health status first before sending the first + HWRM_VER_GET message to firmware instead of the other way around. + This guarantees we got the accurate health status before we attempt + to send the message. + + 2. We currently only retry sending the first HWRM_VER_GET message to + the firmware if the firmware is in the process of booting. If the + firmware is in error state and is doing core dump for example, the + driver should also retry if the health register has the RECOVERING + flag set. This flag indicates the firmware will undergo recovery + soon. Modify the retry logic to retry for this case as well. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bae8a00379f4c1327c8e38a768083460b5ad5b12 +Author: Vasundhara Volam +Date: Mon Mar 22 03:08:44 2021 -0400 + + bnxt_en: Remove the read of BNXT_FW_RESET_INPROG_REG after firmware reset. + + Once the chip goes through reset, the register mapping may be lost + and any read of the mapped health registers may return garbage value + until the registers are mapped again in the init path. + + Reading BNXT_FW_RESET_INPROG_REG after firmware reset will likely + return garbage value due to the above reason. Reading this register + is for information purpose only so remove it. + + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2924ad95cb51673ed3544cf371cafc66e2c76cc8 +Author: Michael Chan +Date: Mon Mar 22 03:08:43 2021 -0400 + + bnxt_en: Set BNXT_STATE_FW_RESET_DET flag earlier for the RDMA driver. 
+ + During ifup, if the driver detects that firmware has gone through a + reset, it will go through a re-probe sequence. If the RDMA driver is + loaded, the re-probe sequence includes calling the RDMA driver to stop. + We need to set the BNXT_STATE_FW_RESET_DET flag earlier so that it is + visible to the RDMA driver. The RDMA driver's stop sequence is + different if firmware has gone through a reset. + + Reviewed-by: Pavan Chebbi + Reviewed-by: P B S Naresh Kumar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 15a7deb895497e4c9496b98367e4a0671add03f1 +Author: Scott Branden +Date: Mon Mar 22 03:08:42 2021 -0400 + + bnxt_en: check return value of bnxt_hwrm_func_resc_qcaps + + Check return value of call to bnxt_hwrm_func_resc_qcaps in + bnxt_hwrm_if_change and return failure on error. + + Reviewed-by: Edwin Peer + Signed-off-by: Scott Branden + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a2f3835cc68a2222d0ab97862187ed98e65fe682 +Author: Edwin Peer +Date: Mon Mar 22 03:08:41 2021 -0400 + + bnxt_en: don't fake firmware response success when PCI is disabled + + The original intent here is to allow commands during reset to succeed + without error when the device is disabled, to ensure that cleanup + completes normally during NIC close, where firmware is not necessarily + expected to respond. + + The problem with faking success during reset's PCI disablement is that + unrelated ULP commands will also see inadvertent success during reset + when failure would otherwise be appropriate. It is better to return + a different error result such that reset related code can detect + this unique condition and ignore as appropriate. + + Note, the pci_disable_device() when firmware is fatally wounded in + bnxt_fw_reset_close() does not need to be addressed, as subsequent + commands are already expected to fail due to the BNXT_NO_FW_ACCESS() + check in bnxt_hwrm_do_send_msg(). 
+ + Reviewed-by: Scott Branden + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 80a9641f09f890a27a57e8ad30472553e0f769a6 +Author: Pavan Chebbi +Date: Mon Mar 22 03:08:40 2021 -0400 + + bnxt_en: Improve wait for firmware commands completion + + In situations where FW has crashed, the bnxt_hwrm_do_send_msg() call + will have to wait until timeout for each firmware message. This + generally takes about half a second for each firmware message. If we + try to unload the driver in this state, the unload sequence will take + a long time to complete. + + Improve this by checking the health register if it is available and + abort the wait for the firmware response if the register shows that + firmware is not healthy. The very first message HWRM_VER_GET is + excluded from this check because that message is used to poll for + firmware to come out of reset during error recovery. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 43a440c4007b28c473afba966e8410459db4975f +Author: Michael Chan +Date: Mon Mar 22 03:08:39 2021 -0400 + + bnxt_en: Improve the status_reliable flag in bp->fw_health. + + In order to read the firmware health status, we first need to determine + the register location and then the register may need to be mapped. + There are 2 code paths to do this. The first one is done early as a + best effort attempt by the function bnxt_try_map_fw_health_reg(). The + second one is done later in the function bnxt_map_fw_health_regs() + after establishing communications with the firmware. We currently + only set fw_health->status_reliable if we can successfully set up the + health register in the first code path. + + Improve the scheme by setting the fw_health->status_reliable flag if + either (or both) code paths can successfully set up the health + register. This flag is relied upon during run-time when we need to + check the health status. 
So this will make it work better. + + During ifdown, if the health register is mapped, we need to invalidate + the health register mapping because a potential fw reset will reset + the mapping. Similarly, we need to do the same after firmware reset + during recovery. We'll remap it during ifup. + + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fdc13979f91e664717f47eb8c49094e4b7f202e3 +Author: Lorenzo Bianconi +Date: Mon Mar 8 12:06:58 2021 +0100 + + bpf, devmap: Move drop error path to devmap for XDP_REDIRECT + + We want to change the current ndo_xdp_xmit drop semantics because it will + allow us to implement better queue overflow handling. This is working + towards the larger goal of a XDP TX queue-hook. Move XDP_REDIRECT error + path handling from each XDP ethernet driver to devmap code. According to + the new APIs, the driver running the ndo_xdp_xmit pointer, will break tx + loop whenever the hw reports a tx error and it will just return to devmap + caller the number of successfully transmitted frames. It will be devmap + responsibility to free dropped frames. 
+ + Move each XDP ndo_xdp_xmit capable driver to the new APIs: + + - veth + - virtio-net + - mvneta + - mvpp2 + - socionext + - amazon ena + - bnxt + - freescale (dpaa2, dpaa) + - xen-frontend + - qede + - ice + - igb + - ixgbe + - i40e + - mlx5 + - ti (cpsw, cpsw-new) + - tun + - sfc + + Signed-off-by: Lorenzo Bianconi + Signed-off-by: Daniel Borkmann + Reviewed-by: Ioana Ciornei + Reviewed-by: Ilias Apalodimas + Reviewed-by: Camelia Groza + Acked-by: Edward Cree + Acked-by: Jesper Dangaard Brouer + Acked-by: Shay Agroskin + Link: https://lore.kernel.org/bpf/ed670de24f951cfd77590decf0229a0ad7fd12f6.1615201152.git.lorenzo@kernel.org + +commit 20d7d1c5c9b11e9f538ed4a2289be106de970d3e +Author: Edwin Peer +Date: Fri Feb 26 04:43:10 2021 -0500 + + bnxt_en: reliably allocate IRQ table on reset to avoid crash + + The following trace excerpt corresponds with a NULL pointer dereference + of 'bp->irq_tbl' in bnxt_setup_inta() on an Aarch64 system after many + device resets: + + Unable to handle kernel NULL pointer dereference at ... 000000d + ... 
+ pc : string+0x3c/0x80 + lr : vsnprintf+0x294/0x7e0 + sp : ffff00000f61ba70 pstate : 20000145 + x29: ffff00000f61ba70 x28: 000000000000000d + x27: ffff0000009c8b5a x26: ffff00000f61bb80 + x25: ffff0000009c8b5a x24: 0000000000000012 + x23: 00000000ffffffe0 x22: ffff000008990428 + x21: ffff00000f61bb80 x20: 000000000000000d + x19: 000000000000001f x18: 0000000000000000 + x17: 0000000000000000 x16: ffff800b6d0fb400 + x15: 0000000000000000 x14: ffff800b7fe31ae8 + x13: 00001ed16472c920 x12: ffff000008c6b1c9 + x11: ffff000008cf0580 x10: ffff00000f61bb80 + x9 : 00000000ffffffd8 x8 : 000000000000000c + x7 : ffff800b684b8000 x6 : 0000000000000000 + x5 : 0000000000000065 x4 : 0000000000000001 + x3 : ffff0a00ffffff04 x2 : 000000000000001f + x1 : 0000000000000000 x0 : 000000000000000d + Call trace: + string+0x3c/0x80 + vsnprintf+0x294/0x7e0 + snprintf+0x44/0x50 + __bnxt_open_nic+0x34c/0x928 [bnxt_en] + bnxt_open+0xe8/0x238 [bnxt_en] + __dev_open+0xbc/0x130 + __dev_change_flags+0x12c/0x168 + dev_change_flags+0x20/0x60 + ... + + Ordinarily, a call to bnxt_setup_inta() (not in trace due to inlining) + would not be expected on a system supporting MSIX at all. However, if + bnxt_init_int_mode() does not end up being called after the call to + bnxt_clear_int_mode() in bnxt_fw_reset_close(), then the driver will + think that only INTA is supported and bp->irq_tbl will be NULL, + causing the above crash. + + In the error recovery scenario, we call bnxt_clear_int_mode() in + bnxt_fw_reset_close() early in the sequence. Ordinarily, we will + call bnxt_init_int_mode() in bnxt_hwrm_if_change() after we + reestablish communication with the firmware after reset. However, + if the sequence has to abort before we call bnxt_init_int_mode() and + if the user later attempts to re-open the device, then it will cause + the crash above. + + We fix it in 2 ways: + + 1. Check for bp->irq_tbl in bnxt_setup_int_mode(). If it is NULL, call + bnxt_init_init_mode(). + + 2. 
If we need to abort in bnxt_hwrm_if_change() and cannot complete + the error recovery sequence, set the BNXT_STATE_ABORT_ERR flag. This + will cause more drastic recovery at the next attempt to re-open the + device, including a call to bnxt_init_int_mode(). + + Fixes: 3bc7d4a352ef ("bnxt_en: Add BNXT_STATE_IN_FW_RESET state.") + Reviewed-by: Scott Branden + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit d20cd745218cde1b268bef5282095ec6c95a3ea2 +Author: Vasundhara Volam +Date: Fri Feb 26 04:43:09 2021 -0500 + + bnxt_en: Fix race between firmware reset and driver remove. + + The driver's error recovery reset sequence can take many seconds to + complete and only the critical sections are protected by rtnl_lock. + A recent change has introduced a regression in this sequence. + + bnxt_remove_one() may be called while the recovery is in progress. + Normally, unregister_netdev() would cause bnxt_close_nic() to be + called and this would cause the error recovery to safely abort + with the BNXT_STATE_ABORT_ERR flag set in bnxt_close_nic(). + + Recently, we added bnxt_reinit_after_abort() to allow the user to + reopen the device after an aborted recovery. This causes the + regression in the scenario described above because we would + attempt to re-open even after the netdev has been unregistered. + + Fix it by checking the netdev reg_state in + bnxt_reinit_after_abort() and abort if it is unregistered. + + Fixes: 6882c36cf82e ("bnxt_en: attempt to reinitialize after aborted reset") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit f4d95c3c194de04ae7b44f850131321c7ceb9312 +Author: Michael Chan +Date: Sun Feb 14 18:05:01 2021 -0500 + + bnxt_en: Improve logging of error recovery settings information. + + We currently only log the error recovery settings if it is enabled. + In some cases, firmware disables error recovery after it was + initially enabled. 
Without logging anything, the user will not be + aware of this change in setting. + + Log it when error recovery is disabled. Also, change the reset count + value from hexadecimal to decimal. + + Reviewed-by: Edwin Peer + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit df97b34d3ace777f90df368efe5508ddd32c16d4 +Author: Michael Chan +Date: Sun Feb 14 18:05:00 2021 -0500 + + bnxt_en: Reply to firmware's echo request async message. + + This is a new async message that the firmware can send to check if it + can communicate with the driver. This is an added error detection + scheme that firmware can use if it suspects errors in the PCIe + interface. When the driver receives this async message, it will reply + back echoing some data in the async message. If the firmware is not + getting the reply with the proper data after some retries, error + recovery will kick in. + + Reviewed-by: Andy Gospodarek + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 41435c39400071904a9b569d7bfc053c7c644bc5 +Author: Michael Chan +Date: Sun Feb 14 18:04:59 2021 -0500 + + bnxt_en: Initialize "context kind" field for context memory blocks. + + If firmware provides the offset to the "context kind" field of the + relevant context memory blocks, we'll initialize just that field for + each block instead of initializing all of context memory. + + Populate the bnxt_mem_init structure with the proper offset returned + by firmware. If it is older firmware and the information is not + available, we set the offset to an invalid value and fall back to + the old behavior of initializing every byte. Otherwise, we initialize + only the "context kind" byte at the offset. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit e9696ff33c79aed73ff76edb4961042a9b87d27b +Author: Michael Chan +Date: Sun Feb 14 18:04:58 2021 -0500 + + bnxt_en: Add context memory initialization infrastructure. + + Currently, the driver calls memset() to set all relevant context memory + used by the chip to the initial value. This can take many milliseconds + with the potentially large number of context pages allocated for the + chip. + + To make this faster, we only need to initialize the "context kind" field + of each block of context memory. This patch sets up the infrastructure + to do that with the bnxt_mem_init structure. In the next patch, we'll + add the logic to obtain the offset of the "context kind" from the + firmware. This patch is not changing the current behavior of calling + memset() to initialize all relevant context memory. + + Reviewed-by: Pavan Chebbi + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dab62e7c2de7b035c928a8babee27a6127891fdf +Author: Michael Chan +Date: Sun Feb 14 18:04:57 2021 -0500 + + bnxt_en: Implement faster recovery for firmware fatal error. + + During some fatal firmware error conditions, the PCI config space + register 0x2e which normally contains the subsystem ID will become + 0xffff. This register will revert back to the normal value after + the chip has completed core reset. If we detect this condition, + we can poll this config register immediately for the value to revert. + Because we use config read cycles to poll this register, there is no + possibility of Master Abort if we happen to read it during core reset. + This speeds up recovery significantly as we don't have to wait for the + conservative min_time before polling MMIO to see if the firmware has + come out of reset. As soon as this register changes value we can + proceed to re-initialize the device. 
+ + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Reviewed-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit be6d755f3d0c7c76d07f980bca6dd7e70dcec452 +Author: Edwin Peer +Date: Sun Feb 14 18:04:56 2021 -0500 + + bnxt_en: selectively allocate context memories + + Newer devices may have local context memory instead of relying on the + host for backing store. In these cases, HWRM_FUNC_BACKING_STORE_QCAPS + will return a zero entry size to indicate contexts for which the host + should not allocate backing store. + + Selectively allocate context memory based on device capabilities and + only enable backing store for the appropriate contexts. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 31f67c2ee0551f0fd0dd134f4a73c456c53ea015 +Author: Michael Chan +Date: Sun Feb 14 18:04:55 2021 -0500 + + bnxt_en: Update firmware interface spec to 1.10.2.16. + + The main changes are the echo request/response from firmware for error + detection and the NO_FCS feature to transmit frames without FCS. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit db28b6c77f4050f62599267a886b61fbd6504633 +Author: Vasundhara Volam +Date: Thu Feb 11 02:24:24 2021 -0500 + + bnxt_en: Fix devlink info's stored fw.psid version format. + + The running fw.psid version is in decimal format but the stored + fw.psid is in hex format. This can mislead the user to reset the + NIC to activate the stored version to become the running version. + + Fix it to display the stored fw.psid in decimal format. + + Fixes: 1388875b3916 ("bnxt_en: Add stored FW version info to devlink info_get cb.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 132e0b65dc2b8bfa9721bfce834191f24fd1d7ed +Author: Edwin Peer +Date: Thu Feb 11 02:24:23 2021 -0500 + + bnxt_en: reverse order of TX disable and carrier off + + A TX queue can potentially immediately timeout after it is stopped + and the last TX timestamp on that queue was more than 5 seconds ago with + carrier still up. Prevent these intermittent false TX timeouts + by bringing down carrier first before calling netif_tx_disable(). + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 871127e6ab0d6abb904cec81fc022baf6953be1f +Author: Michael Chan +Date: Tue Jan 26 01:20:24 2021 -0500 + + bnxt_en: Convert to use netif_level() helpers. + + Use the various netif_level() helpers to simplify the C code. This was + suggested by Joe Perches. + + Cc: Joe Perches + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1611642024-3166-1-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 0da65f4932cee9f9698a2e1493d22b27c91841c9 +Author: Michael Chan +Date: Mon Jan 25 02:08:21 2021 -0500 + + bnxt_en: Do not process completion entries after fatal condition detected. + + Once the firmware fatal condition is detected, we should cease + communication with the firmware and hardware quickly even if there + are many completion entries in the completion rings. This will + speed up the recovery process and prevent further I/Os that may + cause further exceptions. + + Do not proceed in the NAPI poll function if fatal condition is + detected. Call napi_complete() and return without arming interrupts. + Cleanup of all rings and reset are imminent. 
+ + Reviewed-by: Pavan Chebbi + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 5863b10aa86a5f5f69a25b55a5c15806c834471a +Author: Michael Chan +Date: Mon Jan 25 02:08:20 2021 -0500 + + bnxt_en: Consolidate firmware reset event logging. + + Combine the three netdev_warn() calls into a single call, printed at + the NETIF_MSG_HW log level. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 4f036b2e75986946117237a6baddc489dd2b3c34 +Author: Michael Chan +Date: Mon Jan 25 02:08:19 2021 -0500 + + bnxt_en: Improve firmware fatal error shutdown sequence. + + In the event of a fatal firmware error, firmware will notify the host + and then it will proceed to do core reset when it sees that all functions + have disabled Bus Master. To prevent Master Aborts and other hard + errors, we need to quiesce all activities in addition to disabling Bus + Master before the chip goes into core reset. + + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 38290e37297087f7ea3ef7904b8f185d77c42976 +Author: Michael Chan +Date: Mon Jan 25 02:08:18 2021 -0500 + + bnxt_en: Modify bnxt_disable_int_sync() to be called more than once. + + In the event of a fatal firmware error, we want to disable IRQ early + in the recovery sequence. This change will allow it to be called + safely again as part of the normal shutdown sequence. + + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit e340a5c4fbdde20fec8c16b83bce386aaad6b6eb +Author: Michael Chan +Date: Mon Jan 25 02:08:17 2021 -0500 + + bnxt_en: Add a new BNXT_STATE_NAPI_DISABLED flag to keep track of NAPI state. 
+ + Up until now, we don't need to keep track of this state because NAPI + is always enabled once and disabled once during bring up and shutdown. + For better error recovery in subsequent patches, we want to quiesce + the device earlier during fatal error conditions. The normal shutdown + sequence will disable NAPI again and the flag will prevent disabling + NAPI twice. + + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 339eeb4bd9e477141280e46ea9433f3a10b54699 +Author: Michael Chan +Date: Mon Jan 25 02:08:16 2021 -0500 + + bnxt_en: Add bnxt_fw_reset_timeout() helper. + + This code to check if we have reached the maximum wait time after + firmware reset is used multiple times. Add a helper function to + do this. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 5d06eb5cb1f9da393eb47b8948d4367e69e48a62 +Author: Vasundhara Volam +Date: Mon Jan 25 02:08:15 2021 -0500 + + bnxt_en: Retry open if firmware is in reset. + + Firmware may be in the middle of reset when the driver tries to do ifup. + In that case, firmware will return a special error code and the driver + will retry 10 times with 50 msecs delay after each retry. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 6882c36cf82ebb210f3977be7a3a0be0c64a44cb +Author: Edwin Peer +Date: Mon Jan 25 02:08:14 2021 -0500 + + bnxt_en: attempt to reinitialize after aborted reset + + Drawing a hard line on aborted resets prevents a NIC open in + some scenarios that may otherwise be recoverable. 
For example, + if a firmware recovery happened while a PF was down and an + attempt was made to bring up an associated VF in this state, + then it was impossible to ever bring up this VF without a + rebind or reload of its driver. + + Attempt to reinitialize the firmware when an aborted reset (or + failed init after a reset) is discovered during open - it may + succeed. Also take care to allow the user to retry opening the + NIC even after an aborted reset. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit a44daa8fcbcf572545c4c1a7908b3fbb38388048 +Author: Edwin Peer +Date: Mon Jan 25 02:08:13 2021 -0500 + + bnxt_en: log firmware debug notifications + + Firmware is capable of generating asynchronous debug notifications. + The event data is opaque to the driver and is simply logged. Debug + notifications can be enabled by turning on hardware status messages + using the ethtool msglvl interface. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 881d8353b05e80d93db14b860581ceba14116422 +Author: Vasundhara Volam +Date: Mon Jan 25 02:08:12 2021 -0500 + + bnxt_en: Add an upper bound for all firmware command timeouts. + + The timeout period for firmware messages is passed to the driver + from the firmware in the response of the first command. This + timeout period is multiplied by a factor for certain long + running commands such as NVRAM commands. In some cases, the + timeout period can become really long and it can cause hung task + warnings if firmware has crashed or is not responding. To avoid + such long delays, cap all firmware commands to a max timeout value + of 40 seconds. 
+ + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 3e3c09b0e999f51d35875c3103c6ccb49290788f +Author: Vasundhara Volam +Date: Mon Jan 25 02:08:11 2021 -0500 + + bnxt_en: Move reading VPD info after successful handshake with fw. + + If firmware is in reset or in bad state, it won't be able to return + VPD data. Move bnxt_vpd_read_info() until after bnxt_fw_init_one_p1() + successfully returns. By then we would have established proper + communications with the firmware. + + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit d1cbd1659cac9b192f4677715becf937978b091a +Author: Michael Chan +Date: Mon Jan 25 02:08:10 2021 -0500 + + bnxt_en: Retry sending the first message to firmware if it is under reset. + + The first HWRM_VER_GET message to firmware during probe may timeout if + firmware is under reset. This can happen during hot-plug for example. + On P5 and newer chips, we can check if firmware is in the boot stage by + reading a status register. Retry 5 times if the status register shows + that firmware is not ready and not in error state. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit b187e4bae0aaa49958cc589af46f7059672980db +Author: Edwin Peer +Date: Mon Jan 25 02:08:09 2021 -0500 + + bnxt_en: handle CRASH_NO_MASTER during bnxt_open() + + Add missing support for handling NO_MASTER crashes while ports are + administratively down (ifdown). On some SoC platforms, the driver + needs to assist the firmware to recover from a crash via OP-TEE. + This is performed in a similar fashion to what is done during driver + probe. 
+ + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit fe1b853572f17dcfdda93651c1ca3f41bbaf76f0 +Author: Michael Chan +Date: Mon Jan 25 02:08:08 2021 -0500 + + bnxt_en: Define macros for the various health register states. + + Define macros to check for the various states in the lower 16 bits of + the health register. Replace the C code that checks for these values + with the newly defined macros. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 16db6323042f39b6f49148969e9d03d11265bc1b +Author: Michael Chan +Date: Mon Jan 25 02:08:07 2021 -0500 + + bnxt_en: Update firmware interface to 1.10.2.11. + + Updates to backing store APIs, QoS profiles, and push buffer initial + index support. + + Since the new HWRM_FUNC_BACKING_STORE_CFG message size has increased, + we need to add some compat. logic to fall back to the smaller legacy + size if firmware cannot accept the larger message size. The new fields + added to the structure are not used yet. + + Signed-off-by: Michael Chan + Acked-by: Willem de Bruijn + Signed-off-by: Jakub Kicinski + +commit 687487751814a493fba953efb9b1542b2f90614c +Author: Pavan Chebbi +Date: Mon Jan 11 04:26:40 2021 -0500 + + bnxt_en: Clear DEFRAG flag in firmware message when retry flashing. + + When the FW tells the driver to retry the INSTALL_UPDATE command after + it has cleared the NVM area, the driver is not clearing the previously + used ALLOWED_TO_DEFRAG flag. As a result the FW tries to defrag the NVM + area a second time in a loop and can fail the request. 
+ + Fixes: 1432c3f6a6ca ("bnxt_en: Retry installing FW package under NO_SPACE error condition.") + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 869c4d5eb1e6fbda66aa790c48bdb946d71494a0 +Author: Michael Chan +Date: Mon Jan 11 04:26:39 2021 -0500 + + bnxt_en: Improve stats context resource accounting with RDMA driver loaded. + + The function bnxt_get_ulp_stat_ctxs() does not count the stats contexts + used by the RDMA driver correctly when the RDMA driver is freeing the + MSIX vectors. It assumes that if the RDMA driver is registered, the + additional stats contexts will be needed. This is not true when the + RDMA driver is about to unregister and frees the MSIX vectors. + + This slight error leads to over accounting of the stats contexts needed + after the RDMA driver has unloaded. This will cause some firmware + warning and error messages in dmesg during subsequent config. changes + or ifdown/ifup. + + Fix it by properly accounting for extra stats contexts only if the + RDMA driver is registered and MSIX vectors have been successfully + requested. + + Fixes: c027c6b4e91f ("bnxt_en: get rid of num_stat_ctxs variable") + Reviewed-by: Yongping Zhang + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit be9df4aff65f18caa79b35f88f42c3d5a43af14f +Author: Lorenzo Bianconi +Date: Tue Dec 22 22:09:29 2020 +0100 + + net, xdp: Introduce xdp_prepare_buff utility routine + + Introduce xdp_prepare_buff utility routine to initialize per-descriptor + xdp_buff fields (e.g. xdp_buff pointers). Rely on xdp_prepare_buff() in + all XDP capable drivers. 
+ + Signed-off-by: Lorenzo Bianconi + Signed-off-by: Daniel Borkmann + Reviewed-by: Alexander Duyck + Acked-by: Jesper Dangaard Brouer + Acked-by: John Fastabend + Acked-by: Shay Agroskin + Acked-by: Martin Habets + Acked-by: Camelia Groza + Acked-by: Marcin Wojtas + Link: https://lore.kernel.org/bpf/45f46f12295972a97da8ca01990b3e71501e9d89.1608670965.git.lorenzo@kernel.org + Signed-off-by: Alexei Starovoitov + +commit 43b5169d8355ccf26d726fbc75f083b2429113e4 +Author: Lorenzo Bianconi +Date: Tue Dec 22 22:09:28 2020 +0100 + + net, xdp: Introduce xdp_init_buff utility routine + + Introduce xdp_init_buff utility routine to initialize xdp_buff fields + const over NAPI iterations (e.g. frame_sz or rxq pointer). Rely on + xdp_init_buff in all XDP capable drivers. + + Signed-off-by: Lorenzo Bianconi + Signed-off-by: Daniel Borkmann + Reviewed-by: Alexander Duyck + Acked-by: Jesper Dangaard Brouer + Acked-by: John Fastabend + Acked-by: Shay Agroskin + Acked-by: Martin Habets + Acked-by: Camelia Groza + Acked-by: Marcin Wojtas + Link: https://lore.kernel.org/bpf/7f8329b6da1434dc2b05a77f2e800b29628a8913.1608670965.git.lorenzo@kernel.org + Signed-off-by: Alexei Starovoitov + +commit 30bfce109420912f201d4f295f9130ff44f04b41 +Author: Jakub Kicinski +Date: Wed Jan 6 13:06:36 2021 -0800 + + net: remove ndo_udp_tunnel_* callbacks + + All UDP tunnel port management is now routed via udp_tunnel_nic + infra directly. Remove the old callbacks. + + Reviewed-by: Alexander Duyck + Reviewed-by: Jacob Keller + Signed-off-by: Jakub Kicinski + +commit 33dbcf60556a2a23b07f837e5954991925b72fd2 +Author: Zheng Yongjun +Date: Tue Dec 29 21:52:46 2020 +0800 + + bnxt_en: Use kzalloc for allocating only one thing + + Use kzalloc rather than kcalloc(1,...) + + The semantic patch that makes this change is as follows: + (http://coccinelle.lip6.fr/) + + // + @@ + @@ + + - kcalloc(1, + + kzalloc( + ...) + // + + Signed-off-by: Zheng Yongjun + Signed-off-by: David S. 
Miller + +commit a029a2fef5d11bb85587433c3783615442abac96 +Author: Michael Chan +Date: Sun Dec 27 14:18:18 2020 -0500 + + bnxt_en: Check TQM rings for maximum supported value. + + TQM rings are hardware resources that require host context memory + managed by the driver. The driver supports up to 9 TQM rings and + the number of rings to use is requested by firmware during run-time. + Cap this number to the maximum supported to prevent accessing beyond + the array. Future firmware may request more than 9 TQM rings. Define + macros to remove the magic number 9 from the C code. + + Fixes: ac3158cb0108 ("bnxt_en: Allocate TQM ring context memory according to fw specification.") + Reviewed-by: Pavan Chebbi + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit fb1e6e562b37b39adfe251919c9abfdb3e01f921 +Author: Vasundhara Volam +Date: Sun Dec 27 14:18:17 2020 -0500 + + bnxt_en: Fix AER recovery. + + A recent change skips sending firmware messages to the firmware when + pci_channel_offline() is true during fatal AER error. To make this + complete, we need to move the re-initialization sequence to + bnxt_io_resume(), otherwise the firmware messages to re-initialize + will all be skipped. In any case, it is more correct to re-initialize + in bnxt_io_resume(). + + Also, fix the reverse x-mas tree format when defining variables + in bnxt_io_slot_reset(). + + Fixes: b340dc680ed4 ("bnxt_en: Avoid sending firmware messages when AER error is detected.") + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit a86b313e18178b39fcca4850d4dfeb5af1e3dc7e +Author: Michael Chan +Date: Sun Dec 13 06:51:46 2020 -0500 + + bnxt_en: Enable batch mode when using HWRM_NVM_MODIFY to flash packages. + + The current scheme allocates a DMA buffer as big as the requested + firmware package file and DMAs the contents to firmware in one + operation. 
The buffer size can be several hundred kilo bytes and + the driver may not be able to allocate the memory. This will cause + firmware upgrade to fail. + + Improve the scheme by using smaller DMA blocks and calling firmware to + DMA each block in a batch mode. Older firmware can cause excessive + NVRAM erases if the block size is too small so we try to allocate a + 256K buffer to begin with and size it down successively if we cannot + allocate the memory. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 1432c3f6a6ca091db10b60c7b9078f34f4c5268d +Author: Pavan Chebbi +Date: Sun Dec 13 06:51:45 2020 -0500 + + bnxt_en: Retry installing FW package under NO_SPACE error condition. + + In bnxt_flash_package_from_fw_obj(), if firmware returns the NO_SPACE + error, call __bnxt_flash_nvram() to create the UPDATE directory and + then loop back and retry one more time. + + Since the first try may fail, we use the silent version to send the + firmware commands. + + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 2e5fb428a61ce58f9db6ceccdeb6dc292248f1dc +Author: Pavan Chebbi +Date: Sun Dec 13 06:51:44 2020 -0500 + + bnxt_en: Restructure bnxt_flash_package_from_fw_obj() to execute in a loop. + + On NICs with a smaller NVRAM, FW installation may fail after multiple + updates due to fragmentation. The driver can retry when FW returns + a special error code. To facilitate the retry, we restructure the + logic that performs the flashing in a loop. The actual retry logic + will be added in the next patch. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit a9094ba6072bfabe93f93b641a3858d9c91c2c86 +Author: Michael Chan +Date: Sun Dec 13 06:51:43 2020 -0500 + + bnxt_en: Rearrange the logic in bnxt_flash_package_from_fw_obj(). 
+ + This function will be modified in the next patch to retry flashing + the firmware in a loop. To facilitate that, we rearrange the code so + that the steps that only need to be done once before the loop will be + moved to the top of the function. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 93ff343528ce034ef35e71d3b1d506df3cf85282 +Author: Pavan Chebbi +Date: Sun Dec 13 06:51:42 2020 -0500 + + bnxt_en: Refactor bnxt_flash_nvram. + + Refactor bnxt_flash_nvram() into __bnxt_flash_nvram() that takes an + additional dir_item_len parameter. The new function will be used + in subsequent patches with the dir_item_len parameter set to create + the UPDATE directory during flashing. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit b02e5a0ebb172c8276cea3151942aac681f7a4a6 +Author: Björn Töpel +Date: Mon Nov 30 19:52:01 2020 +0100 + + xsk: Propagate napi_id to XDP socket Rx path + + Add napi_id to the xdp_rxq_info structure, and make sure the XDP + socket pick up the napi_id in the Rx path. The napi_id is used to find + the corresponding NAPI structure for socket busy polling. + + Signed-off-by: Björn Töpel + Signed-off-by: Daniel Borkmann + Acked-by: Ilias Apalodimas + Acked-by: Michael S. Tsirkin + Acked-by: Tariq Toukan + Link: https://lore.kernel.org/bpf/20201130185205.196029-7-bjorn.topel@gmail.com + +commit cc69837fcaf467426ca19e5790085c26146a2300 +Author: Jakub Kicinski +Date: Fri Nov 20 14:50:52 2020 -0800 + + net: don't include ethtool.h from netdevice.h + + linux/netdevice.h is included in very many places, touching any + of its dependencies causes large incremental builds. + + Drop the linux/ethtool.h include, linux/netdevice.h just needs + a forward declaration of struct ethtool_ops. + + Fix all the places which made use of this implicit include. 
+ + Acked-by: Johannes Berg + Acked-by: Shannon Nelson + Reviewed-by: Jesse Brandeburg + Link: https://lore.kernel.org/r/20201120225052.1427503-1-kuba@kernel.org + Signed-off-by: Jakub Kicinski + +commit c54bc3ced5106663c2f2b44071800621f505b00e +Author: Michael Chan +Date: Fri Nov 20 02:44:31 2020 -0500 + + bnxt_en: Release PCI regions when DMA mask setup fails during probe. + + Jump to init_err_release to cleanup. bnxt_unmap_bars() will also be + called but it will do nothing if the BARs are not mapped yet. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reported-by: Jakub Kicinski + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1605858271-8209-1-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 3383176efc0fb0c0900a191026468a58668b4214 +Author: Zhang Changzhong +Date: Thu Nov 19 21:30:21 2020 +0800 + + bnxt_en: fix error return code in bnxt_init_board() + + Fix to return a negative error code from the error handling + case instead of 0, as done elsewhere in this function. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reported-by: Hulk Robot + Signed-off-by: Zhang Changzhong + Reviewed-by: Edwin Peer + Link: https://lore.kernel.org/r/1605792621-6268-1-git-send-email-zhangchangzhong@huawei.com + Signed-off-by: Jakub Kicinski + +commit b5f796b62c98cd8c219c4b788ecb6e1218e648cb +Author: Zhang Changzhong +Date: Wed Nov 18 20:17:31 2020 +0800 + + bnxt_en: fix error return code in bnxt_init_one() + + Fix to return a negative error code from the error handling + case instead of 0, as done elsewhere in this function. 
+ + Fixes: c213eae8d3cd ("bnxt_en: Improve VF/PF link change logic.") + Reported-by: Hulk Robot + Signed-off-by: Zhang Changzhong + Reviewed-by: Edwin Peer + Link: https://lore.kernel.org/r/1605701851-20270-1-git-send-email-zhangchangzhong@huawei.com + Signed-off-by: Jakub Kicinski + +commit 52cc5f3a166a33012ebca2cdefebf4c689110068 +Author: Jacob Keller +Date: Wed Nov 18 11:06:36 2020 -0800 + + devlink: move flash end and begin to core devlink + + When performing a flash update via devlink, device drivers may inform + user space of status updates via + devlink_flash_update_(begin|end|timeout|status)_notify functions. + + It is expected that drivers do not send any status notifications unless + they send a begin and end message. If a driver sends a status + notification without sending the appropriate end notification upon + finishing (regardless of success or failure), the current implementation + of the devlink userspace program can get stuck endlessly waiting for the + end notification that will never come. + + The current ice driver implementation may send such a status message + without the appropriate end notification in rare cases. + + Fixing the ice driver is relatively simple: we just need to send the + begin_notify at the start of the function and always send an end_notify + no matter how the function exits. + + Rather than assuming driver authors will always get this right in the + future, lets just fix the API so that it is not possible to get wrong. + Make devlink_flash_update_begin_notify and + devlink_flash_update_end_notify static, and call them in devlink.c core + code. Always send the begin_notify just before calling the driver's + flash_update routine. Always send the end_notify just after the routine + returns regardless of success or failure. + + Doing this makes the status notification easier to use from the driver, + as it no longer needs to worry about catching failures and cleaning up + by calling devlink_flash_update_end_notify. 
It is now no longer possible + to do the wrong thing in this regard. We also save a couple of lines of + code in each driver. + + Signed-off-by: Jacob Keller + Acked-by: Vasundhara Volam + Reviewed-by: Jiri Pirko + Signed-off-by: Jakub Kicinski + +commit b44cfd4f5b912454387a4bf735d42eb4e7078ca8 +Author: Jacob Keller +Date: Wed Nov 18 11:06:35 2020 -0800 + + devlink: move request_firmware out of driver + + All drivers which implement the devlink flash update support, with the + exception of netdevsim, use either request_firmware or + request_firmware_direct to locate the firmware file. Rather than having + each driver do this separately as part of its .flash_update + implementation, perform the request_firmware within net/core/devlink.c + + Replace the file_name parameter in the struct devlink_flash_update_params + with a pointer to the fw object. + + Use request_firmware rather than request_firmware_direct. Although most + Linux distributions today do not have the fallback mechanism + implemented, only about half the drivers used the _direct request, as + compared to the generic request_firmware. In the event that + a distribution does support the fallback mechanism, the devlink flash + update ought to be able to use it to provide the firmware contents. For + distributions which do not support the fallback userspace mechanism, + there should be essentially no difference between request_firmware and + request_firmware_direct. + + Signed-off-by: Jacob Keller + Acked-by: Shannon Nelson + Acked-by: Vasundhara Volam + Reviewed-by: Jiri Pirko + Signed-off-by: Jakub Kicinski + +commit 0ae0a779efb8840a0cdb2d6bd9a5d07663ac3ee2 +Author: Vasundhara Volam +Date: Sun Nov 15 19:27:52 2020 -0500 + + bnxt_en: Avoid unnecessary NVM_GET_DEV_INFO cmd error log on VFs. + + VFs do not have access permissions to issue NVM_GET_DEV_INFO + firmware command. 
+ + Fixes: 4933f6753b50 ("bnxt_en: Add bnxt_hwrm_nvm_get_dev_info() to query NVM info.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit fa97f303fa4cf8469fd3d1ef29da69c0a3f6ddc8 +Author: Michael Chan +Date: Sun Nov 15 19:27:51 2020 -0500 + + bnxt_en: Fix counter overflow logic. + + bnxt_add_one_ctr() adds a hardware counter to a software counter and + adjusts for the hardware counter wraparound against the mask. The logic + assumes that the hardware counter is always smaller than or equal to + the mask. + + This assumption is mostly correct. But in some cases if the firmware + is older and does not provide the accurate mask, the driver can use + a mask that is smaller than the actual hardware mask. This can cause + some extra carry bits to be added to the software counter, resulting in + counters that far exceed the actual value. Fix it by masking the + hardware counter with the mask passed into bnxt_add_one_ctr(). + + Fixes: fea6b3335527 ("bnxt_en: Accumulate all counters.") + Reviewed-by: Vasundhara Volam + Reviewed-by: Pavan Chebbi + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit eba93de6d31c1734dee59909020a162de612e41e +Author: Michael Chan +Date: Sun Nov 15 19:27:50 2020 -0500 + + bnxt_en: Free port stats during firmware reset. + + Firmware is unable to retain the port counters during any kind of + fatal or non-fatal resets, so we must clear the port counters to + avoid false detection of port counter overflow. 
+ + Fixes: fea6b3335527 ("bnxt_en: Accumulate all counters.") + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 4260330b32b14330cfe427d568ac5f5b29b5be3d +Author: Edwin Peer +Date: Sun Nov 15 19:27:49 2020 -0500 + + bnxt_en: read EEPROM A2h address using page 0 + + The module eeprom address range returned by bnxt_get_module_eeprom() + should be 256 bytes of A0h address space, the lower half of the A2h + address space, and page 0 for the upper half of the A2h address space. + + Fix the firmware call by passing page_number 0 for the A2h slave address + space. + + Fixes: 42ee18fe4ca2 ("bnxt_en: Add Support for ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPRO") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 825741b071722f1c8ad692cead562c4b5f5eaa93 +Author: Vasundhara Volam +Date: Mon Oct 26 00:18:21 2020 -0400 + + bnxt_en: Send HWRM_FUNC_RESET fw command unconditionally. + + In the AER or firmware reset flow, if we are in fatal error state or + if pci_channel_offline() is true, we don't send any commands to the + firmware because the commands will likely not reach the firmware and + most commands don't matter much because the firmware is likely to be + reset imminently. + + However, the HWRM_FUNC_RESET command is different and we should always + attempt to send it. In the AER flow for example, the .slot_reset() + call will trigger this fw command and we need to try to send it to + effect the proper reset. + + Fixes: b340dc680ed4 ("bnxt_en: Avoid sending firmware messages when AER error is detected.") + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit a1301f08c5acf992d9c1fafddc84c3a822844b04 +Author: Michael Chan +Date: Mon Oct 26 00:18:20 2020 -0400 + + bnxt_en: Check abort error state in bnxt_open_nic(). 
+ + bnxt_open_nic() is called during configuration changes that require + the NIC to be closed and then opened. This call is protected by + rtnl_lock. Firmware reset can be happening at the same time. Only + critical portions of the entire firmware reset sequence are protected + by the rtnl_lock. It is possible that bnxt_open_nic() can be called + when the firmware reset sequence is aborting. In that case, + bnxt_open_nic() needs to check if the ABORT_ERR flag is set and + abort if it is. The configuration change that resulted in the + bnxt_open_nic() call will fail but the NIC will be brought to a + consistent IF_DOWN state. + + Without this patch, if bnxt_open_nic() were to continue in this error + state, it may crash like this: + + [ 1648.659736] BUG: unable to handle kernel NULL pointer dereference at (null) + [ 1648.659768] IP: [] bnxt_alloc_mem+0x50a/0x1140 [bnxt_en] + [ 1648.659796] PGD 101e1b3067 PUD 101e1b2067 PMD 0 + [ 1648.659813] Oops: 0000 [#1] SMP + [ 1648.659825] Modules linked in: xt_CHECKSUM iptable_mangle ipt_MASQUERADE nf_nat_masquerade_ipv4 iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 xt_conntrack nf_conntrack ipt_REJECT nf_reject_ipv4 tun bridge stp llc ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter sunrpc dell_smbios dell_wmi_descriptor dcdbas amd64_edac_mod edac_mce_amd kvm_amd kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul glue_helper ablk_helper vfat cryptd fat pcspkr ipmi_ssif sg k10temp i2c_piix4 wmi ipmi_si ipmi_devintf ipmi_msghandler tpm_crb acpi_power_meter sch_fq_codel ip_tables xfs libcrc32c sd_mod crc_t10dif crct10dif_generic mgag200 i2c_algo_bit drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm ahci drm libahci megaraid_sas crct10dif_pclmul crct10dif_common + [ 1648.660063] tg3 libata crc32c_intel bnxt_en(OE) drm_panel_orientation_quirks devlink ptp pps_core dm_mirror dm_region_hash dm_log dm_mod fuse + [ 1648.660105] CPU: 13 PID: 3867 Comm: ethtool 
Kdump: loaded Tainted: G OE ------------ 3.10.0-1152.el7.x86_64 #1 + [ 1648.660911] Hardware name: Dell Inc. PowerEdge R7515/0R4CNN, BIOS 1.2.14 01/28/2020 + [ 1648.661662] task: ffff94e64cbc9080 ti: ffff94f55df1c000 task.ti: ffff94f55df1c000 + [ 1648.662409] RIP: 0010:[] [] bnxt_alloc_mem+0x50a/0x1140 [bnxt_en] + [ 1648.663171] RSP: 0018:ffff94f55df1fba8 EFLAGS: 00010202 + [ 1648.663927] RAX: 0000000000000000 RBX: ffff94e6827e0000 RCX: 0000000000000000 + [ 1648.664684] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff94e6827e08c0 + [ 1648.665433] RBP: ffff94f55df1fc20 R08: 00000000000001ff R09: 0000000000000008 + [ 1648.666184] R10: 0000000000000d53 R11: ffff94f55df1f7ce R12: ffff94e6827e08c0 + [ 1648.666940] R13: ffff94e6827e08c0 R14: ffff94e6827e08c0 R15: ffffffffb9115e40 + [ 1648.667695] FS: 00007f8aadba5740(0000) GS:ffff94f57eb40000(0000) knlGS:0000000000000000 + [ 1648.668447] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + [ 1648.669202] CR2: 0000000000000000 CR3: 0000001022772000 CR4: 0000000000340fe0 + [ 1648.669966] Call Trace: + [ 1648.670730] [] ? bnxt_need_reserve_rings+0x9d/0x170 [bnxt_en] + [ 1648.671496] [] __bnxt_open_nic+0x8a/0x9a0 [bnxt_en] + [ 1648.672263] [] ? bnxt_close_nic+0x59/0x1b0 [bnxt_en] + [ 1648.673031] [] bnxt_open_nic+0x1b/0x50 [bnxt_en] + [ 1648.673793] [] bnxt_set_ringparam+0x6c/0xa0 [bnxt_en] + [ 1648.674550] [] dev_ethtool+0x1334/0x21a0 + [ 1648.675306] [] dev_ioctl+0x1ef/0x5f0 + [ 1648.676061] [] sock_do_ioctl+0x4d/0x60 + [ 1648.676810] [] sock_ioctl+0x1eb/0x2d0 + [ 1648.677548] [] do_vfs_ioctl+0x3a0/0x5b0 + [ 1648.678282] [] ? 
__do_page_fault+0x238/0x500 + [ 1648.679016] [] SyS_ioctl+0xa1/0xc0 + [ 1648.679745] [] system_call_fastpath+0x25/0x2a + [ 1648.680461] Code: 9e 60 01 00 00 0f 1f 40 00 45 8b 8e 48 01 00 00 31 c9 45 85 c9 0f 8e 73 01 00 00 66 0f 1f 44 00 00 49 8b 86 a8 00 00 00 48 63 d1 <48> 8b 14 d0 48 85 d2 0f 84 46 01 00 00 41 8b 86 44 01 00 00 c7 + [ 1648.681986] RIP [] bnxt_alloc_mem+0x50a/0x1140 [bnxt_en] + [ 1648.682724] RSP + [ 1648.683451] CR2: 0000000000000000 + + Fixes: ec5d31e3c15d ("bnxt_en: Handle firmware reset status during IF_UP.") + Reviewed-by: Vasundhara Volam + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit f75d9a0aa96721d20011cd5f8c7a24eb32728589 +Author: Vasundhara Volam +Date: Mon Oct 26 00:18:19 2020 -0400 + + bnxt_en: Re-write PCI BARs after PCI fatal error. + + When a PCIe fatal error occurs, the internal latched BAR addresses + in the chip get reset even though the BAR register values in config + space are retained. + + pci_restore_state() will not rewrite the BAR addresses if the + BAR address values are valid, causing the chip's internal BAR addresses + to stay invalid. So we need to zero the BAR registers during PCIe fatal + error to force pci_restore_state() to restore the BAR addresses. These + write cycles to the BAR registers will cause the proper BAR addresses to + latch internally. + + Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 631ce27a3006fc0b732bfd589c6df505f62eadd9 +Author: Vasundhara Volam +Date: Mon Oct 26 00:18:18 2020 -0400 + + bnxt_en: Invoke cancel_delayed_work_sync() for PFs also. + + As part of the commit b148bb238c02 + ("bnxt_en: Fix possible crash in bnxt_fw_reset_task()."), + cancel_delayed_work_sync() is called only for VFs to fix a possible + crash by cancelling any pending delayed work items. 
It was assumed + by mistake that the flush_workqueue() call on the PF would flush + delayed work items as well. + + As flush_workqueue() does not cancel the delayed workqueue, extend + the fix for PFs. This fix will avoid the system crash, if there are + any pending delayed work items in fw_reset_task() during driver's + .remove() call. + + Unify the workqueue cleanup logic for both PF and VF by calling + cancel_work_sync() and cancel_delayed_work_sync() directly in + bnxt_remove_one(). + + Fixes: b148bb238c02 ("bnxt_en: Fix possible crash in bnxt_fw_reset_task().") + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 21d6a11e2cadfb8446265a3efff0e2aad206e15e +Author: Vasundhara Volam +Date: Mon Oct 26 00:18:17 2020 -0400 + + bnxt_en: Fix regression in workqueue cleanup logic in bnxt_remove_one(). + + A recent patch has moved the workqueue cleanup logic before + calling unregister_netdev() in bnxt_remove_one(). This caused a + regression because the workqueue can be restarted if the device is + still open. Workqueue cleanup must be done after unregister_netdev(). + The workqueue will not restart itself after the device is closed. + + Call bnxt_cancel_sp_work() after unregister_netdev() and + call bnxt_dl_fw_reporters_destroy() after that. This fixes the + regression and the original NULL ptr dereference issue. + + Fixes: b16939b59cc0 ("bnxt_en: Fix NULL ptr dereference crash in bnxt_fw_reset_task()") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 1388875b391689459659335a1fdbe5c5d45f3500 +Author: Vasundhara Volam +Date: Mon Oct 12 05:10:54 2020 -0400 + + bnxt_en: Add stored FW version info to devlink info_get cb. + + This patch adds FW versions stored in the flash to devlink info_get + callback. Return the correct fw.psid running version using the + newly added bp->nvm_cfg_ver. 
+ + v2: + Ensure stored pkg_name string is NULL terminated when copied to + devlink. + + Return directly from the last call to bnxt_dl_info_put(). + + If the FW call to get stored version fails for any reason, return + success immediately to devlink without the stored versions. + + Reviewed-by: Andy Gospodarek + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-10-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 7154917a12b20ea46b09097a22342bcdf6adac66 +Author: Vasundhara Volam +Date: Mon Oct 12 05:10:53 2020 -0400 + + bnxt_en: Refactor bnxt_dl_info_get(). + + Add a new function bnxt_dl_info_put() to simplify the code, as there + are more stored firmware version fields to be added in the next patch. + + Also, rename fw_ver variable name to ncsi_ver for better naming while + copying to devlink info_get cb. + + v2: + Ensure active_pkg_name string is NULL terminated when copied to + devlink. + + Return directly from the last call to bnxt_dl_info_put(). + + Reviewed-by: Pavan Chebbi + Reviewed-by: Andy Gospodarek + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-9-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 4933f6753b50367b581084927bf19efa1bcaac86 +Author: Vasundhara Volam +Date: Mon Oct 12 05:10:52 2020 -0400 + + bnxt_en: Add bnxt_hwrm_nvm_get_dev_info() to query NVM info. + + Add a new bnxt_hwrm_nvm_get_dev_info() to query firmware version + information via NVM_GET_DEV_INFO firmware command. Use it to + get the running version of the NVM configuration information. + + This new function will also be used in subsequent patches to get the + stored firmware versions. 
+ + Reviewed-by: Andy Gospodarek + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-8-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 8eddb3e7ce124dd6375d3664f1aae13873318b0f +Author: Michael Chan +Date: Mon Oct 12 05:10:51 2020 -0400 + + bnxt_en: Log unknown link speed appropriately. + + If the VF virtual link is set to always enabled, the speed may be + unknown when the physical link is down. The driver currently logs + the link speed as 4294967295 Mbps which is SPEED_UNKNOWN. Modify + the link up log message as "speed unknown" which makes more sense. + + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-7-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit c966c67c09921e117991b54a83e1e9ac6dbc3899 +Author: Michael Chan +Date: Mon Oct 12 05:10:50 2020 -0400 + + bnxt_en: Log event_data1 and event_data2 when handling RESET_NOTIFY event. + + Log these values that contain useful firmware state information. + + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-6-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 03ab8ca1e920f6adb35fc7ea80e18006d652d465 +Author: Michael Chan +Date: Mon Oct 12 05:10:49 2020 -0400 + + bnxt_en: Simplify bnxt_async_event_process(). + + event_data1 and event_data2 are used when processing most events. + Store these in local variables at the beginning of the function to + simplify many of the case statements. 
+ + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-5-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 8fb35cd302f74e63db4ce43a44e5e5fae44d80e3 +Author: Michael Chan +Date: Mon Oct 12 05:10:48 2020 -0400 + + bnxt_en: Set driver default message level. + + Currently, bp->msg_enable has default value of 0. It is more useful + to have the commonly used NETIF_MSG_DRV and NETIF_MSG_HW enabled by + default. + + v2: Change the fall back bnxt_reset_task() inside bnxt_rx_ring_reset() + to silent mode. With older fw, we would take the fall back path and + it would be very noisy. + + Reviewed-by: Edwin Peer + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-4-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 6896cb35eec5020e889f755b176c41951f5976c6 +Author: Vasundhara Volam +Date: Mon Oct 12 05:10:47 2020 -0400 + + bnxt_en: Enable online self tests for multi-host/NPAR mode. + + Online self tests are not disruptive and can be run in NPAR mode + and in multi-host NIC as well. + + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-3-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit cf223bfaf791f4c6ab6a5c213b91b9311ac9f2bd +Author: Vasundhara Volam +Date: Mon Oct 12 05:10:46 2020 -0400 + + bnxt_en: Return -EROFS to user space, if NVM writes are not permitted. + + If NVRAM resources are locked, NVM writes are not permitted. In such + scenarios, firmware returns HWRM_ERR_CODE_RESOURCE_LOCKED error to + firmware commands. 
+ + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Link: https://lore.kernel.org/r/1602493854-29283-2-git-send-email-michael.chan@broadcom.com + Signed-off-by: Jakub Kicinski + +commit 8d4bd96b54dcb5997d1035f4dfd300c04d07ec11 +Author: Michael Chan +Date: Sun Oct 4 15:23:01 2020 -0400 + + bnxt_en: Eliminate unnecessary RX resets. + + Currently, the driver will schedule RX ring reset when we get a buffer + error in the RX completion record. These RX buffer errors can be due + to normal out-of-buffer conditions or a permanent error in the RX + ring. Because the driver cannot distinguish between these 2 + conditions, we assume all these buffer errors require reset. + + This is very disruptive when it is just a normal out-of-buffer + condition. Newer firmware will now monitor the rings for the permanent + failure and will send a notification to the driver when it happens. + This allows the driver to reset only when such a notification is + received. In environments where we have predominantly out-of-buffer + conditions, we now can avoid these unnecessary resets. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1b5c8b63d6a4a2a33ce279ff8d42e40dec3b04ce +Author: Michael Chan +Date: Sun Oct 4 15:23:00 2020 -0400 + + bnxt_en: Reduce unnecessary message log during RX errors. + + There is logic in the RX path to detect unexpected handles in the + RX completion. We'll print a warning and schedule a reset. The + next expected handle is then set to 0xffff which is guaranteed to + not match any valid handle. This will force all remaining packets in + the ring to be discarded before the reset. There can be hundreds of + these packets remaining in the ring and there is no need to print the + warnings for these forced errors. + + Reviewed-by: Pavan Chebbi + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 8a27d4b9e54cdc9e3f45d3d11b0c898e92dace39 +Author: Michael Chan +Date: Sun Oct 4 15:22:59 2020 -0400 + + bnxt_en: Add a software counter for RX ring reset. + + Add a per ring rx_resets counter to count these RX resets. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8fbf58e17dce8f250dda9ad6b0a49b3041f0af14 +Author: Michael Chan +Date: Sun Oct 4 15:22:58 2020 -0400 + + bnxt_en: Implement RX ring reset in response to buffer errors. + + On some older chips, it is necessary to do a reset when we get buffer + errors associated with an RX ring. These buffer errors may become + frequent if the RX ring underruns under heavy traffic. The current + code does a global reset of all resources when this happens. This + works but creates a big disruption of all rings when one RX ring is + having problem. This patch implements a localized RX ring reset of + just the RX ring having the issue. All other rings including all + TX rings will not be affected by this single RX ring reset. + + Only the older chips prior to the P5 class supports this reset. + Because it is not a global reset, packets may still be arriving + while we are calling firmware to reset that ring. We need to be + sure that we don't post any buffers during this time while the + ring is undergoing reset. After firmware completes successfully, + the ring will be in the reset state with no buffers and we can start + filling it with new buffers and posting them. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7737d325f867faa5e809c607699d2253dfb6ab09 +Author: Michael Chan +Date: Sun Oct 4 15:22:57 2020 -0400 + + bnxt_en: Refactor bnxt_init_one_rx_ring(). + + bnxt_init_one_rx_ring() includes logic to initialize the BDs for one RX + ring and to allocate the buffers. Separate the allocation logic into a + new bnxt_alloc_one_rx_ring() function. 
The allocation function will be + used later to allocate new buffers for one specified RX ring when we + reset that RX ring. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 975bc99a4a397d1b1584a99b4adb7948b6e6944b +Author: Michael Chan +Date: Sun Oct 4 15:22:56 2020 -0400 + + bnxt_en: Refactor bnxt_free_rx_skbs(). + + bnxt_free_rx_skbs() frees all the allocated buffers and SKBs for + every RX ring. Refactor this function by calling a new function + bnxt_free_one_rx_ring_skbs() to free these buffers on one specified + RX ring at a time. This is preparation work for resetting one RX + ring during run-time. + + Reviewed-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fc8864e0b6ee2120d9b438f411159afe99348ff0 +Author: Michael Chan +Date: Sun Oct 4 15:22:55 2020 -0400 + + bnxt_en: Log FW health status info, if reset is aborted. + + If firmware does not come out of reset, log FW health status info + to provide more information on firmware status. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87f7ab8d6f88cc594e3417fd0a7238b9c72e88eb +Author: Edwin Peer +Date: Sun Oct 4 15:22:54 2020 -0400 + + bnxt_en: perform no master recovery during startup + + The NS3 SoC platforms require assistance from the OP-TEE to recover + firmware if a crash occurs while no driver is bound. The + CRASHED_NO_MASTER condition is recorded in the firmware status register + during the crash to indicate when driver intervention is needed to + coordinate a firmware reload. This condition is detected during early + driver initialization in order to effect a firmware fastboot on + supported platforms when necessary. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit ba02629ff6cbadce2c6452a4942ccacef658e5c0 +Author: Edwin Peer +Date: Sun Oct 4 15:22:53 2020 -0400 + + bnxt_en: log firmware status on firmware init failure + + Firmware now supports device independent discovery of the status + register location. This status register can provide more detailed + information about firmware errors, especially if problems occur + before the HWRM interface is functioning. Attempt to map this + register if it is present and report the firmware status on firmware + init failures. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3e9ec2bb932d746b0ae65aba79697457e7c83f90 +Author: Edwin Peer +Date: Sun Oct 4 15:22:52 2020 -0400 + + bnxt_en: refactor bnxt_alloc_fw_health() + + The allocator for the firmware health structure conflates allocation + and capability checks, limiting the reusability of the code. This patch + separates out the capability check and disablement and improves the + warning message to better describe the consequences of an allocation + failure. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 424174f14eeccfea747d9f078a41bd393ffe0bde +Author: Vasundhara Volam +Date: Sun Oct 4 15:22:51 2020 -0400 + + bnxt_en: Update firmware interface spec to 1.10.1.68. + + Main changes is to extend hwrm_nvm_get_dev_info_output() for stored + firmware versions and a new flag is added to fw_status_reg. + + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4301304b04cc79f7b79bc982469fce16397f949f +Author: Michael Chan +Date: Sun Sep 27 13:42:20 2020 -0400 + + bnxt_en: Improve preset max value for ethtool -l. + + The current logic that calculates the preset maximum value for combined + channel does not take into account the rings used for XDP and mqprio + TCs. Each of these features will reduce the number of TX rings. 
Add + the logic to divide the TX rings accordingly based on whether the + device is currently in XDP mode and whether TCs are in use. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ccd6a9dcab79dc98d8d93d5aa3f68d71494f93fe +Author: Michael Chan +Date: Sun Sep 27 13:42:19 2020 -0400 + + bnxt_en: Implement ethtool set_fec_param() method. + + This feature allows the user to set the different FEC modes on the NIC + port. Any new setting will take effect immediately after a link toggle. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2046e3c356fd18f577cd0180a299cddd10fe5752 +Author: Michael Chan +Date: Sun Sep 27 13:42:18 2020 -0400 + + bnxt_en: Report Active FEC encoding during link up. + + The current code is reporting the FEC configured settings during link up. + Change it to report the more useful active FEC encoding that may be + negotiated or auto detected. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8b2775890ad88d20a8f968aa88dbae487961240f +Author: Michael Chan +Date: Sun Sep 27 13:42:17 2020 -0400 + + bnxt_en: Report FEC settings to ethtool. + + Implement .get_fecparam() method to report the configured and active FEC + settings. Also report the supported and advertised FEC settings to + the .get_link_ksettings() method. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 745b5c653913829ede6d4466f36b35426d6e1823 +Author: Edwin Peer +Date: Sun Sep 27 13:42:16 2020 -0400 + + bnxt_en: avoid link reset if speed is not changed + + PORT_PHY_CONFIG is always sent with REQ_FLAGS_RESET_PHY set. This flag + must be set in order for the firmware to institute the requested PHY + change immediately, but it results in a link flap. 
This is unnecessary + and results in an improved user experience if the PHY reconfiguration + is avoided when the user requested speed does not constitute a change. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3128e811b134128aadcf8582dded81b70980f23a +Author: Michael Chan +Date: Sun Sep 27 13:42:15 2020 -0400 + + bnxt_en: Handle ethernet link being disabled by firmware. + + On some 200G dual port NICs, if one port is configured to 200G, + firmware will disable the ethernet link on the other port. Firmware + will send notification to the driver for the disabled port when this + happens. Define a new field in the link_info structure to keep track + of this state. The new phy_state field replaces the unused loop_back + field. + + Log a message when the phy_state changes state. In the disabled state, + disallow any PHY configurations on the disabled port as the firmware + will fail all calls to configure the PHY in this state. + + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 532262ba3b84fe77d7d420a24fa66b167bb0d93e +Author: Edwin Peer +Date: Sun Sep 27 13:42:14 2020 -0400 + + bnxt_en: ethtool: support PAM4 link speeds up to 200G + + Add ethtool PAM4 link modes for: + 50000baseCR_Full + 100000baseCR2_Full + 200000baseCR4_Full + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d058426ea84fff2bb941ecb7291a876571860dd2 +Author: Edwin Peer +Date: Sun Sep 27 13:42:13 2020 -0400 + + bnxt_en: add basic infrastructure to support PAM4 link speeds + + The firmware interface has added support for new link speeds using + PAM4 modulation. Expand the bnxt_link_info structure to closely + mirror the new firmware structures. Add logic to copy the PAM4 + capabilities and settings from the firmware. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit f00530bf3c9828d22a15043da5bc142760197fc2 +Author: Edwin Peer +Date: Sun Sep 27 13:42:12 2020 -0400 + + bnxt_en: refactor bnxt_get_fw_speed() + + It will be necessary to update more than one field in the link_info + structure when PAM4 speeds are added in a later patch. Instead of + merely translating ethtool speed values to firmware speed values, + change the responsibility of this function to update all the necessary + link_info fields required to force the speed change to the desired + ethtool value. This also reduces code duplication somewhat at the two + call sites, which otherwise both have to independently update link_info + fields to turn off auto negotiation advertisements. + + Also use the appropriate REQ_FORCE_LINK_SPEED definitions. These happen + to have the same values, but req_link_speed is ultimately passed as + force_link_speed in HWRM_PORT_PHY_CFG which is not defined in terms of + REQ_AUTO_LINK_SPEED. + + Reviewed-by: Scott Branden + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c916062a8917463a469539eccee50d31e63c91c1 +Author: Edwin Peer +Date: Sun Sep 27 13:42:11 2020 -0400 + + bnxt_en: refactor code to limit speed advertising + + Extract the code for determining an advertised speed is no longer + supported into a separate function. This will avoid some code + duplication in a later patch when supporting PAM4 speeds, since + these speeds are specified in a separate field. + + Reviewed-by: Scott Branden + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9d6b648c3112012dac6d804d331cdd3a2247765c +Author: Michael Chan +Date: Sun Sep 27 13:42:10 2020 -0400 + + bnxt_en: Update firmware interface spec to 1.10.1.65. + + The main changes include FEC, ECN statistics, HWRM_PORT_PHY_QCFG + response size reduction, and a new counter added to + ctx_hw_stats_ext struct to support the new 58818 chip. 
+ + The ctx_hw_stats_ext structure is now the superset supporting the new + 58818 chips and the prior P5 chips. Add a new flag to identify the new + chip and use constants for the chip specific ring statistics sizes + instead of the size of the structure. + + Because the HWRM_PORT_PHY_QCFG response structure size has shrunk back + to 96 bytes, the workaround added earlier to limit the size of this + message for forwarding to the VF can be removed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bc75c054f04048517e0b153ab38d973bbcdcef59 +Author: Jacob Keller +Date: Fri Sep 25 13:46:06 2020 -0700 + + devlink: convert flash_update to use params structure + + The devlink core recently gained support for checking whether the driver + supports a flash_update parameter, via `supported_flash_update_params`. + However, parameters are specified as function arguments. Adding a new + parameter still requires modifying the signature of the .flash_update + callback in all drivers. + + Convert the .flash_update function to take a new `struct + devlink_flash_update_params` instead. By using this structure, and the + `supported_flash_update_params` bit field, a new parameter to + flash_update can be added without requiring modification to existing + drivers. + + As before, all parameters except file_name will require driver opt-in. + Because file_name is a necessary field for the flash_update to make + sense, no "SUPPORTED" bitflag is provided and it is always considered + valid. All future additional parameters will require a new bit in the + supported_flash_update_params bitfield. + + Signed-off-by: Jacob Keller + Reviewed-by: Jakub Kicinski + Cc: Jiri Pirko + Cc: Jakub Kicinski + Cc: Jonathan Corbet + Cc: Michael Chan + Cc: Bin Luo + Cc: Saeed Mahameed + Cc: Leon Romanovsky + Cc: Ido Schimmel + Cc: Danielle Ratson + Signed-off-by: David S. 
Miller + +commit 22ec3d232f8511b21355fcdb6fb2a4eced3decd8 +Author: Jacob Keller +Date: Fri Sep 25 13:46:05 2020 -0700 + + devlink: check flash_update parameter support in net core + + When implementing .flash_update, drivers which do not support + per-component update are manually checking the component parameter to + verify that it is NULL. Without this check, the driver might accept an + update request with a component specified even though it will not honor + such a request. + + Instead of having each driver check this, move the logic into + net/core/devlink.c, and use a new `supported_flash_update_params` field + in the devlink_ops. Drivers which will support per-component update must + now specify this by setting DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT in + the supported_flash_update_params in their devlink_ops. + + This helps ensure that drivers do not forget to check for a NULL + component if they do not support per-component update. This also enables + a slightly better error message by enabling the core stack to set the + netlink bad attribute message to indicate precisely the unsupported + attribute in the message. + + Going forward, any new additional parameter to flash update will require + a bit in the supported_flash_update_params bitfield. + + Signed-off-by: Jacob Keller + Reviewed-by: Jakub Kicinski + Cc: Jiri Pirko + Cc: Jonathan Corbet + Cc: Michael Chan + Cc: Bin Luo + Cc: Saeed Mahameed + Cc: Leon Romanovsky + Cc: Ido Schimmel + Cc: Danielle Ratson + Cc: Shannon Nelson + Signed-off-by: David S. Miller + +commit c07fa08f02f4053b51dae1a6ee08bc644dc7846d +Author: Michael Chan +Date: Sun Sep 20 21:08:59 2020 -0400 + + bnxt_en: Fix wrong flag value passed to HWRM_PORT_QSTATS_EXT fw call. + + The wrong flag value caused the firmware call to return actual port + counters instead of the counter masks. This messed up the counter + overflow logic and caused erratic extended port counters to be + displayed under ethtool -S. 
+ + Fixes: 531d1d269c1d ("bnxt_en: Retrieve hardware masks for port counters.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d2b42d010f2941c2a85d970500b9d4ba79765593 +Author: Michael Chan +Date: Sun Sep 20 21:08:58 2020 -0400 + + bnxt_en: Fix HWRM_FUNC_QSTATS_EXT firmware call. + + Fix it to set the required fid input parameter. The firmware call + fails without this patch. + + Fixes: d752d0536c97 ("bnxt_en: Retrieve hardware counter masks from firmware if available.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f0f47b2f8cbc22e6e611ef6cc988f5452e5aec3f +Author: Vasundhara Volam +Date: Sun Sep 20 21:08:57 2020 -0400 + + bnxt_en: Return -EOPNOTSUPP for ETHTOOL_GREGS on VFs. + + Debug firmware commands are not supported on VFs to read registers. + This patch avoids logging unnecessary access_denied error on VFs + when user calls ETHTOOL_GREGS. + + By returning error in get_regs_len() method on the VF, the get_regs() + method will not be called. + + Fixes: b5d600b027eb ("bnxt_en: Add support for 'ethtool -d'") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a53906908148d64423398a62c4435efb0d09652c +Author: Michael Chan +Date: Sun Sep 20 21:08:56 2020 -0400 + + bnxt_en: Protect bnxt_set_eee() and bnxt_set_pauseparam() with mutex. + + All changes related to bp->link_info require the protection of the + link_lock mutex. It's not sufficient to rely just on RTNL. + + Fixes: 163e9ef63641 ("bnxt_en: Fix race when modifying pause settings.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d69753fa1ecb3218b56b022722f7a5822735b876 +Author: Edwin Peer +Date: Sun Sep 20 21:08:55 2020 -0400 + + bnxt_en: return proper error codes in bnxt_show_temp + + Returning "unknown" as a temperature value violates the hwmon interface + rules. 
Appropriate error codes should be returned via device_attribute + show instead. These will ultimately be propagated to the user via the + file system interface. + + In addition to the corrected error handling, it is an even better idea to + not present the sensor in sysfs at all if it is known that the read will + definitely fail. Given that temp1_input is currently the only sensor + reported, ensure no hwmon registration if TEMP_MONITOR_QUERY is not + supported or if it will fail due to access permissions. Something smarter + may be needed if and when other sensors are added. + + Fixes: 12cce90b934b ("bnxt_en: fix HWRM error when querying VF temperature") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 492adcf481292521ee8df1a482dc12acdb28aa15 +Author: Vasundhara Volam +Date: Sun Sep 20 21:08:54 2020 -0400 + + bnxt_en: Use memcpy to copy VPD field info. + + Using strlcpy() to copy from VPD is not correct because VPD strings + are not necessarily NULL terminated. Use memcpy() to copy the VPD + length up to the destination buffer size - 1. The destination is + zeroed memory so it will always be NULL terminated. + + Fixes: a0d0fd70fed5 ("bnxt_en: Read partno and serialno of the board from VPD") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 423cffcf6c70031cb2265d0476087450ed89db59 +Author: Jakub Kicinski +Date: Mon Sep 14 17:11:56 2020 -0700 + + bnxt: add pause frame stats + + These stats are already reported in ethtool -S. + Michael confirms they are equivalent to standard stats. + + v2: - fix sparse warning about endian by using the macro + - use u64 for pointer type + + Signed-off-by: Jakub Kicinski + Reviewed-by: Saeed Mahameed + Reviewed-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 5198d545dba8ad893f5e5a029ca8d43ee7bcf011 +Author: Jakub Kicinski +Date: Wed Sep 9 10:37:51 2020 -0700 + + net: remove napi_hash_del() from driver-facing API + + We allow drivers to call napi_hash_del() before calling + netif_napi_del() to batch RCU grace periods. This makes + the API asymmetric and leaks internal implementation details. + Soon we will want the grace period to protect more than just + the NAPI hash table. + + Restructure the API and have drivers call a new function - + __netif_napi_del() if they want to take care of RCU waits. + + Note that only core was checking the return status from + napi_hash_del() so the new helper does not report if the + NAPI was actually deleted. + + Some notes on driver oddness: + - veth observed the grace period before calling netif_napi_del() + but that should not matter + - myri10ge observed normal RCU flavor + - bnx2x and enic did not actually observe the grace period + (unless they did so implicitly) + - virtio_net and enic only unhashed Rx NAPIs + + The last two points seem to indicate that the calls to + napi_hash_del() were a left over rather than an optimization. + Regardless, it's easy enough to correct them. + + This patch may introduce extra synchronize_net() calls for + interfaces which set NAPI_STATE_NO_BUSY_POLL and depend on + free_netdev() to call netif_napi_del(). This seems inevitable + since we want to use RCU for netpoll dev->napi_list traversal, + and almost no drivers set IFF_DISABLE_NETPOLL. + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit b16939b59cc00231a75d224fd058d22c9d064976 +Author: Vasundhara Volam +Date: Sat Sep 5 22:55:37 2020 -0400 + + bnxt_en: Fix NULL ptr dereference crash in bnxt_fw_reset_task() + + bnxt_fw_reset_task() which runs from a workqueue can race with + bnxt_remove_one(). For example, if firmware reset and VF FLR are + happening at about the same time. 
+ + bnxt_remove_one() already cancels the workqueue and waits for it + to finish, but we need to do this earlier before the devlink + reporters are destroyed. This will guarantee that + the devlink reporters will always be valid when bnxt_fw_reset_task() + is still running. + + Fixes: b148bb238c02 ("bnxt_en: Fix possible crash in bnxt_fw_reset_task().") + Reviewed-by: Edwin Peer + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit b340dc680ed48dcc05b56e1ebe1b9535813c3ee0 +Author: Vasundhara Volam +Date: Sat Sep 5 22:55:36 2020 -0400 + + bnxt_en: Avoid sending firmware messages when AER error is detected. + + When the driver goes through PCIe AER reset in error state, all + firmware messages will timeout because the PCIe bus is no longer + accessible. This can lead to AER reset taking many minutes to + complete as each firmware command takes time to timeout. + + Define a new macro BNXT_NO_FW_ACCESS() to skip these firmware messages + when either firmware is in fatal error state or when + pci_channel_offline() is true. It now takes a more reasonable 20 to + 30 seconds to complete AER recovery. + + Fixes: b4fff2079d10 ("bnxt_en: Do not send firmware messages if firmware is in error state.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 96ecdcc992eb7f468b2cf829b0f5408a1fad4668 +Author: Jakub Kicinski +Date: Wed Aug 26 12:40:07 2020 -0700 + + bnxt: don't enable NAPI until rings are ready + + Netpoll can try to poll napi as soon as napi_enable() is called. 
+ It crashes trying to access a doorbell which is still NULL: + + BUG: kernel NULL pointer dereference, address: 0000000000000000 + CPU: 59 PID: 6039 Comm: ethtool Kdump: loaded Tainted: G S 5.9.0-rc1-00469-g5fd99b5d9950-dirty #26 + RIP: 0010:bnxt_poll+0x121/0x1c0 + Code: c4 20 44 89 e0 5b 5d 41 5c 41 5d 41 5e 41 5f c3 41 8b 86 a0 01 00 00 41 23 85 18 01 00 00 49 8b 96 a8 01 00 00 0d 00 00 00 24 <89> 02 + 41 f6 45 77 02 74 cb 49 8b ae d8 01 00 00 31 c0 c7 44 24 1a + netpoll_poll_dev+0xbd/0x1a0 + __netpoll_send_skb+0x1b2/0x210 + netpoll_send_udp+0x2c9/0x406 + write_ext_msg+0x1d7/0x1f0 + console_unlock+0x23c/0x520 + vprintk_emit+0xe0/0x1d0 + printk+0x58/0x6f + x86_vector_activate.cold+0xf/0x46 + __irq_domain_activate_irq+0x50/0x80 + __irq_domain_activate_irq+0x32/0x80 + __irq_domain_activate_irq+0x32/0x80 + irq_domain_activate_irq+0x25/0x40 + __setup_irq+0x2d2/0x700 + request_threaded_irq+0xfb/0x160 + __bnxt_open_nic+0x3b1/0x750 + bnxt_open_nic+0x19/0x30 + ethtool_set_channels+0x1ac/0x220 + dev_ethtool+0x11ba/0x2240 + dev_ioctl+0x1cf/0x390 + sock_do_ioctl+0x95/0x130 + + Reported-by: Rob Sherwood + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit b43b9f53fbb06faa4f2fcdbf235db3289026e2e4 +Author: Michael Chan +Date: Wed Aug 26 01:08:39 2020 -0400 + + bnxt_en: Setup default RSS map in all scenarios. + + The recent changes to support user-defined RSS map assume that RX + rings are always reserved and the default RSS map is set after the + RX rings are successfully reserved. If the firmware spec is older + than 1.6.1, no ring reservations are required and the default RSS + map is not setup at all. In another scenario where the fw Resource + Manager is older, RX rings are not reserved and we also end up with + no valid RSS map. + + Fix both issues in bnxt_need_reserve_rings(). 
In both scenarios + described above, we don't need to reserve RX rings so we need to + call this new function bnxt_check_rss_map_no_rmgr() to setup the + default RSS map when needed. + + Without valid RSS map, the NIC won't receive packets properly. + + Fixes: 1667cbf6a4eb ("bnxt_en: Add logical RSS indirection table structure.") + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5fa65524f6e0b9528c6ceac3f33f7e8f0c3a084a +Author: Edwin Peer +Date: Wed Aug 26 01:08:38 2020 -0400 + + bnxt_en: init RSS table for Minimal-Static VF reservation + + There are no VF rings available during probe when the device is configured + using the Minimal-Static reservation strategy. In this case, the RSS + indirection table can only be initialized later, during bnxt_open_nic(). + However, this was not happening because the rings will already have been + reserved via bnxt_init_dflt_ring_mode(), causing bnxt_need_reserve_rings() + to return false in bnxt_reserve_rings() and bypass the RSS table init. + + Solve this by pushing the call to bnxt_set_dflt_rss_indir_tbl() into + __bnxt_reserve_rings(), which is common to both paths and is called + whenever ring configuration is changed. After doing this, the RSS table + init that must be called from bnxt_init_one() happens implicitly via + bnxt_set_default_rings(), necessitating doing the allocation earlier in + order to avoid a null pointer dereference. + + Fixes: bd3191b5d87d ("bnxt_en: Implement ethtool -X to set indirection table.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 12cce90b934bf2b0ed9c339b4d5503e69954351a +Author: Edwin Peer +Date: Wed Aug 26 01:08:37 2020 -0400 + + bnxt_en: fix HWRM error when querying VF temperature + + Firmware returns RESOURCE_ACCESS_DENIED for HWRM_TEMP_MONITORY_QUERY for + VFs. 
This produces unpleasing error messages in the log when temp1_input + is queried via the hwmon sysfs interface from a VF. + + The error is harmless and expected, so silence it and return unknown as + the value. Since the device temperature is not particularly sensitive + information, provide flexibility to change this policy in future by + silencing the error rather than avoiding the HWRM call entirely for VFs. + + Fixes: cde49a42a9bb ("bnxt_en: Add hwmon sysfs support to read temperature") + Cc: Marc Smith + Reported-by: Marc Smith + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b148bb238c02f0c7797efed026e9bba5892d2172 +Author: Michael Chan +Date: Wed Aug 26 01:08:36 2020 -0400 + + bnxt_en: Fix possible crash in bnxt_fw_reset_task(). + + bnxt_fw_reset_task() is run from a delayed workqueue. The current + code is not cancelling the workqueue in the driver's .remove() + method and it can potentially crash if the device is removed with + the workqueue still pending. + + The fix is to clear the BNXT_STATE_IN_FW_RESET flag and then cancel + the delayed workqueue in bnxt_remove_one(). bnxt_queue_fw_reset_work() + also needs to check that this flag is set before scheduling. This + will guarantee that no rescheduling will be done after it is cancelled. + + Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit df3875ec550396974b1d8a518bd120d034738236 +Author: Vasundhara Volam +Date: Wed Aug 26 01:08:35 2020 -0400 + + bnxt_en: Fix PCI AER error recovery flow + + When a PCI error is detected the PCI state could be corrupt, save + the PCI state after initialization and restore it after the slot + reset. + + Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 7de651490c27ebc5edb5c7224c368bd0cd5b3862 +Author: Michael Chan +Date: Wed Aug 26 01:08:34 2020 -0400 + + bnxt_en: Fix ethtool -S statitics with XDP or TCs enabled. + + We are returning the wrong count for ETH_SS_STATS in get_sset_count() + when XDP or TCs are enabled. In a recent commit, we got rid of + irrelevant counters when the ring is RX only or TX only, but we + did not make the proper adjustments for the count. As a result, + when we have XDP or TCs enabled, we are returning an excess count + because some of the rings are TX only. This causes ethtool -S to + display extra counters with no counter names. + + Fix bnxt_get_num_ring_stats() by not assuming that all rings will + always have RX and TX counters in combined mode. + + Fixes: 125592fbf467 ("bnxt_en: show only relevant ethtool stats for a TX or RX ring") + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dbbfa96ad920c50d58bcaefa57f5f33ceef9d00e +Author: Vasundhara Volam +Date: Wed Aug 26 01:08:33 2020 -0400 + + bnxt_en: Check for zero dir entries in NVRAM. + + If firmware goes into unstable state, HWRM_NVM_GET_DIR_INFO firmware + command may return zero dir entries. Return error in such case to + avoid zero length dma buffer request. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c1c2d77408022a398a1a7c51cf20488c922629de +Author: Pavan Chebbi +Date: Wed Aug 26 01:08:32 2020 -0400 + + bnxt_en: Don't query FW when netif_running() is false. + + In rare conditions like two stage OS installation, the + ethtool's get_channels function may be called when the + device is in D3 state, leading to uncorrectable PCI error. + Check netif_running() first before making any query to FW + which involves writing to BAR. 
+ + Fixes: db4723b3cd2d ("bnxt_en: Check max_tx_scheduler_inputs value from firmware.") + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit df561f6688fef775baa341a0f5d960becd248b11 +Author: Gustavo A. R. Silva +Date: Sun Aug 23 17:36:59 2020 -0500 + + treewide: Use fallthrough pseudo-keyword + + Replace the existing /* fall through */ comments and its variants with + the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary + fall-through markings when it is the case. + + [1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through + + Signed-off-by: Gustavo A. R. Silva + +commit b5d600b027eb2733a1d7d253b84efb96c40f6a9d +Author: Vasundhara Volam +Date: Mon Jul 27 05:40:45 2020 -0400 + + bnxt_en: Add support for 'ethtool -d' + + Add support to dump PXP registers and PCIe statistics. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a0c30621c28c6d8e9c572cd6139881f15c806792 +Author: Michael Chan +Date: Mon Jul 27 05:40:44 2020 -0400 + + bnxt_en: Switch over to use the 64-bit software accumulated counters. + + Now we can report all the full 64-bit CPU endian software accumulated + counters instead of the hw counters, some of which may be less than + 64-bit wide. Define the necessary macros to access the software + counters. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fea6b3335527f41bc729466fb2a95e48a98827ac +Author: Michael Chan +Date: Mon Jul 27 05:40:43 2020 -0400 + + bnxt_en: Accumulate all counters. + + Now that we have the infrastructure in place, add the new function + bnxt_accumulate_all_stats() to periodically accumulate and check for + counter rollover of all ring stats and port stats. + + A chip bug was also discovered that could cause some ring counters to + become 0 during DMA. 
Workaround by ignoring zeros on the affected + chips. + + Some older frimware will reset port counters during ifdown. We need + to check for that and free the accumulated port counters during ifdown + to prevent bogus counter overflow detection during ifup. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 531d1d269c1d432d691026c82d04b7bb5f1ae318 +Author: Michael Chan +Date: Mon Jul 27 05:40:42 2020 -0400 + + bnxt_en: Retrieve hardware masks for port counters. + + If supported by newer firmware, make the firmware call to query all + the port counter masks. If not supported, assume 40-bit port + counter masks. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d752d0536c979068084e0c60ff506c07e1cf613e +Author: Michael Chan +Date: Mon Jul 27 05:40:41 2020 -0400 + + bnxt_en: Retrieve hardware counter masks from firmware if available. + + Newer firmware has a new call HWRM_FUNC_QSTATS_EXT to retrieve the + masks of all ring counters. Make this call when supported to + initialize the hardware masks of all ring counters. If the call + is not available, assume 48-bit ring counter masks on P5 chips. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a37120b22e540b4eb068addf9be3f63b64dca690 +Author: Michael Chan +Date: Mon Jul 27 05:40:40 2020 -0400 + + bnxt_en: Allocate additional memory for all statistics blocks. + + Some of these DMAed hardware counters are not full 64-bit counters and + so we need to accumulate them as they overflow. Allocate copies of these + DMA statistics memory blocks with the same size for accumulation. The + hardware counter widths are also counter specific so we allocate + memory for masks that correspond to each counter. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 177a6cde47fcf23e5826c6224fc53038480451c2 +Author: Michael Chan +Date: Mon Jul 27 05:40:39 2020 -0400 + + bnxt_en: Refactor statistics code and structures. + + The driver manages multiple statistics structures of different sizes. + They are all allocated, freed, and handled practically the same. Define + a new bnxt_stats_mem structure and common allocation and free functions + for all staistics memory blocks. + + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 24c93443fe21ca3ec87b7b121548daa3b57a34af +Author: Michael Chan +Date: Mon Jul 27 05:40:38 2020 -0400 + + bnxt_en: Use macros to define port statistics size and offset. + + The port statistics structures have hard coded padding and offset. + Define macros to make this look cleaner. + + Reviewed-by: Pavan Chebbi + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bfc6e5fbcbbf75967a7f673ad67fa227d77f2541 +Author: Michael Chan +Date: Mon Jul 27 05:40:37 2020 -0400 + + bnxt_en: Update firmware interface to 1.10.1.54. + + Main changes are 200G support and fixing the definitions of discard and + error counters to match the hardware definitions. + + Because the HWRM_PORT_PHY_QCFG message size has now exceeded the max. + encapsulated response message size of 96 bytes from the PF to the VF, + we now need to cap this message to 96 bytes for forwarding. The forwarded + response only needs to contain the basic link status and speed information + and can be capped without adding the new information. + + v2: Fix bnxt_re compile error. + + Cc: Selvin Xavier + Reviewed-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit dfe64de974f8c8391776a376cc01c582064e8a46 +Author: Vasundhara Volam +Date: Mon Jul 27 05:40:36 2020 -0400 + + bnxt_en: Remove PCIe non-counters from ethtool statistics + + Remove PCIe non-counters display from ethtool statistics, as + they are not simple counters but register dump. The next few + patches will add logic to detect counter roll-over and it won't + work with these PCIe non-counters. + + There will be a follow up patch to get PCIe information via + ethtool register dump. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e8407fdeb9a6866784e249881f6c786a0835faba +Author: Andrii Nakryiko +Date: Tue Jul 21 23:46:02 2020 -0700 + + bpf, xdp: Remove XDP_QUERY_PROG and XDP_QUERY_PROG_HW XDP commands + + Now that BPF program/link management is centralized in generic net_device + code, kernel code never queries program id from drivers, so + XDP_QUERY_PROG/XDP_QUERY_PROG_HW commands are unnecessary. + + This patch removes all the implementations of those commands in kernel, along + the xdp_attachment_query(). + + This patch was compile-tested on allyesconfig. + + Signed-off-by: Andrii Nakryiko + Signed-off-by: Alexei Starovoitov + Link: https://lore.kernel.org/bpf/20200722064603.3350758-10-andriin@fb.com + +commit 18c7015cc65ab62a161291b863225db5cfc717a4 +Author: Jakub Kicinski +Date: Fri Jul 17 13:59:58 2020 -0700 + + net: bnxt: don't complain if TC flower can't be supported + + The fact that NETIF_F_HW_TC is not set should be a sufficient + indication to the user that TC offloads are not supported. + No need to bother users of older firmware versions with + pointless warnings on every boot. + + Also, since the support is optional, bnxt_init_tc() should not + return an error in case FW is old, similarly to how error + is not returned when CONFIG_BNXT_FLOWER_OFFLOAD is not set. + + With that we can add an error message to the caller, to warn + about actual unexpected failures. 
+ + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit c40f4e50b6cfc7c66f69d12c6b3fbcd954f1ded5 +Author: Petr Machata +Date: Sat Jul 11 00:55:03 2020 +0300 + + net: sched: Pass qdisc reference in struct flow_block_offload + + Previously, shared blocks were only relevant for the pseudo-qdiscs ingress + and clsact. Recently, a qevent facility was introduced, which allows to + bind blocks to well-defined slots of a qdisc instance. RED in particular + got two qevents: early_drop and mark. Drivers that wish to offload these + blocks will be sent the usual notification, and need to know which qdisc it + is related to. + + To that end, extend flow_block_offload with a "sch" pointer, and initialize + as appropriate. This prompts changes in the indirect block facility, which + now tracks the scheduler in addition to the netdevice. Update signatures of + several functions similarly. + + Signed-off-by: Petr Machata + Signed-off-by: David S. Miller + +commit 27640ce68d21e556b66bc5fa022aacd26e53c947 +Author: Michael Chan +Date: Sat Jul 11 20:48:25 2020 -0400 + + bnxt_en: Fix completion ring sizing with TPA enabled. + + The current completion ring sizing formula is wrong with TPA enabled. + The formula assumes that the number of TPA completions are bound by the + RX ring size, but that's not true. TPA_START completions are immediately + recycled so they are not bound by the RX ring size. We must add + bp->max_tpa to the worst case maximum RX and TPA completions. + + The completion ring can overflow because of this mistake. This will + cause hardware to disable the completion ring when this happens, + leading to RX and TX traffic to stall on that ring. This issue is + generally exposed only when the RX ring size is set very small. + + Fix the formula by adding bp->max_tpa to the number of RX completions + if TPA is enabled. 
+ + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver."); + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ca0c753815fe4786b79a80abf0412eb5d52090b8 +Author: Vasundhara Volam +Date: Sat Jul 11 20:48:24 2020 -0400 + + bnxt_en: Init ethtool link settings after reading updated PHY configuration. + + In a shared port PHY configuration, async event is received when any of the + port modifies the configuration. Ethtool link settings should be + initialised after updated PHY configuration from firmware. + + Fixes: b1613e78e98d ("bnxt_en: Add async. event logic for PHY configuration changes.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 163e9ef63641a02de4c95cd921577265c52e1ce2 +Author: Vasundhara Volam +Date: Sat Jul 11 20:48:23 2020 -0400 + + bnxt_en: Fix race when modifying pause settings. + + The driver was modified to not rely on rtnl lock to protect link + settings about 2 years ago. The pause setting was missed when + making that change. Fix it by acquiring link_lock mutex before + calling bnxt_hwrm_set_pause(). + + Fixes: e2dc9b6e38fa ("bnxt_en: Don't use rtnl lock to protect link change logic in workqueue.") + Signed-off-by: Vasundhara Volam + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c8b1d7436045d3599bae56aef1682813ecccaad7 +Author: Davide Caratti +Date: Fri Jul 10 12:55:08 2020 +0200 + + bnxt_en: fix NULL dereference in case SR-IOV configuration fails + + we need to set 'active_vfs' back to 0, if something goes wrong during the + allocation of SR-IOV resources: otherwise, further VF configurations will + wrongly assume that bp->pf.vf[x] are valid memory locations, and commands + like the ones in the following sequence: + + # echo 2 >/sys/bus/pci/devices/${ADDR}/sriov_numvfs + # ip link set dev ens1f0np0 up + # ip link set dev ens1f0np0 vf 0 trust on + + will cause a kernel crash similar to this: + + bnxt_en 0000:3b:00.0: not enough MMIO resources for SR-IOV + BUG: kernel NULL pointer dereference, address: 0000000000000014 + #PF: supervisor read access in kernel mode + #PF: error_code(0x0000) - not-present page + PGD 0 P4D 0 + Oops: 0000 [#1] SMP PTI + CPU: 43 PID: 2059 Comm: ip Tainted: G I 5.8.0-rc2.upstream+ #871 + Hardware name: Dell Inc. PowerEdge R740/08D89F, BIOS 2.2.11 06/13/2019 + RIP: 0010:bnxt_set_vf_trust+0x5b/0x110 [bnxt_en] + Code: 44 24 58 31 c0 e8 f5 fb ff ff 85 c0 0f 85 b6 00 00 00 48 8d 1c 5b 41 89 c6 b9 0b 00 00 00 48 c1 e3 04 49 03 9c 24 f0 0e 00 00 <8b> 43 14 89 c2 83 c8 10 83 e2 ef 45 84 ed 49 89 e5 0f 44 c2 4c 89 + RSP: 0018:ffffac6246a1f570 EFLAGS: 00010246 + RAX: 0000000000000000 RBX: 0000000000000000 RCX: 000000000000000b + RDX: 0000000000000001 RSI: 0000000000000000 RDI: ffff98b28f538900 + RBP: ffff98b28f538900 R08: 0000000000000000 R09: 0000000000000008 + R10: ffffffffb9515be0 R11: ffffac6246a1f678 R12: ffff98b28f538000 + R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc05451e0 + FS: 00007fde0f688800(0000) GS:ffff98baffd40000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000000000000014 CR3: 000000104bb0a003 CR4: 00000000007606e0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 
0000000000000400 + PKRU: 55555554 + Call Trace: + do_setlink+0x994/0xfe0 + __rtnl_newlink+0x544/0x8d0 + rtnl_newlink+0x47/0x70 + rtnetlink_rcv_msg+0x29f/0x350 + netlink_rcv_skb+0x4a/0x110 + netlink_unicast+0x21d/0x300 + netlink_sendmsg+0x329/0x450 + sock_sendmsg+0x5b/0x60 + ____sys_sendmsg+0x204/0x280 + ___sys_sendmsg+0x88/0xd0 + __sys_sendmsg+0x5e/0xa0 + do_syscall_64+0x47/0x80 + entry_SYSCALL_64_after_hwframe+0x44/0xa9 + + Fixes: c0c050c58d840 ("bnxt_en: New Broadcom ethernet driver.") + Reported-by: Fei Liu + CC: Jonathan Toppins + CC: Michael Chan + Signed-off-by: Davide Caratti + Reviewed-by: Michael Chan + Acked-by: Jonathan Toppins + Signed-off-by: David S. Miller + +commit 442a35a5a7aa7277ace9a2671260dbff1a04e029 +Author: Jakub Kicinski +Date: Thu Jul 9 17:42:52 2020 -0700 + + bnxt: convert to new udp_tunnel_nic infra + + Convert to new infra, taking advantage of sleeping in callbacks. + + v2: + - use bp->*_fw_dst_port_id != INVALID_HW_RING_ID as indication + that the offload is active. + + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit 71ad8d55f8e5ea101069b552422f392655e2ffb6 +Author: Danielle Ratson +Date: Thu Jul 9 16:18:16 2020 +0300 + + devlink: Replace devlink_port_attrs_set parameters with a struct + + Currently, devlink_port_attrs_set accepts a long list of parameters, + that most of them are devlink port's attributes. + + Use the devlink_port_attrs struct to replace the relevant parameters. + + Signed-off-by: Danielle Ratson + Reviewed-by: Jiri Pirko + Signed-off-by: Ido Schimmel + Signed-off-by: David S. Miller + +commit 1da63ddd0e155277bf613dfc7062af95d90452f2 +Author: Edwin Peer +Date: Wed Jul 8 07:54:01 2020 -0400 + + bnxt_en: allow firmware to disable VLAN offloads + + Bare-metal use cases require giving firmware and the embedded + application processor control over VLAN offloads. 
The driver should + not attempt to override or utilize this feature in such scenarios + since it will not work as expected. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a196e96bb68fbc7a319f45df1d529b807216a03a +Author: Edwin Peer +Date: Wed Jul 8 07:54:00 2020 -0400 + + bnxt_en: clean up VLAN feature bit handling + + The hardware VLAN offload feature on our NIC does not have separate + knobs for handling customer and service tags on RX. Either offloading + of both must be enabled or both must be disabled. Introduce definitions + for the combined feature set in order to clean up the code and make + this constraint more clear. Technically these features can be separately + enabled on TX, however, since the default is to turn both on, the + combined TX feature set is also introduced for code consistency. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bd3191b5d87d5ebc1d4149bbbb42a64ec3d469bf +Author: Michael Chan +Date: Wed Jul 8 07:53:59 2020 -0400 + + bnxt_en: Implement ethtool -X to set indirection table. + + With the new infrastructure in place, we can now support the setting of + the indirection table from ethtool. + + When changing channels, in a rare case that firmware cannot reserve the + rings that were promised, we will still try to keep the RSS map and only + revert to default when absolutely necessary. + + v4: Revert RSS map to default during ring change only when absolutely + necessary. + + v3: Add warning messages when firmware cannot reserve the requested RX + rings, and when the RSS table entries have to change to default. + + v2: When changing channels, if the RSS table size changes and RSS map + is non-default, return error. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit adc38ac66745949ce12c1861c1a25f3ef93df1f8 +Author: Michael Chan +Date: Wed Jul 8 07:53:58 2020 -0400 + + bnxt_en: Return correct RSS indirection table entries to ethtool -x. + + Now that we have the logical indirection table, we can return these + proper logical indices directly to ethtool -x instead of the physical + IDs. + + Reported-by: Jakub Kicinski + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f33a305d09388880ec92db8de3c38448db36b629 +Author: Michael Chan +Date: Wed Jul 8 07:53:57 2020 -0400 + + bnxt_en: Fill HW RSS table from the RSS logical indirection table. + + Now that we have the logical table, we can fill the HW RSS table + using the logical table's entries and converting them to the HW + specific format. Re-initialize the logical table to standard + distribution if the number of RX rings changes during ring reservation. + + v4: Use bnxt_get_rxfh_indir_size() to get the RSS table size. + + v2: Use ALIGN() to roundup the RSS table size. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f9f6a3fbb5eb89e738ebdf16ac56437177537b28 +Author: Michael Chan +Date: Wed Jul 8 07:53:56 2020 -0400 + + bnxt_en: Add helper function to return the number of RSS contexts. + + On some chips, this varies based on the number of RX rings. Add this + helper function and refactor the existing code to use it. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1667cbf6a4ebe0901bd93ef0d6defd35006fd2be +Author: Michael Chan +Date: Wed Jul 8 07:53:55 2020 -0400 + + bnxt_en: Add logical RSS indirection table structure. + + The driver currently does not keep track of the logical RSS indirection + table. The hardware RSS table is set up with standard default ring + distribution when initializing the chip. This makes it difficult to + support user sepcified indirection table entries. 
As a first step, add + the logical table in the main bnxt structure and allocate it according + to chip specific table size. Add a function that sets up default + RSS distribution based on the number of RX rings. + + v4: Use bnxt_get_rxfh_indir_size() for the current RSS table size. + + v2: Use kmalloc_array() since we init. all entries afterwards. + Use ALIGN() to roundup the RSS table size. + Use ethtool_rxfh_indir_default() to init. the default entries. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b73c1d08a0ec33f2ddafdd21d3a48614da4e6853 +Author: Michael Chan +Date: Wed Jul 8 07:53:54 2020 -0400 + + bnxt_en: Fix up bnxt_get_rxfh_indir_size(). + + Fix up bnxt_get_rxfh_indir_size() to return the proper current RSS + table size for P5 chips. Change it to non-static so that bnxt.c + can use it to get the table size. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 34370d2435f9853ac882056faa98f3263c537c36 +Author: Michael Chan +Date: Wed Jul 8 07:53:53 2020 -0400 + + bnxt_en: Set up the chip specific RSS table size. + + Currently, we allocate one page for the hardware DMA RSS indirection + table. While the size is currently big enough for all chips, future + chip variations may support bigger sizes, so it is better to calculate + and store the chip specific size and allocate accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c55e28a8b43fcd7dc71868bd165705bc7741a7ca +Author: Vasundhara Volam +Date: Tue Jun 23 19:01:38 2020 -0400 + + bnxt_en: Read VPD info only for PFs + + Virtual functions does not have VPD information. This patch modifies + calling bnxt_read_vpd_info() only for PFs and avoids an unnecessary + error log. + + Fixes: a0d0fd70fed5 ("bnxt_en: Read partno and serialno of the board from VPD") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c2dec363feb41544a76c8083aca2378990e17166 +Author: Michael Chan +Date: Tue Jun 23 19:01:37 2020 -0400 + + bnxt_en: Fix statistics counters issue during ifdown with older firmware. + + On older firmware, the hardware statistics are not cleared when the + driver frees the hardware stats contexts during ifdown. The driver + expects these stats to be cleared and saves a copy before freeing + the stats contexts. During the next ifup, the driver will likely + allocate the same hardware stats contexts and this will cause a big + increase in the counters as the old counters are added back to the + saved counters. + + We fix it by making an additional firmware call to clear the counters + before freeing the hw stats contexts when the firmware is the older + 20.x firmware. + + Fixes: b8875ca356f1 ("bnxt_en: Save ring statistics before reset.") + Reported-by: Jakub Kicinski + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Tested-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit fed7edd18143c68c63ea049999a7e861123de6de +Author: Michael Chan +Date: Tue Jun 23 19:01:36 2020 -0400 + + bnxt_en: Do not enable legacy TX push on older firmware. + + Older firmware may not support legacy TX push properly and may not + be disabling it. So we check certain firmware versions that may + have this problem and disable legacy TX push unconditionally. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d0ad2ea2bc185835f8a749302ad07b70528d2a09 +Author: Michael Chan +Date: Tue Jun 23 19:01:35 2020 -0400 + + bnxt_en: Store the running firmware version code. + + We currently only store the firmware version as a string for ethtool + and devlink info. Store it also as a version code. The next 2 + patches will need to check the firmware major version to determine + some workarounds. 
+ + We also use the 16-bit firmware version fields if the firmware is newer + and provides the 16-bit fields. + + Reviewed-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9bf88b9fc8a4a8dd38992a7a065e459c645c9545 +Author: Vasundhara Volam +Date: Sat Jun 20 22:01:57 2020 +0530 + + bnxt_en: Add board.serial_number field to info_get cb + + Add board.serial_number field info to info_get cb via devlink, + if driver can fetch the information from the device. + + Cc: Jiri Pirko + Cc: Jakub Kicinski + Signed-off-by: Vasundhara Volam + Reviewed-by: Michael Chan + Reviewed-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit a1db217861f33b8d9ea8171bcacee51186e2d5ba +Author: wenxu +Date: Thu Jun 18 20:49:10 2020 +0800 + + net: flow_offload: fix flow_indr_dev_unregister path + + If the representor is removed, then identify the indirect flow_blocks + that need to be removed by the release callback and the port representor + structure. To identify the port representor structure, a new + indr.cb_priv field needs to be introduced. The flow_block also needs to + be removed from the driver list from the cleanup path. + + Fixes: 1fac52da5942 ("net: flow_offload: consolidate indirect flow_block infrastructure") + + Signed-off-by: wenxu + Signed-off-by: David S. Miller + +commit 66f1939a1b705305df820d65f4d9a8457d05759c +Author: wenxu +Date: Thu Jun 18 20:49:09 2020 +0800 + + flow_offload: use flow_indr_block_cb_alloc/remove function + + Prepare fix the bug in the next patch. use flow_indr_block_cb_alloc/remove + function and remove the __flow_block_indr_binding. + + Signed-off-by: wenxu + Signed-off-by: David S. Miller + +commit 4b61d3e8d3daebbde7ec02d593f84248fdf8bec2 +Author: Po Liu +Date: Fri Jun 19 14:01:07 2020 +0800 + + net: qos offload add flow status with dropped count + + This patch adds a drop frames counter to tc flower offloading. + Reporting h/w dropped frames is necessary for some actions. 
+ Some actions like police action and the coming introduced stream gate + action would produce dropped frames which is necessary for user. Status + update shows how many filtered packets increasing and how many dropped + in those packets. + + v2: Changes + - Update commit comments suggest by Jiri Pirko. + + Signed-off-by: Po Liu + Reviewed-by: Simon Horman + Reviewed-by: Vlad Buslov + Signed-off-by: David S. Miller + +commit e000940473d1423a42ef9c823fb23ccffe3f07ea +Author: Vasundhara Volam +Date: Sun Jun 14 19:57:10 2020 -0400 + + bnxt_en: Return from timer if interface is not in open state. + + This will avoid many uneccessary error logs when driver or firmware is + in reset. + + Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6e2f83884c099de0e87b15a820736e522755d074 +Author: Michael Chan +Date: Sun Jun 14 19:57:09 2020 -0400 + + bnxt_en: Fix AER reset logic on 57500 chips. + + AER reset should follow the same steps as suspend/resume. We need to + free context memory during AER reset and allocate new context memory + during recovery by calling bnxt_hwrm_func_qcaps(). We also need + to call bnxt_reenable_sriov() to restore the VFs. + + Fixes: bae361c54fb6 ("bnxt_en: Improve AER slot reset.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 59ae210173ff86256fa0cdba4ea4d608c61e123d +Author: Michael Chan +Date: Sun Jun 14 19:57:08 2020 -0400 + + bnxt_en: Re-enable SRIOV during resume. + + If VFs are enabled, we need to re-configure them during resume because + firmware has been reset while resuming. Otherwise, the VFs won't + work after resume. + + Fixes: c16d4ee0e397 ("bnxt_en: Refactor logic to re-enable SRIOV after firmware reset detected.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2084ccf6259cc95e0575f0fafc93595d0219a9f6 +Author: Michael Chan +Date: Sun Jun 14 19:57:07 2020 -0400 + + bnxt_en: Simplify bnxt_resume(). + + The separate steps we do in bnxt_resume() can be done more simply by + calling bnxt_hwrm_func_qcaps(). This change will add an extra + __bnxt_hwrm_func_qcaps() call which is needed anyway on older + firmware. + + Fixes: f9b69d7f6279 ("bnxt_en: Fix suspend/resume path on 57500 chips") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e445e30cf7e6d68566db775ce186cbe63ef286e9 +Author: Pablo Neira Ayuso +Date: Fri May 29 02:25:40 2020 +0200 + + bnxt_tc: update indirect block support + + Register ndo callback via flow_indr_dev_register() and + flow_indr_dev_unregister(). + + Signed-off-by: Pablo Neira Ayuso + Signed-off-by: David S. Miller + +commit 2a5a8800fa915bd9bc272c91ca64728e6aa84c0a +Author: Edwin Peer +Date: Mon May 25 17:41:19 2020 -0400 + + bnxt_en: fix firmware message length endianness + + The explicit mask and shift is not the appropriate way to parse fields + out of a little endian struct. The length field is internally __le16 + and the strategy employed only happens to work on little endian machines + because the offset used is actually incorrect (length is at offset 6). + + Also remove the related and no longer used definitions from bnxt.h. + + Fixes: 845adfe40c2a ("bnxt_en: Improve valid bit checking in firmware response message.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 95ec1f470b976858264d7635a6ef76bc33c3875b +Author: Vasundhara Volam +Date: Mon May 25 17:41:18 2020 -0400 + + bnxt_en: Fix return code to "flash_device". + + When NVRAM directory is not found, return the error code + properly as per firmware command failure instead of the hardcode + -ENOBUFS. 
+ + Fixes: 3a707bed13b7 ("bnxt_en: Return -EAGAIN if fw command returns BUSY") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b8056e8434b037fdab08158fea99ed7bc8ef3a74 +Author: Michael Chan +Date: Mon May 25 17:41:17 2020 -0400 + + bnxt_en: Fix accumulation of bp->net_stats_prev. + + We have logic to maintain network counters across resets by storing + the counters in bp->net_stats_prev before reset. But not all resets + will clear the counters. Certain resets that don't need to change + the number of rings do not clear the counters. The current logic + accumulates the counters before all resets, causing big jumps in + the counters after some resets, such as ethtool -G. + + Fix it by only accumulating the counters during reset if the irq_re_init + parameter is set. The parameter signifies that all rings and interrupts + will be reset and that means that the counters will also be reset. + + Reported-by: Vijayendra Suman + Fixes: b8875ca356f1 ("bnxt_en: Save ring statistics before reset.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 63fe91ab3d1c5c0b4497b993b8eeaa54f6688d53 +Author: Jesper Dangaard Brouer +Date: Thu May 14 12:49:07 2020 +0200 + + bnxt: Add XDP frame size to driver + + This driver uses full PAGE_SIZE pages when XDP is enabled. + + In case of XDP uses driver uses __bnxt_alloc_rx_page which does full + page DMA-map. Thus, xdp_adjust_tail grow is DMA compliant for XDP_TX + action that does DMA-sync. 
+ + Signed-off-by: Jesper Dangaard Brouer + Signed-off-by: Alexei Starovoitov + Reviewed-by: Andy Gospodarek + Cc: Michael Chan + Cc: Andy Gospodarek + Link: https://lore.kernel.org/bpf/158945334769.97035.13437970179897613984.stgit@firesoul + +commit ba42580019560ed9c54f87c3c4e852ce26869c5d +Author: Jason Yan +Date: Tue May 5 15:46:08 2020 +0800 + + net: bnxt: Remove Comparison to bool in bnxt_ethtool.c + + Fix the following coccicheck warning: + + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1991:5-46: WARNING: + Comparison to bool + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:1993:10-54: WARNING: + Comparison to bool + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2380:5-38: WARNING: + Comparison to bool + + Signed-off-by: Jason Yan + Signed-off-by: David S. Miller + +commit 125592fbf467d1a70312006bbaf29724d0ba5897 +Author: Rajesh Ravi +Date: Mon May 4 04:50:41 2020 -0400 + + bnxt_en: show only relevant ethtool stats for a TX or RX ring + + Currently, ethtool -S shows all TX/RX ring counters whether the + channel is combined, RX, or TX. The unused counters will always be + zero. Improve it by showing only the relevant counters if the channel + is RX or TX. If the channel is combined, the counters will be shown + exactly the same as before. + + [ MChan: Lots of cleanups and simplifications on Rajesh's original + code] + + Signed-off-by: Rajesh Ravi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3316d50905f0e551d4786767d827589960a8cb83 +Author: Michael Chan +Date: Mon May 4 04:50:40 2020 -0400 + + bnxt_en: Split HW ring statistics strings into RX and TX parts. + + This will allow the RX and TX ring statistics to be separated if needed. + In the next patch, we'll be able to only display RX or TX statistcis if + the channel is RX only or TX only. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 9d8b5f05529c619b63d68b0dd26a1dfe35a4fab2 +Author: Michael Chan +Date: Mon May 4 04:50:39 2020 -0400 + + bnxt_en: Refactor the software ring counters. + + We currently have 3 software ring counters, rx_l4_csum_errors, + rx_buf_errors, and missed_irqs. The 1st two are RX counters and the + last one is a common counter. Organize them into 2 structures + bnxt_rx_sw_stats and bnxt_cmn_sw_stats. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 098286ff930ca752e4c9295ea65840dd55f5f290 +Author: Michael Chan +Date: Mon May 4 04:50:38 2020 -0400 + + bnxt_en: Add doorbell information to bnxt_en_dev struct. + + The purpose of this is to inform the RDMA driver the size of the doorbell + BAR that the L2 driver has mapped and the portion that is mapped + uncacheable. The unchaeable portion is shared with the RoCE driver. + Any remaining unmapped doorbell BAR can be used by the RDMA driver for + its own purpose. Currently, the entire L2 portion is mapped uncacheable. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8ae2473842bdbb95bfb451b130dad6a650b3ad1b +Author: Michael Chan +Date: Mon May 4 04:50:37 2020 -0400 + + bnxt_en: Add support for L2 doorbell size. + + Read the L2 doorbell size from the firmware and only map the portion + of the doorbell BAR for L2 use. This will leave the remaining doorbell + BAR available for the RoCE driver to use. The RoCE driver can map + the remaining portion as write-combining to support the push feature. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e93b30d56fc0670e508456afc59f16d70fe1f83f +Author: Michael Chan +Date: Mon May 4 04:50:36 2020 -0400 + + bnxt_en: Set the db_offset on 57500 chips for the RDMA MSIX entries. + + The driver provides completion ring or NQ doorbell offset for each + MSIX entry requested by the RDMA driver. The NQ offset on 57500 + chips is different than legacy chips. Set it correctly based on + chip type for correctness. 
The RDMA driver is ignoring this field + for the 57500 chips so it is not causing any problem. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ebdf73dc595b6711dbfaf3007d513909bd814940 +Author: Michael Chan +Date: Mon May 4 04:50:35 2020 -0400 + + bnxt_en: Define the doorbell offsets on 57500 chips. + + Define the 57500 chip doorbell offsets instead of using the magic + values in the C file. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8cec0940803c255f501d4b9f4764cd47fc206ad4 +Author: Edwin Peer +Date: Mon May 4 04:50:34 2020 -0400 + + bnxt_en: Improve kernel log messages related to ethtool reset. + + Kernel log messages for failed AP reset commands should be suppressed. + These are expected to fail on devices that do not have an AP. Add + missing driver reload message after AP reset and log it in a common + way without duplication. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7a13240e371891d90cd51e3ea55ea04f4b2065dc +Author: Edwin Peer +Date: Mon May 4 04:50:33 2020 -0400 + + bnxt_en: fix ethtool_reset_flags ABI violations + + The ethtool ABI specifies that the reset operation should only clear + the flags that were actually reset. Setting the flags to zero after + a chip reset violates this because it does not include resetting the + application processor complex. Similarly, components that are not yet + defined are also not necessarily being reset. + + The fact that chip reset does not cover the AP also means that it is + inappropriate to treat these two components exclusively of one another. + The ABI provides a mechanism to report a failure to reset independent + components via the returned bitmask, so it is also wrong to fail hard + if one of a set of independent resets is not possible. + + It is incorrect to rely on the passed by reference flags in bnxt_reset(), + which are being updated as components are reset. 
The initially requested + value should be used instead so that hard errors do not propagate if any + earlier components could have been reset successfully. + + Note, AP and chip resets are global in nature. Dedicated resets are + thus not currently supported. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 94f17e89c956553606d5c7cf4f40ce6012529d48 +Author: Edwin Peer +Date: Mon May 4 04:50:32 2020 -0400 + + bnxt_en: refactor ethtool firmware reset types + + The case statement in bnxt_firmware_reset() dangerously mixes types. + This patch separates the application processor and whole chip resets + from the rest such that the selection is performed on a pure type. + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 95fec034fd440b4882701df3e84d2b76af4e627d +Author: Edwin Peer +Date: Mon May 4 04:50:31 2020 -0400 + + bnxt_en: prepare to refactor ethtool reset types + + Extract bnxt_hwrm_firmware_reset() for performing firmware reset + operations. This new helper function will be used in a subsequent + patch to separate unrelated reset types out of bnxt_firmware_reset(). + + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d0b82c5461c9b9bfcb572fe0b50d8e2662e281f1 +Author: Vasundhara Volam +Date: Mon May 4 04:50:30 2020 -0400 + + bnxt_en: Do not include ETH_FCS_LEN in the max packet length sent to fw. + + The firmware does not expect the CRC to be included in the length + passed from the driver. The firmware always configures the chip + to strip out the CRC. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c7dd7ab4b204ac0142e0d05e71e05e71ae6cb270 +Author: Michael Chan +Date: Mon May 4 04:50:29 2020 -0400 + + bnxt_en: Improve TQM ring context memory sizing formulas. 
+ + The current formulas to calculate the TQM slow path and fast path ring + context memory sizes are not quite correct. TQM slow path entry is + array index 0 of ctx->tqm_mem[]. The other array entries are for fast + path. Fix these sizes according to latest firmware spec. for 57500 and + newer chips. + + Fixes: 3be8136ce14e ("bnxt_en: Initialize context memory to the value specified by firmware.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ac3158cb01084aa654222f1ad970b6c1af3cef98 +Author: Michael Chan +Date: Mon May 4 04:50:28 2020 -0400 + + bnxt_en: Allocate TQM ring context memory according to fw specification. + + Newer firmware spec. will specify the number of TQM rings to allocate + context memory for. Use the firmware specified value and fall back + to the old value derived from bp->max_q if it is not available. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 460c2577aaf349f4e49eaf2b9ec3d8c52a619ef5 +Author: Michael Chan +Date: Mon May 4 04:50:27 2020 -0400 + + bnxt_en: Update firmware spec. to 1.10.1.33. + + Changes include additional statistics, ECN support, context memory + interface change for better TQM context memory sizing, firmware + health status definitions, etc. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c72cb303aa6c2ae7e4184f0081c6d11bf03fb96b +Author: Michael Chan +Date: Sun Apr 26 16:24:42 2020 -0400 + + bnxt_en: Fix VLAN acceleration handling in bnxt_fix_features(). + + The current logic in bnxt_fix_features() will inadvertently turn on both + CTAG and STAG VLAN offload if the user tries to disable both. Fix it + by checking that the user is trying to enable CTAG or STAG before + enabling both. The logic is supposed to enable or disable both CTAG and + STAG together. + + Fixes: 5a9f6b238e59 ("bnxt_en: Enable and disable RX CTAG and RX STAG VLAN acceleration together.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit bbf211b1ecb891c7e0cc7888834504183fc8b534 +Author: Michael Chan +Date: Sun Apr 26 16:24:41 2020 -0400 + + bnxt_en: Return error when allocating zero size context memory. + + bnxt_alloc_ctx_pg_tbls() should return error when the memory size of the + context memory to set up is zero. By returning success (0), the caller + may proceed normally and may crash later when it tries to set up the + memory. + + Fixes: 08fe9d181606 ("bnxt_en: Add Level 2 context memory paging support.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bae361c54fb6ac6eba3b4762f49ce14beb73ef13 +Author: Michael Chan +Date: Sun Apr 26 16:24:40 2020 -0400 + + bnxt_en: Improve AER slot reset. + + Improve the slot reset sequence by disabling the device to prevent bad + DMAs if slot reset fails. Return the proper result instead of always + PCI_ERS_RESULT_RECOVERED to the caller. + + Fixes: 6316ea6db93d ("bnxt_en: Enable AER support.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9e68cb0359b20f99c7b070f1d3305e5e0a9fae6d +Author: Vasundhara Volam +Date: Sun Apr 26 16:24:39 2020 -0400 + + bnxt_en: Reduce BNXT_MSIX_VEC_MAX value to supported CQs per PF. + + Broadcom adapters support only maximum of 512 CQs per PF. If user sets + MSIx vectors more than supported CQs, firmware is setting incorrect value + for msix_vec_per_pf_max parameter. Fix it by reducing the BNXT_MSIX_VEC_MAX + value to 512, even though the maximum # of MSIx vectors supported by adapter + are 1280. + + Fixes: f399e8497826 ("bnxt_en: Use msix_vec_per_pf_max and msix_vec_per_pf_min devlink params.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c71c4e49afe173823a2a85b0cabc9b3f1176ffa2 +Author: Michael Chan +Date: Sun Apr 26 16:24:38 2020 -0400 + + bnxt_en: Fix VF anti-spoof filter setup. + + Fix the logic that sets the enable/disable flag for the source MAC + filter according to firmware spec 1.7.1. 
+ + In the original firmware spec. before 1.7.1, the VF spoof check flags + were not latched after making the HWRM_FUNC_CFG call, so there was a + need to keep the func_flags so that subsequent calls would perserve + the VF spoof check setting. A change was made in the 1.7.1 spec + so that the flags became latched. So we now set or clear the anti- + spoof setting directly without retrieving the old settings in the + stored vf->func_flags which are no longer valid. We also remove the + unneeded vf->func_flags. + + Fixes: 8eb992e876a8 ("bnxt_en: Update firmware interface spec to 1.7.6.2.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ba7d16c7794213b591a0ea415d975858d6a7dfd8 +Author: Eran Ben Elisha +Date: Sun Mar 29 14:05:54 2020 +0300 + + devlink: Implicitly set auto recover flag when registering health reporter + + When health reporter is registered to devlink, devlink will implicitly set + auto recover if and only if the reporter has a recover method. No reason + to explicitly get the auto recover flag from the driver. + + Remove this flag from all drivers that called + devlink_health_reporter_create. + + All existing health reporters set auto recovery to true if they have a + recover method. + + Yet, administrator can unset auto recover via netlink command as prior to + this patch. + + Signed-off-by: Eran Ben Elisha + Reviewed-by: Jiri Pirko + Reviewed-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit 93a129eb8c520b032e1823447b2e1badcc650666 +Author: Jiri Pirko +Date: Sat Mar 28 16:37:43 2020 +0100 + + net: sched: expose HW stats types per action used by drivers + + It may be up to the driver (in case ANY HW stats is passed) to select + which type of HW stats he is going to use. Add an infrastructure to + expose this information to user. 
+ + $ tc filter add dev enp3s0np1 ingress proto ip handle 1 pref 1 flower dst_ip 192.168.1.1 action drop + $ tc -s filter show dev enp3s0np1 ingress + filter protocol ip pref 1 flower chain 0 + filter protocol ip pref 1 flower chain 0 handle 0x1 + eth_type ipv4 + dst_ip 192.168.1.1 + in_hw in_hw_count 2 + action order 1: gact action drop + random type none pass val 0 + index 1 ref 1 bind 1 installed 10 sec used 10 sec + Action statistics: + Sent 0 bytes 0 pkt (dropped 0, overlimits 0 requeues 0) + backlog 0b 0p requeues 0 + used_hw_stats immediate <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 2013d03827dbc2d4b3110ea96f805c5ab035ab15 +Author: Vasundhara Volam +Date: Fri Mar 27 15:05:51 2020 +0530 + + bnxt_en: Fix "fw.mgmt" and "fw.nsci" info via devlink info_get cb + + Fix macro names to report fw.mgmt and fw.ncsi versions to match the + devlink documentation. + + Example display after fixes: + + $ devlink dev info pci/0000:af:00.0 + pci/0000:af:00.0: + driver bnxt_en + serial_number B0-26-28-FF-FE-25-84-20 + versions: + fixed: + board.id BCM957454A4540 + asic.id C454 + asic.rev 1 + running: + fw 216.1.154.0 + fw.psid 0.0.0 + fw.mgmt 216.1.146.0 + fw.mgmt.api 1.10.1 + fw.ncsi 864.0.44.0 + fw.roce 216.1.16.0 + + Fixes: 9599e036b161 ("bnxt_en: Add support for devlink info command") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 56d69c784d36bee693e950de37fe1751e99fda57 +Author: Vasundhara Volam +Date: Fri Mar 27 15:05:50 2020 +0530 + + bnxt_en: Add partno to devlink info_get cb + + Add part number info from the vital product data to info_get command + via devlink tool. Update bnxt.rst documentation as well. + + Cc: Jakub Kicinski + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit a0d0fd70fed5cc4f1e2dd98b801be63b07b4d6ac +Author: Vasundhara Volam +Date: Fri Mar 27 15:05:49 2020 +0530 + + bnxt_en: Read partno and serialno of the board from VPD + + Store the part number and serial number information from VPD in + the bnxt structure. Follow up patch will add the support to display + the information via devlink command. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b7a444f078592921fa6f83f44b42dd88c08955ee +Author: Vasundhara Volam +Date: Fri Mar 27 15:04:52 2020 +0530 + + bnxt_en: Add fw.mgmt.api version to devlink info_get cb. + + Display the minimum version of firmware interface spec supported + between driver and firmware. Also update bnxt.rst documentation file. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5d765a5e4bd7c368e564e11402bba74cf7f03ac1 +Author: Vasundhara Volam +Date: Sun Mar 22 16:40:05 2020 -0400 + + bnxt_en: Reset rings if ring reservation fails during open() + + If ring counts are not reset when ring reservation fails, + bnxt_init_dflt_ring_mode() will not be called again to reinitialise + IRQs when open() is called and results in system crash as napi will + also be not initialised. This patch fixes it by resetting the ring + counts. + + Fixes: 47558acd56a7 ("bnxt_en: Reserve rings at driver open if none was reserved at probe time.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 62bfb932a51f6d08eb409248e69f8d6428c2cabd +Author: Michael Chan +Date: Sun Mar 22 16:40:04 2020 -0400 + + bnxt_en: Free context memory after disabling PCI in probe error path. + + Other shutdown code paths will always disable PCI first to shutdown DMA + before freeing context memory. Do the same sequence in the error path + of probe to be safe and consistent. 
+ + Fixes: c20dc142dd7b ("bnxt_en: Disable bus master during PCI shutdown and driver unload.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0b5b561cea32d5bb1e0a82d65b755a3cb5212141 +Author: Michael Chan +Date: Sun Mar 22 16:40:03 2020 -0400 + + bnxt_en: Return error if bnxt_alloc_ctx_mem() fails. + + The current code ignores the return value from + bnxt_hwrm_func_backing_store_cfg(), causing the driver to proceed in + the init path even when this vital firmware call has failed. Fix it + by propagating the error code to the caller. + + Fixes: 1b9394e5a2ad ("bnxt_en: Configure context memory on new devices.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 62d4073e86e62e316bea2c53e77db10418fd5dd7 +Author: Edwin Peer +Date: Sun Mar 22 16:40:02 2020 -0400 + + bnxt_en: fix memory leaks in bnxt_dcbnl_ieee_getets() + + The allocated ieee_ets structure goes out of scope without being freed, + leaking memory. Appropriate result codes should be returned so that + callers do not rely on invalid data passed by reference. + + Also cache the ETS config retrieved from the device so that it doesn't + need to be freed. The balance of the code was clearly written with the + intent of having the results of querying the hardware cached in the + device structure. The commensurate store was evidently missed though. + + Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a24ec3220f369aa0b94c863b6b310685a727151c +Author: Michael Chan +Date: Sun Mar 22 16:40:01 2020 -0400 + + bnxt_en: Fix Priority Bytes and Packets counters in ethtool -S. + + There is an indexing bug in determining these ethtool priority + counters. Instead of using the queue ID to index, we need to + normalize by modulo 10 to get the index. This index is then used + to obtain the proper CoS queue counter. 
Rename bp->pri2cos to + bp->pri2cos_idx to make this more clear. + + Fixes: e37fed790335 ("bnxt_en: Add ethtool -S priority counters.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 53eca1f3479f355ec17b2e86a6b0680510292833 +Author: Jakub Kicinski +Date: Mon Mar 16 18:42:11 2020 -0700 + + net: rename flow_action_hw_stats_types* -> flow_action_hw_stats* + + flow_action_hw_stats_types_check() helper takes one of the + FLOW_ACTION_HW_STATS_*_BIT values as input. If we align + the arguments to the opening bracket of the helper there + is no way to call this helper and stay under 80 characters. + + Remove the "types" part from the new flow_action helpers + and enum values. + + Signed-off-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit 0fcfc7a1c3d14fd5d80e3c615efbd581381a138b +Author: Vasundhara Volam +Date: Sun Mar 8 18:45:54 2020 -0400 + + bnxt_en: Call devlink_port_type_clear() in remove() + + Similar to other drivers, properly clear the devlink port type when + removing the device before unregistration. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3a707bed13b77dd7773867bee156164d730c24e0 +Author: Vasundhara Volam +Date: Sun Mar 8 18:45:53 2020 -0400 + + bnxt_en: Return -EAGAIN if fw command returns BUSY + + If firmware command returns error code as HWRM_ERR_CODE_BUSY, which + means it cannot handle the command due to a conflicting command + from another function, convert it to -EAGAIN. If it is an ethtool + operation, this error code will be returned to userspace. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3d0615911d33b81da64d75031490859b4513a19b +Author: Vasundhara Volam +Date: Sun Mar 8 18:45:52 2020 -0400 + + bnxt_en: Modify some bnxt_hwrm_*_free() functions to void. + + Return code is not needed in some of these functions, as the return + code from firmware message is ignored. 
Remove the unused rc variable + and also convert functions to void. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9f90445c14bedaea20e64cbe5838450ca377cc85 +Author: Vasundhara Volam +Date: Sun Mar 8 18:45:51 2020 -0400 + + bnxt_en: Remove unnecessary assignment of return code + + As part of converting error code in firmware message to standard + code, checking for firmware return code is removed in most of the + places. Remove the assignment of return code where the function + can directly return. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 843d699d79a1ca7fc5d61bf4cf7a85b5879a8ff6 +Author: Michael Chan +Date: Sun Mar 8 18:45:50 2020 -0400 + + bnxt_en: Clear DCB settings after firmware reset. + + The driver stores a copy of the DCB settings that have been applied to + the firmware. After firmware reset, the firmware settings are gone and + will revert back to default. Clear the driver's copy so that if there + is a DCBNL request to get the settings, the driver will retrieve the + current settings from the firmware. lldpad keeps the DCB settings in + userspace and will re-apply the settings if it is running. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 389a877a3b20c1bc058143dfc4d95fd754fb0240 +Author: Michael Chan +Date: Sun Mar 8 18:45:49 2020 -0400 + + bnxt_en: Process the NQ under NAPI continuous polling. + + When we are in continuous NAPI polling mode, the current code in + bnxt_poll_p5() will only process the completion rings and will not + process the NQ until interrupt is re-enabled. Tis logic works and + will not cause RX or TX starvation, but async events in the NQ may + be delayed for the duration of continuous NAPI polling. These + async events may be firmware or VF events. + + Continue to handle the NQ after we are done polling the completion + rings. This actually simplies the code in bnxt_poll_p5(). 
+ + Acknowledge the NQ so these async events will not overflow. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 340ac85eabce302aeb3ae7e1817a8bbd4ffd09b2 +Author: Michael Chan +Date: Sun Mar 8 18:45:48 2020 -0400 + + bnxt_en: Simplify __bnxt_poll_cqs_done(). + + Simplify the function by removing tha 'all' parameter. In the current + code, the caller has to specify whether to update/arm both completion + rings with the 'all' parameter. + + Instead of this, we can just update/arm all the completion rings + that have been polled. By setting cpr->had_work_done earlier in + __bnxt_poll_work(), we know which completion ring has been polled + and can just update/arm all the completion rings with + cpr->had_work_done set. + + This simplifies the function with one less parameter and works just + as well. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 54a9062f6909bed8667984c1726bce8183c72118 +Author: Michael Chan +Date: Sun Mar 8 18:45:47 2020 -0400 + + bnxt_en: Handle all NQ notifications in bnxt_poll_p5(). + + In bnxt_poll_p5(), the logic polls for up to 2 completion rings (RX and + TX) for work. In the current code, if we reach budget polling the + first completion ring, we will stop. If the other completion ring + has work to do, we will handle it when NAPI calls us back. + + This is not optimal. We potentially leave an unproceesed entry in + the NQ. When we are finally done with NAPI polling and re-enable + interrupt, the remaining entry in the NQ will cause interrupt to + be triggered immediately for no reason. + + Modify the code in bnxt_poll_p5() to keep looping until all NQ + entries are handled even if the first completion ring has reached + budget. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 319a1d19471ec49b8a91a7f6a3fe2c4535e5c279 +Author: Jiri Pirko +Date: Sat Mar 7 12:40:13 2020 +0100 + + flow_offload: check for basic action hw stats type + + Introduce flow_action_basic_hw_stats_types_check() helper and use it + in drivers. That sanitizes the drivers which do not have support + for action HW stats types. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 8d85b75b4e08ab41e55dbb43cb1b82b5b35f22c5 +Author: Jacob Keller +Date: Mon Mar 2 18:25:01 2020 -0800 + + bnxt_en: Use pci_get_dsn() + + Replace the open-coded implementation for reading the PCIe DSN with + pci_get_dsn(). + + Use of put_unaligned_le64 should be correct. pci_get_dsn() will perform + two pci_read_config_dword calls. The first dword will be placed in the + first 32 bits of the u64, while the second dword will be placed in the + upper 32 bits of the u64. + + On Little Endian systems, the least significant byte comes first, which + will be the least significant byte of the first dword, followed by the + least significant byte of the second dword. Since the _le32 variations + do not perform byte swapping, we will correctly copy the dwords into the + dsn[] array in the same order as before. + + On Big Endian systems, the most significant byte of the second dword + will come first. put_unaligned_le64 will perform a CPU_TO_LE64, which + will swap things correctly before copying. This should also end up with + the correct bytes in the dsn[] array. + + While at it, fix a small typo in the netdev_info error message when the + DSN cannot be read. + + Signed-off-by: Jacob Keller + Cc: Michael Chan + Signed-off-by: David S. Miller + +commit f704d24371a4cd7009cb776b55463462f2326493 +Author: Jakub Kicinski +Date: Wed Mar 4 21:15:39 2020 -0800 + + bnxt: reject unsupported coalescing params + + Set ethtool_ops->supported_coalesce_params to let + the core reject unsupported coalescing parameters. + + This driver did not previously reject unsupported parameters. 
+ + v3: adjust commit message for new member name + + Signed-off-by: Jakub Kicinski + Reviewed-by: Michael Chan + Signed-off-by: David S. Miller + +commit 22630e28f9c2b55abd217869cc0696def89f2284 +Author: Edwin Peer +Date: Sun Mar 1 22:07:18 2020 -0500 + + bnxt_en: fix error handling when flashing from file + + After bnxt_hwrm_do_send_message() was updated to return standard error + codes in a recent commit, a regression in bnxt_flash_package_from_file() + was introduced. The return value does not properly reflect all + possible firmware errors when calling firmware to flash the package. + + Fix it by consolidating all errors in one local variable rc instead + of having 2 variables for different errors. + + Fixes: d4f1420d3656 ("bnxt_en: Convert error code in firmware message response to standard code.") + Signed-off-by: Edwin Peer + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a9b952d267e59a3b405e644930f46d252cea7122 +Author: Vasundhara Volam +Date: Sun Mar 1 22:07:17 2020 -0500 + + bnxt_en: reinitialize IRQs when MTU is modified + + MTU changes may affect the number of IRQs so we must call + bnxt_close_nic()/bnxt_open_nic() with the irq_re_init parameter + set to true. The reason is that a larger MTU may require + aggregation rings not needed with smaller MTU. We may not be + able to allocate the required number of aggregation rings and + so we reduce the number of channels which will change the number + of IRQs. Without this patch, it may crash eventually in + pci_disable_msix() when the IRQs are not properly unwound. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 9a005c3898aa07cd5cdca77b7096814e6c478c92 +Author: Jonathan Lemon +Date: Mon Feb 24 15:29:09 2020 -0800 + + bnxt_en: add newline to netdev_*() format strings + + Add missing newlines to netdev_* format strings so the lines + aren't buffered by the printk subsystem. + + Nitpicked-by: Jakub Kicinski + Signed-off-by: Jonathan Lemon + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8743db4a9acfd51f805ac0c87bcaae92c42d1061 +Author: Vasundhara Volam +Date: Thu Feb 20 17:26:35 2020 -0500 + + bnxt_en: Issue PCIe FLR in kdump kernel to cleanup pending DMAs. + + If crashed kernel does not shutdown the NIC properly, PCIe FLR + is required in the kdump kernel in order to initialize all the + functions properly. + + Fixes: d629522e1d66 ("bnxt_en: Reduce memory usage when running in kdump kernel.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5567ae4a8d569d996d0d88d0eceb76205e4c7ce5 +Author: Vasundhara Volam +Date: Thu Feb 20 17:26:34 2020 -0500 + + bnxt_en: Improve device shutdown method. + + Especially when bnxt_shutdown() is called during kexec, we need to + disable MSIX and disable Bus Master to completely quiesce the device. + Make these 2 calls unconditionally in the shutdown method. + + Fixes: c20dc142dd7b ("bnxt_en: Disable bus master during PCI shutdown and driver unload.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 18e4960c18f484ac288f41b43d0e6c4c88e6ea78 +Author: Michael Chan +Date: Sun Feb 2 02:41:38 2020 -0500 + + bnxt_en: Fix TC queue mapping. + + The driver currently only calls netdev_set_tc_queue when the number of + TCs is greater than 1. Instead, the comparison should be greater than + or equal to 1. Even with 1 TC, we need to set the queue mapping. + + This bug can cause warnings when the number of TCs is changed back to 1. 
+ + Fixes: 7809592d3e2e ("bnxt_en: Enable MSIX early in bnxt_init_one().") + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit d407302895d3f3ca3a333c711744a95e0b1b0150 +Author: Vasundhara Volam +Date: Sun Feb 2 02:41:37 2020 -0500 + + bnxt_en: Fix logic that disables Bus Master during firmware reset. + + The current logic that calls pci_disable_device() in __bnxt_close_nic() + during firmware reset is flawed. If firmware is still alive, we're + disabling the device too early, causing some firmware commands to + not reach the firmware. + + Fix it by moving the logic to bnxt_reset_close(). If firmware is + in fatal condition, we call pci_disable_device() before we free + any of the rings to prevent DMA corruption of the freed rings. If + firmware is still alive, we call pci_disable_device() after the + last firmware message has been sent. + + Fixes: 3bc7d4a352ef ("bnxt_en: Add BNXT_STATE_IN_FW_RESET state.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 12de2eadf87825c3990c1aa68b5e93101ca2f043 +Author: Michael Chan +Date: Sun Feb 2 02:41:36 2020 -0500 + + bnxt_en: Fix RDMA driver failure with SRIOV after firmware reset. + + bnxt_ulp_start() needs to be called before SRIOV is re-enabled after + firmware reset. Re-enabling SRIOV may consume all the resources and + may cause the RDMA driver to fail to get MSIX and other resources. + Fix it by calling bnxt_ulp_start() first before calling + bnxt_reenable_sriov(). + + We re-arrange the logic so that we call bnxt_ulp_start() and + bnxt_reenable_sriov() in proper sequence in bnxt_fw_reset_task() and + bnxt_open(). The former is the normal coordinated firmware reset sequence + and the latter is firmware reset while the function is down. This new + logic is now more straight forward and will now fix both scenarios. 
+ + Fixes: f3a6d206c25a ("bnxt_en: Call bnxt_ulp_stop()/bnxt_ulp_start() during error recovery.") + Reported-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit c16d4ee0e397163fe7ceac281eaa952e63fadec7 +Author: Michael Chan +Date: Sun Feb 2 02:41:35 2020 -0500 + + bnxt_en: Refactor logic to re-enable SRIOV after firmware reset detected. + + Put the current logic in bnxt_open() to re-enable SRIOV after detecting + firmware reset into a new function bnxt_reenable_sriov(). This call + needs to be invoked in the firmware reset path also in the next patch. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 9599e036b161243d7c62399a1b6c250573e08a43 +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:26 2020 -0500 + + bnxt_en: Add support for devlink info command + + Display the following information via devlink info command: + - Driver name + - Board id + - Broad revision + - Board Serial number + - Board FW version + - FW parameter set version + - FW App version + - FW management version + - FW RoCE version + + Standard output example: + $ devlink dev info pci/0000:3b:00.0 + pci/0000:3b:00.0: + driver bnxt_en + serial_number 00-10-18-FF-FE-AD-05-00 + versions: + fixed: + asic.id D802 + asic.rev 1 + running: + fw 216.1.124.0 + fw.psid 0.0.0 + fw.app 216.1.122.0 + fw.mgmt 864.0.32.0 + fw.roce 216.1.15.0 + + [ This version has incorporated changes suggested by Jakub Kicinski to + use generic devlink version tags. ] + + v2: Use fw.psid + + Cc: Jiri Pirko + Cc: Jakub Kicinski + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b014232f7f56f6db10b8540b0b97ae8c7eeef28e +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:24 2020 -0500 + + bnxt_en: Rename switch_id to dsn + + Instead of switch_id, renaming it to dsn will be more meaningful + so that it can be used to display device serial number in follow up + patch via devlink_info command. 
+ + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8159cbe3e0b24489f97ca0fb7df7a1710f03a26b +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:23 2020 -0500 + + bnxt_en: Add support to update progress of flash update + + This patch adds status notification to devlink flash update + while flashing is in progress. + + $ devlink dev flash pci/0000:05:00.0 file 103.pkg + Preparing to flash + Flashing done + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cda2cab0771183932d6ba73c5ac63bb63decdadf +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:22 2020 -0500 + + bnxt_en: Move devlink_register before registering netdev + + Latest kernels get the phys_port_name via devlink, if + ndo_get_phys_port_name is not defined. To provide the phys_port_name + correctly, register devlink before registering netdev. + + Also call devlink_port_type_eth_set() after registering netdev as + devlink port updates the netdev structure and notifies user. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 002870ebdabb75809d753e3c171c68c4e70a0e91 +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:21 2020 -0500 + + bnxt_en: Register devlink irrespective of firmware spec version + + This will allow to register for devlink port and use port features. + Also register params only if firmware spec version is at least 0x10600 + which will support reading/setting numbered variables in NVRAM. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d6292ade7f6f109bffe9e1065de37f688fa0107f +Author: Vasundhara Volam +Date: Mon Jan 27 04:56:20 2020 -0500 + + bnxt_en: Refactor bnxt_dl_register() + + Define bnxt_dl_params_register() and bnxt_dl_params_unregister() + functions and move params register/unregister code to these newly + defined functions. 
This patch is in preparation to register + devlink irrespective of firmware spec. version in the next patch. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5313845f491f22761c675f7009dd77b5ae64172d +Author: Michael Chan +Date: Mon Jan 27 04:56:19 2020 -0500 + + bnxt_en: Disable workaround for lost interrupts on 575XX B0 and newer chips. + + The hardware bug has been fixed on B0 and newer chips, so disable the + workaround on these chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87d67f59d6a33f741b5c6a42fa01b99ea2b14b7d +Author: Pavan Chebbi +Date: Mon Jan 27 04:56:18 2020 -0500 + + bnxt_en: Periodically check and remove aged-out ntuple filters + + Currently the only time we check and remove expired filters is + when we are inserting new filters. + Improving the aRFS expiry handling by adding code to do the above + work periodically. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f47d0e19ae99329177423db80f86a601f8cd8e3e +Author: Michael Chan +Date: Mon Jan 27 04:56:17 2020 -0500 + + bnxt_en: Do not accept fragments for aRFS flow steering. + + In bnxt_rx_flow_steer(), if the dissected packet is a fragment, do not + proceed to create the ntuple filter and return error instead. Otherwise + we would create a filter with 0 source and destination ports because + the dissected ports would not be available for fragments. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c66c06c5e28a55920c87b386876461c139aa1878 +Author: Michael Chan +Date: Mon Jan 27 04:56:16 2020 -0500 + + bnxt_en: Support UDP RSS hashing on 575XX chips. + + 575XX (P5) chips have the same UDP RSS hashing capability as P4 chips, + so we can enable it on P5 chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 1d86859fdf31a0d50cc82b5d0d6bfb5fe98f6c00 +Author: Michael Chan +Date: Mon Jan 27 04:56:15 2020 -0500 + + bnxt_en: Remove the setting of dev_port. + + The dev_port is meant to distinguish the network ports belonging to + the same PCI function. Our devices only have one network port + associated with each PCI function and so we should not set it for + correctness. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 43a5107dc1acbf88a8a194beb9ff4e8563a2c7a3 +Author: Michael Chan +Date: Mon Jan 27 04:56:14 2020 -0500 + + bnxt_en: Improve bnxt_probe_phy(). + + If the 2nd parameter fw_dflt is not set, we are calling bnxt_probe_phy() + after the firmware has reset. There is no need to query the current + PHY settings from firmware as these settings may be different from + the ethtool settings that the driver will re-establish later. So + return earlier in bnxt_probe_phy() to save one firmware call. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 83d8f5e92d034a4c4a04d780107e73af31a38504 +Author: Michael Chan +Date: Mon Jan 27 04:56:13 2020 -0500 + + bnxt_en: Improve link up detection. + + In bnxt_update_phy_setting(), ethtool_get_link_ksettings() and + bnxt_disable_an_for_lpbk(), we inconsistently use netif_carrier_ok() + to determine link. Instead, we should use bp->link_info.link_up + which has the true link state. The netif_carrier state may be off + during self-test and while the device is being reset and may not always + reflect the true link state. + + By always using bp->link_info.link_up, the code is now more + consistent and more correct. Some unnecessary link toggles are + now prevented with this patch. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d061b2411d5f3d6272187ab734ce0640827fca13 +Author: Michael Chan +Date: Fri Jan 17 00:32:47 2020 -0500 + + bnxt_en: Do not treat DSN (Digital Serial Number) read failure as fatal. 
+ + DSN read can fail, for example on a kdump kernel without PCIe extended + config space support. If DSN read fails, don't set the + BNXT_FLAG_DSN_VALID flag and continue loading. Check the flag + to see if the stored DSN is valid before using it. Only VF reps + creation should fail without valid DSN. + + Fixes: 03213a996531 ("bnxt: move bp->switch_id initialization to PF probe") + Reported-by: Marc Smith + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fc7caa84e713f7627e171ab1e7c4b5be0dc9b3d +Author: Michael Chan +Date: Fri Jan 17 00:32:46 2020 -0500 + + bnxt_en: Fix ipv6 RFS filter matching logic. + + Fix bnxt_fltr_match() to match ipv6 source and destination addresses. + The function currently only checks ipv4 addresses and will not work + corrently on ipv6 filters. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ceb3284c588eee5ea256c70e4d8d7cf399b8134e +Author: Michael Chan +Date: Fri Jan 17 00:32:45 2020 -0500 + + bnxt_en: Fix NTUPLE firmware command failures. + + The NTUPLE related firmware commands are sent to the wrong firmware + channel, causing all these commands to fail on new firmware that + supports the new firmware channel. Fix it by excluding the 3 + NTUPLE firmware commands from the list for the new firmware channel. + + Fixes: 760b6d33410c ("bnxt_en: Add support for 2nd firmware message channel.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3071c51783b39d6a676d02a9256c3b3f87804285 +Author: Jonathan Lemon +Date: Thu Jan 9 11:35:42 2020 -0800 + + bnxt: Detach page from page pool before sending up the stack + + When running in XDP mode, pages come from the page pool, and should + be freed back to the same pool or specifically detached. Currently, + when the driver re-initializes, the page pool destruction is delayed + forever since it thinks there are oustanding pages. 
+ + Fixes: 322b87ca55f2 ("bnxt_en: add page_pool support") + Signed-off-by: Jonathan Lemon + Reviewed-by: Andy Gospodarek + Signed-off-by: David S. Miller + +commit 737d7a6c55964955604b9de398dac3791bab5a64 +Author: Vikas Gupta +Date: Thu Jan 2 21:18:11 2020 +0530 + + bnxt_en: Call recovery done after reset is successfully done + + Return EINPROGRESS to devlink health reporter recover as we are not yet + done and call devlink_health_reporter_recovery_done once reset is + successfully completed from workqueue context. + + Signed-off-by: Vikas Gupta + Signed-off-by: David S. Miller + +commit 6adc4601c2a1ac87b4ab8ed0cb55db6efd0264e8 +Author: Jonathan Lemon +Date: Tue Dec 10 08:39:46 2019 -0800 + + bnxt: apply computed clamp value for coalece parameter + + After executing "ethtool -C eth0 rx-usecs-irq 0", the box becomes + unresponsive, likely due to interrupt livelock. It appears that + a minimum clamp value for the irq timer is computed, but is never + applied. + + Fix by applying the corrected clamp value. + + Fixes: 74706afa712d ("bnxt_en: Update interrupt coalescing logic.") + Signed-off-by: Jonathan Lemon + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 0290bd291cc0e0488e35e66bf39efcd7d9d9122b +Author: Michael S. Tsirkin +Date: Tue Dec 10 09:23:51 2019 -0500 + + netdev: pass the stuck queue to the timeout handler + + This allows incrementing the correct timeout statistic without any mess. + Down the road, devices can learn to reset just the specific queue. 
+ + The patch was generated with the following script: + + use strict; + use warnings; + + our $^I = '.bak'; + + my @work = ( + ["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"], + ["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"], + ["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"], + ["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"], + ["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"], + ["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"], + ["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"], + ["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"], + ["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"], + ["drivers/net/appletalk/cops.c", "cops_timeout"], + ["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"], + ["drivers/net/arcnet/arcnet.c", "arcnet_timeout"], + ["drivers/net/arcnet/com20020.c", "arcnet_timeout"], + ["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"], + ["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"], + ["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"], + ["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"], + ["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"], + ["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"], + ["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"], + ["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"], + ["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"], + ["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"], + ["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"], + ["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"], + ["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"], + ["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"], + ["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"], + ["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"], + 
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"], + ["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"], + ["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"], + ["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"], + ["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"], + ["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"], + ["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"], + ["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"], + ["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"], + ["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"], + ["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"], + ["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"], + ["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"], + ["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"], + ["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"], + ["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"], + ["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"], + ["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"], + ["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"], + ["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"], + ["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"], + 
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"], + ["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"], + ["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"], + ["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"], + ["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"], + ["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"], + ["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"], + ["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"], + ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"], + ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"], + ["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"], + ["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"], + ["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"], + ["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"], + ["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"], + ["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"], + ["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"], + ["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"], + ["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"], + ["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"], + ["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"], + ["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"], + ["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"], + ["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"], + ["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"], + ["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"], + ["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"], + ["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"], + ["drivers/net/ethernet/dlink/dl2k.c", 
"rio_tx_timeout"], + ["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"], + ["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"], + ["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"], + ["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"], + ["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"], + ["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"], + ["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"], + ["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"], + ["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"], + ["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"], + ["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"], + ["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"], + ["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"], + ["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"], + ["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"], + ["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"], + ["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"], + ["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"], + ["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"], + ["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"], + ["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"], + ["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"], + ["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"], + ["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"], + ["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"], + ["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"], + ["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"], + ["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"], + ["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"], + 
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"], + ["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"], + ["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"], + ["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"], + ["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"], + ["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"], + ["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"], + ["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"], + ["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"], + ["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"], + ["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"], + ["drivers/net/ethernet/jme.c", "jme_tx_timeout"], + ["drivers/net/ethernet/korina.c", "korina_tx_timeout"], + ["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"], + ["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"], + ["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"], + ["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"], + ["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"], + ["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"], + ["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"], + ["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"], + ["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"], + ["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"], + ["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"], + ["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"], + ["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"], + ["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"], + ["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"], + 
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"], + ["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"], + ["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"], + ["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"], + ["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"], + ["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"], + ["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"], + ["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"], + ["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"], + ["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"], + ["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"], + ["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"], + ["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"], + ["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"], + ["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"], + ["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"], + ["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"], + ["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"], + ["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"], + ["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"], + ["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"], + ["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"], + ["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"], + ["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"], + ["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"], + ["drivers/net/ethernet/realtek/atp.c", "tx_timeout"], + ["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"], + ["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"], + ["drivers/net/ethernet/renesas/sh_eth.c", 
"sh_eth_tx_timeout"], + ["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"], + ["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"], + ["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"], + ["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"], + ["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"], + ["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"], + ["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"], + ["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"], + ["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"], + ["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"], + ["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"], + ["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"], + ["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"], + ["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"], + ["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"], + ["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"], + ["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"], + ["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"], + ["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"], + ["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"], + ["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"], + ["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"], + ["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"], + ["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"], + ["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"], + ["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"], + ["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"], + ["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"], + ["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"], + ["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"], + ["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"], 
+ ["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"], + ["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"], + ["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"], + ["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"], + ["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"], + ["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"], + ["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"], + ["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"], + ["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"], + ["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"], + ["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"], + ["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"], + ["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"], + ["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"], + ["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"], + ["drivers/net/slip/slip.c", "sl_tx_timeout"], + ["include/linux/usb/usbnet.h", "usbnet_tx_timeout"], + ["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"], + ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], + ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], + ["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"], + ["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"], + ["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"], + ["drivers/net/usb/catc.c", "catc_tx_timeout"], + ["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"], + ["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"], + ["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"], + ["drivers/net/usb/hso.c", "hso_net_tx_timeout"], + ["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"], + ["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"], + ["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"], + ["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"], + ["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"], 
+ ["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"], + ["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"], + ["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"], + ["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"], + ["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"], + ["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"], + ["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"], + ["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"], + ["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"], + ["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"], + ["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"], + ["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"], + ["drivers/net/wan/cosa.c", "cosa_net_timeout"], + ["drivers/net/wan/farsync.c", "fst_tx_timeout"], + ["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"], + ["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"], + ["drivers/net/wan/x25_asy.c", "x25_asy_timeout"], + ["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"], + ["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"], + ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], + ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], + ["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"], + ["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"], + ["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"], + ["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"], + ["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"], + ["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"], + ["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"], + ["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"], + ["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"], + ["drivers/net/wireless/quantenna/qtnfmac/core.h", 
"qtnf_netdev_tx_timeout"], + ["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"], + ["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"], + ["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"], + ["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"], + ["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"], + ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], + ["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"], + ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], + ["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"], + ["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"], + ["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"], + ["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"], + ["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"], + ["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"], + ["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"], + ["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"], + ["drivers/tty/synclink.c", "hdlcdev_tx_timeout"], + ["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"], + ["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"], + ["net/atm/lec.c", "lec_tx_timeout"], + ["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"] + ); + + for my $p (@work) { + my @pair = @$p; + my $file = $pair[0]; + my $func = $pair[1]; + print STDERR $file , ": ", $func,"\n"; + our @ARGV = ($file); + while () { + if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) { + print STDERR "found $1+$2 in $file\n"; + } + if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) { + print STDERR "$func found in $file\n"; + } + print; + } + } + + where the list of files and functions is simply from: + + git grep ndo_tx_timeout, with manual addition of headers + in the rare cases where the function is from a header, + then manually changing the few places which actually + call ndo_tx_timeout. 
+ + Signed-off-by: Michael S. Tsirkin + Acked-by: Heiner Kallweit + Acked-by: Jakub Kicinski + Acked-by: Shannon Nelson + Reviewed-by: Martin Habets + + changes from v9: + fixup a forward declaration + changes from v9: + more leftovers from v3 change + changes from v8: + fix up a missing direct call to timeout + rebased on net-next + changes from v7: + fixup leftovers from v3 change + changes from v6: + fix typo in rtl driver + changes from v5: + add missing files (allow any net device argument name) + changes from v4: + add a missing driver header + changes from v3: + change queue # to unsigned + Changes from v2: + added headers + Changes from v1: + Fix errors found by kbuild: + generalize the pattern a bit, to pick up + a couple of instances missed by the previous + version. + + Signed-off-by: David S. Miller + +commit 7e334fc8003c7a38372cc98e7be6082670a47d29 +Author: Vasundhara Volam +Date: Tue Dec 10 02:49:13 2019 -0500 + + bnxt_en: Add missing devlink health reporters for VFs. + + The VF driver also needs to create the health reporters since + VFs are also involved in firmware reset and recovery. Modify + bnxt_dl_register() and bnxt_dl_unregister() so that they can + be called by the VFs to register/unregister devlink. Only the PF + will register the devlink parameters. With devlink registered, + we can now create the health reporters on the VFs. + + Fixes: 6763c779c2d8 ("bnxt_en: Add new FW devlink_health_reporter") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 937f188c1f4f89b3fa93ba31fc8587dc1fb14a22 +Author: Vasundhara Volam +Date: Tue Dec 10 02:49:12 2019 -0500 + + bnxt_en: Fix the logic that creates the health reporters. + + Fix the logic to properly check the fw capabilities and create the + devlink health reporters only when needed. The current code creates + the reporters unconditionally as long as bp->fw_health is valid, and + that's not correct. 
+ + Call bnxt_dl_fw_reporters_create() directly from the init and reset + code path instead of from bnxt_dl_register(). This allows the + reporters to be adjusted when capabilities change. The same + applies to bnxt_dl_fw_reporters_destroy(). + + Fixes: 6763c779c2d8 ("bnxt_en: Add new FW devlink_health_reporter") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0797c10d2d1fa0d6f14612404781b348fc757c3e +Author: Vasundhara Volam +Date: Tue Dec 10 02:49:11 2019 -0500 + + bnxt_en: Remove unnecessary NULL checks for fw_health + + After fixing the allocation of bp->fw_health in the previous patch, + the driver will not go through the fw reset and recovery code paths + if bp->fw_health allocation fails. So we can now remove the + unnecessary NULL checks. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8280b38e01f71e0f89389ccad3fa43b79e57c604 +Author: Vasundhara Volam +Date: Tue Dec 10 02:49:10 2019 -0500 + + bnxt_en: Fix bp->fw_health allocation and free logic. + + bp->fw_health needs to be allocated for either the firmware initiated + reset feature or the driver initiated error recovery feature. The + current code is not allocating bp->fw_health for all the necessary cases. + This patch corrects the logic to allocate bp->fw_health correctly when + needed. If allocation fails, we clear the feature flags. + + We also add the missing kfree(bp->fw_health) when the driver is + unloaded. If we get an async reset message from the firmware, we also + need to make sure that we have a valid bp->fw_health before proceeding. + + Fixes: 07f83d72d238 ("bnxt_en: Discover firmware error recovery capabilities.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c74751f4c39232c31214ec6a3bc1c7e62f5c728b +Author: Vasundhara Volam +Date: Tue Dec 10 02:49:09 2019 -0500 + + bnxt_en: Return error if FW returns more data than dump length + + If any change happened in the configuration of VF in VM while + collecting live dump, there could be a race and firmware can return + more data than allocated dump length. Fix it by keeping track of + the accumulated core dump length copied so far and abort the copy + with error code if the next chunk of core dump will exceed the + original dump length. + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 325f85f37e5b35807d86185bdf2c64d2980c44ba +Author: Michael Chan +Date: Tue Dec 10 02:49:08 2019 -0500 + + bnxt_en: Free context memory in the open path if firmware has been reset. + + This will trigger new context memory to be rediscovered and allocated + during the re-probe process after a firmware reset. Without this, the + newly reset firmware does not have valid context memory and the driver + will eventually fail to allocate some resources. + + Fixes: ec5d31e3c15d ("bnxt_en: Handle firmware reset status during IF_UP.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0c722ec0a289c7f6b53f89bad1cfb7c4db3f7a62 +Author: Michael Chan +Date: Tue Dec 10 02:49:07 2019 -0500 + + bnxt_en: Fix MSIX request logic for RDMA driver. + + The logic needs to check both bp->total_irqs and the reserved IRQs in + hw_resc->resv_irqs if applicable and see if both are enough to cover + the L2 and RDMA requested vectors. The current code is only checking + bp->total_irqs and can fail in some code paths, such as the TX timeout + code path with the RDMA driver requesting vectors after recovery. In + this code path, we have not reserved enough MSIX resources for the + RDMA driver yet. 
+ + Fixes: 75720e6323a1 ("bnxt_en: Keep track of reserved IRQs.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d168f328fecc9f401b54db18ff4ddd4bca7b161d +Author: Vasundhara Volam +Date: Sat Nov 23 22:30:50 2019 -0500 + + bnxt_en: Add support for flashing the device via devlink + + Use the same bnxt_flash_package_from_file() function to support + devlink flash operation. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit c7e457f42c02066e49a6e03028c889aefbb8999b +Author: Michael Chan +Date: Sat Nov 23 22:30:49 2019 -0500 + + bnxt_en: Allow PHY settings on multi-function or NPAR PFs if allowed by FW. + + Currently, the driver does not allow PHY settings on a multi-function or + NPAR NIC whose port is shared by more than one function. Newer + firmware now allows PHY settings on some of these NICs. Check for + this new firmware setting and allow the user to set the PHY settings + accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit b1613e78e98d065fd3356d0b93df665b0740f652 +Author: Michael Chan +Date: Sat Nov 23 22:30:48 2019 -0500 + + bnxt_en: Add async. event logic for PHY configuration changes. + + If the link settings have been changed by another function sharing the + port, firmware will send us an async. message. In response, we will + call the new bnxt_init_ethtool_link_settings() function to update + the current settings. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 8119e49b68fa1ec778f9ec8be05b5492046100b2 +Author: Michael Chan +Date: Sat Nov 23 22:30:47 2019 -0500 + + bnxt_en: Refactor the initialization of the ethtool link settings. + + Refactor this logic in bnxt_probe_phy() into a separate function + bnxt_init_ethtool_link_settings(). It used to be that the settable + link settings will never be changed without going through ethtool. + So we only needed to do this once in bnxt_probe_phy(). 
Now, another + function sharing the port may change it and we may need to re-initialize + the ethtool settings again in run-time. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 8a60efd1decbaf9ef71d4296b75ff262e653bd34 +Author: Michael Chan +Date: Sat Nov 23 22:30:46 2019 -0500 + + bnxt_en: Skip disabling autoneg before PHY loopback when appropriate. + + New firmware allows PHY loopback to be set without disabling autoneg + first. Check this capability and skip disabling autoneg when + it is supported by firmware. Using this scheme, loopback will + always work even if the PHY only supports autoneg. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 1acefc9aedb3179fc9add0a21fa62c0aca08efc4 +Author: Michael Chan +Date: Sat Nov 23 22:30:45 2019 -0500 + + bnxt_en: Assign more RSS context resources to the VFs. + + The driver currently only assigns 1 RSS context to each VF. This works + for the Linux VF driver. But other drivers, such as DPDK, can make use + of additional RSS contexts. Modify the code to divide up and assign + RSS contexts to VFs just like other resources. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 3be8136ce14ea12c7b40f7ad20a5ff8aec339289 +Author: Michael Chan +Date: Sat Nov 23 22:30:44 2019 -0500 + + bnxt_en: Initialize context memory to the value specified by firmware. + + Some chips that need host context memory as a backing store requires + the memory to be initialized to a non-zero value. Query the + value from firmware and initialize the context memory accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit f9b69d7f62796b33657c98e0d3ca3be763f70fa4 +Author: Vasundhara Volam +Date: Sat Nov 23 22:30:43 2019 -0500 + + bnxt_en: Fix suspend/resume path on 57500 chips + + Driver calls HWRM_FUNC_RESET firmware call while resuming the device + which clears the context memory backing store. 
Because of which + allocating firmware resources would eventually fail. Fix it by freeing + all context memory during suspend and reallocate the memory during resume. + + Call bnxt_hwrm_queue_qportcfg() in resume path. This firmware call + is needed on the 57500 chips so that firmware will set up the proper + queue mapping in relation to the context memory. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit f92335d830059f3f9db950f0af49405d287924d5 +Author: Vasundhara Volam +Date: Sat Nov 23 22:30:42 2019 -0500 + + bnxt_en: Send FUNC_RESOURCE_QCAPS command in bnxt_resume() + + After driver unregister, firmware is erasing the information that + driver supports new resource management. Send FUNC_RESOURCE_QCAPS + command to inform the firmware that driver supports new resource + management while resuming from hibernation. Otherwise, we fallback + to the older resource allocation scheme. + + Also, move driver register after sending FUNC_RESOURCE_QCAPS command + to be consistent with the normal initialization sequence. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 2e882468fce263afef4a77ea4fe40808baaddae7 +Author: Vasundhara Volam +Date: Sat Nov 23 22:30:41 2019 -0500 + + bnxt_en: Combine 2 functions calling the same HWRM_DRV_RGTR fw command. + + Every time driver registers with firmware, driver is required to + register for async event notifications as well. These 2 calls + are done using the same firmware command and can be combined. + + We are also missing the 2nd step to register for async events + in the suspend/resume path and this will fix it. Prior to this, + we were getting only default notifications. + + ULP can register for additional async events for the RDMA driver, + so we add a parameter to the new function to only do step 2 when + it is called from ULP. 
+ + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit bdb3860236b3ec8bb0f55ddef6d62666a8b3b23e +Author: Vasundhara Volam +Date: Sat Nov 23 22:30:40 2019 -0500 + + bnxt_en: Do driver unregister cleanup in bnxt_init_one() failure path. + + In the bnxt_init_one() failure path, if the driver has already called + firmware to register the driver, it is not undoing the driver + registration. Add this missing step to unregister for correctness, + so that the firmware knows that the driver has unloaded. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit ef02af8c8ece3d6fb01fe267c1c7622399bc34f6 +Author: Michael Chan +Date: Sat Nov 23 22:30:39 2019 -0500 + + bnxt_en: Disable/enable Bus master during suspend/resume. + + Disable Bus master during suspend to prevent DMAs after the device + goes into D3hot state. The new 57500 devices may continue to DMA + from context memory after the system goes into D3hot state. This + may cause some PCIe errors on some system. Re-enable it during resume. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit fb4cd81e4c03efa54b82e81e2a4afc092c061384 +Author: Michael Chan +Date: Sat Nov 23 22:30:38 2019 -0500 + + bnxt_en: Add chip IDs for 57452 and 57454 chips. + + Fix BNXT_CHIP_NUM_5645X() to include 57452 and 56454 chip IDs, so + that these chips will be properly classified as P4 chips to take + advantage of the P4 fixes and features. + + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 642aebdee4a1f53b713becb3b7df8896fbaeda33 +Author: Pavan Chebbi +Date: Mon Nov 18 03:56:43 2019 -0500 + + bnxt_en: Abort waiting for firmware response if there is no heartbeat. + + This is especially beneficial during the NVRAM related firmware + commands that have longer timeouts. If the BNXT_STATE_FW_FATAL_COND + flag gets set while waiting for firmware response, abort and return + error. 
+ + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a2b31e27f6269af8bbda4be2199c2af7c4dcb5a3 +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:42 2019 -0500 + + bnxt_en: Add a warning message for driver initiated reset + + During loss of heartbeat, log this warning message. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 05069dd4c577f9b143dfd243d55834333c4470c5 +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:41 2019 -0500 + + bnxt_en: Return proper error code for non-existent NVM variable + + For NVM params that are not supported in the current NVM + configuration, return the error as -EOPNOTSUPP. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e4e38237d7e39e84d4db4a5cf0aa1ce7fbfaa5d6 +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:40 2019 -0500 + + bnxt_en: Report health status update after reset is done + + Report health status update to devlink health reporter, once + reset is completed. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e633a32935a315b8e1f742622dcb254076a42352 +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:39 2019 -0500 + + bnxt_en: Set MASTER flag during driver registration. + + The Linux driver is capable of being the master function to handle + resets, so we set the flag to let firmware know. Some other + drivers, such as DPDK, is not capable and will not set the flag. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0a3f4e4f342c070312d799f7998d2f916c502c6e +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:38 2019 -0500 + + bnxt_en: Extend ETHTOOL_RESET to hot reset driver. + + If firmware supports hot reset, extend ETHTOOL_RESET to support + hot reset driver which does not require a driver reload after + ETHTOOL_RESET. 
The driver will go through the same coordinated + reset sequence as a firmware initiated fatal/non-fatal reset. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5b306bde2b46964d604924ec085d619ffc331e09 +Author: Vasundhara Volam +Date: Mon Nov 18 03:56:37 2019 -0500 + + bnxt_en: Increase firmware response timeout for coredump commands. + + Use the larger HWRM_COREDUMP_TIMEOUT value for coredump related + data response from the firmware. These commands take longer than + normal commands. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 19b3751ffa713d04290effb26fe01009010f2206 +Author: Michael Chan +Date: Mon Nov 18 03:56:36 2019 -0500 + + bnxt_en: Improve RX buffer error handling. + + When hardware reports RX buffer errors, the latest 57500 chips do not + require reset. The packet is discarded by the hardware and the + ring will continue to operate. + + Also, add an rx_buf_errors counter for this type of error. It can help + the user to identify if the aggregation ring is too small. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 41136ab35888c4007c6aad2f86e35afb97003e69 +Author: Michael Chan +Date: Mon Nov 18 03:56:35 2019 -0500 + + bnxt_en: Update firmware interface spec to 1.10.1.12. + + The aRFS ring table interface has changed for the 57500 chips. Updating + it accordingly so it will work with the latest production firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 85192dbf4de08795afe2b88e52a36fc6abfc3dba +Author: Andrii Nakryiko +Date: Sun Nov 17 09:28:03 2019 -0800 + + bpf: Convert bpf_prog refcnt to atomic64_t + + Similarly to bpf_map's refcnt/usercnt, convert bpf_prog's refcnt to atomic64 + and remove artificial 32k limit. This allows to make bpf_prog's refcounting + non-failing, simplifying logic of users of bpf_prog_add/bpf_prog_inc. 
+ + Validated compilation by running allyesconfig kernel build. + + Suggested-by: Daniel Borkmann + Signed-off-by: Andrii Nakryiko + Signed-off-by: Daniel Borkmann + Link: https://lore.kernel.org/bpf/20191117172806.2195367-3-andriin@fb.com + +commit 3128aad163d36d99247fc76b4efbbba2d5465cc4 +Author: Venkat Duvvuru +Date: Wed Nov 13 13:51:19 2019 -0500 + + bnxt_en: Fix array overrun in bnxt_fill_l2_rewrite_fields(). + + Fix the array overrun while keeping the eth_addr and eth_addr_mask + pointers as u16 to avoid unaligned u16 access. These were overlooked + when modifying the code to use u16 pointer for proper alignment. + + Fixes: 90f906243bf6 ("bnxt_en: Add support for L2 rewrite") + Reported-by: Olof Johansson + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6a68749dbd777b832e1d84265bd6d8b39d1843ac +Author: Pavan Chebbi +Date: Thu Oct 31 01:07:51 2019 -0400 + + bnxt_en: Call bnxt_ulp_stop()/bnxt_ulp_start() during suspend/resume. + + Inform the RDMA driver to stop/start during suspend/resume. The + RDMA driver needs to stop and start just like error recovery. + + Signed-off-by: Pavan Chebbi + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f3a6d206c25ad9490f3a3c6d62baba9504227a75 +Author: Vasundhara Volam +Date: Thu Oct 31 01:07:50 2019 -0400 + + bnxt_en: Call bnxt_ulp_stop()/bnxt_ulp_start() during error recovery. + + Notify the RDMA driver by calling the bnxt_ulp_stop()/bnxt_ulp_start() + hooks during error recovery. The current ULP IRQ start/stop + sequence in error recovery (which is insufficient) is replaced with the + full reset sequence when we call bnxt_ulp_stop()/bnxt_ulp_start(). + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit aa46dffff452f7c6d907c4e6a0062e2c53a87fc0 +Author: Vasundhara Volam +Date: Thu Oct 31 01:07:49 2019 -0400 + + bnxt_en: Improve bnxt_ulp_stop()/bnxt_ulp_start() call sequence. 
+ + We call bnxt_ulp_stop() to notify the RDMA driver that some error or + imminent reset is about to happen. After that we always call + some variants of bnxt_close(). + + In the next patch, we will integrate the recently added error + recovery with the RDMA driver. In response to ulp_stop, the + RDMA driver may free MSIX vectors and that will also trigger + bnxt_close(). To avoid bnxt_close() from being called twice, + we set a new flag after ulp_stop is called. If the RDMA driver + frees MSIX vectors while the new flag is set, we will not call + bnxt_close(), knowing that it will happen in due course. + + With this change, we must make sure that the bnxt_close() call + after ulp_stop will reset IRQ. Modify bnxt_reset_task() + accordingly if we call ulp_stop. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 627c89d00fb969f9b3b4f3156716149631d2796c +Author: Sriharsha Basavapatna +Date: Thu Oct 31 01:07:48 2019 -0400 + + bnxt_en: flow_offload: offload tunnel decap rules via indirect callbacks + + The decap (VXLAN tunnel) flow rules are not getting offloaded with + upstream kernel. This is because TC block callback infrastructure has + been updated to use indirect callbacks to get offloaded rules from + other higher level devices (such as tunnels), instead of ndo_setup_tc(). + Since the decap rules are applied to the tunnel devices (e.g, vxlan_sys), + the driver should register for indirect TC callback with tunnel devices + to get the rules for offloading. This patch updates the driver to + register and process indirect TC block callbacks from VXLAN tunnels. + + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9b9eb518e3383d94c8b81ff403d524f2cee5b6b9 +Author: Somnath Kotur +Date: Thu Oct 31 01:07:47 2019 -0400 + + bnxt_en: Add support for NAT(L3/L4 rewrite) + + Provides support for modifying L3/L4 Header parameters to support NAT. 
+ Sets the appropriate fields/bits in cfa_flow_alloc cmd. + + Sample cmd for offloading an IPv4 flow with SNAT: + + ovs-ofctl add-flow ovsbr0 "ip,nw_src=192.168.201.44 \ + actions=mod_nw_src:203.31.220.144,output:p7p1" + + Replace 'nw_src' with 'nw_dst' in above cmd for DNAT with IPv4 + + Sample cmd for offloading an IPv4 flow with SNAPT: + + ovs-ofctl add-flow ovsbr0 "ip,nw_src=192.168.201.44 \ + actions=mod_nw_src:203.31.220.144, mod_tp_src:6789,output:p7p1" + + Similar to DNAT, replace 'tp_src' with 'tp_dst' for offloading flow + with DNAPT + + Sample cmd for offloading an IPv6 flow with SNAT: + + ovs-ofctl add-flow ovsbr0 "ipv6, ipv6_src=2001:5c0:9168::2/64 \ + actions=load:0x1->NXM_NX_IPV6_SRC[0..63], \ + load:0x20010db801920000->NXM_NX_IPV6_SRC[64..127],output:p7p1" + + Replace 'SRC' with DST' above for IPv6 DNAT + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 08f8280e8788202a67a359952cd436707f8789bd +Author: Somnath Kotur +Date: Thu Oct 31 01:07:46 2019 -0400 + + bnxt: Avoid logging an unnecessary message when a flow can't be offloaded + + For every single case where bnxt_tc_can_offload() can fail, we are + logging a user friendly descriptive message anyway, but because of the + path it would take in case of failure, another redundant error message + would get logged. Just freeing the node and returning from the point of + failure should suffice. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 90f906243bf633f07757467506dfab3422b43ca2 +Author: Venkat Duvvuru +Date: Thu Oct 31 01:07:45 2019 -0400 + + bnxt_en: Add support for L2 rewrite + + This patch adds support for packet edit offload of L2 fields (src mac & + dst mac, also referred as L2 rewrite). Only when the mask is fully exact + match for a field, the command is sent down to the adapter to offload + such a flow. Otherwise, an error is returned. 
+ + v2: Fix pointer alignment issue in bnxt_fill_l2_rewrite_fields() [MChan] + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0b0eacf3c83cb292c6eef55c76d5138c9302dc20 +Author: Vasundhara Volam +Date: Thu Oct 31 15:38:52 2019 +0530 + + bnxt_en: Add support to collect crash dump via ethtool + + Driver supports 2 types of core dumps. + + 1. Live dump - Firmware dump when system is up and running. + 2. Crash dump - Dump which is collected during firmware crash + that can be retrieved after recovery. + Crash dump is currently supported only on specific 58800 chips + which can be retrieved using OP-TEE API only, as firmware cannot + access this region directly. + + User needs to set the dump flag using following command before + initiating the dump collection: + + $ ethtool -W|--set-dump eth0 N + + Where N is "0" for live dump and "1" for crash dump + + Command to collect the dump after setting the flag: + + $ ethtool -w eth0 data Filename + + v3: Modify set_dump to support even when CONFIG_TEE_BNXT_FW=n. + Also change log message to netdev_info(). + + Cc: Jakub Kicinski + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: Sheetal Tigadoli + Signed-off-by: David S. Miller + +commit e07ab2021eb6b7123ec66ae1dc019afae566a56c +Author: Vasundhara Volam +Date: Thu Oct 31 15:38:51 2019 +0530 + + bnxt_en: Add support to invoke OP-TEE API to reset firmware + + In error recovery process when firmware indicates that it is + completely down, initiate a firmware reset by calling OP-TEE API. + + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: Sheetal Tigadoli + Signed-off-by: David S. Miller + +commit acda6180e86ba9e0026287d65f30d1e2b0c8882a +Author: Saurav Girepunje +Date: Tue Oct 29 01:46:35 2019 +0530 + + broadcom: bnxt: Fix use true/false for bool + + Use true/false for bool type in bnxt_timer function. 
+ + Signed-off-by: Saurav Girepunje + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit f6824308c4be25ba024ab942a6135aa0356acaea +Author: Vasundhara Volam +Date: Mon Oct 21 01:34:29 2019 -0400 + + bnxt_en: Avoid disabling pci device in bnxt_remove_one() for already disabled device. + + With the recently added error recovery logic, the device may already + be disabled if the firmware recovery is unsuccessful. In + bnxt_remove_one(), check that the device is still enabled first + before calling pci_disable_device(). + + Fixes: 3bc7d4a352ef ("bnxt_en: Add BNXT_STATE_IN_FW_RESET state.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit f255ed1c4e4c5ed8171b6e81dce1297df1f1b60c +Author: Vasundhara Volam +Date: Mon Oct 21 01:34:28 2019 -0400 + + bnxt_en: Minor formatting changes in FW devlink_health_reporter + + Minor formatting changes to diagnose cb for FW devlink health + reporter. + + Suggested-by: Jiri Pirko + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit c6a9e7aa2e8b15402022a15625284069d4fd6df0 +Author: Vasundhara Volam +Date: Mon Oct 21 01:34:27 2019 -0400 + + bnxt_en: Adjust the time to wait before polling firmware readiness. + + When firmware indicates that driver needs to invoke firmware reset + which is common for both error recovery and live firmware reset path, + driver needs a different time to wait before polling for firmware + readiness. + + Modify the wait time to fw_reset_min_dsecs, which is initialised to + correct timeout for error recovery and firmware reset. 
+ + Fixes: 4037eb715680 ("bnxt_en: Add a new BNXT_FW_RESET_STATE_POLL_FW_DOWN state.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit 83a46a82b96c1928ad82958752523fb0c7d9fcce +Author: Michael Chan +Date: Mon Oct 21 01:34:26 2019 -0400 + + bnxt_en: Fix devlink NVRAM related byte order related issues. + + The current code does not do endian swapping between the devlink + parameter and the internal NVRAM representation. Define a union to + represent the little endian NVRAM data and add 2 helper functions to + copy to and from the NVRAM data with the proper byte swapping. + + Fixes: 782a624d00fa ("bnxt_en: Add bnxt_en initial port params table and register it") + Cc: Jiri Pirko + Reviewed-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit c329230ce886f449a6e559b636096b75ab00d18a +Author: Vasundhara Volam +Date: Mon Oct 21 01:34:25 2019 -0400 + + bnxt_en: Fix the size of devlink MSIX parameters. + + The current code that rounds up the NVRAM parameter bit size to the next + byte size for the devlink parameter is not always correct. The MSIX + devlink parameters are 4 bytes and we don't get the correct size + using this method. + + Fix it by adding a new dl_num_bytes member to the bnxt_dl_nvm_param + structure which statically provides bytesize information according + to the devlink parameter type definition. + + Fixes: 782a624d00fa ("bnxt_en: Add bnxt_en initial port params table and register it") + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: Jakub Kicinski + +commit e7a981050a7fb9a14b652365c00d9c5a025704ce +Author: Jiri Pirko +Date: Thu Oct 10 15:18:49 2019 +0200 + + devlink: propagate extack down to health reporter ops + + During health reporter operations, driver might want to fill-up + the extack message, so propagate extack down to the health reporter ops. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. 
Miller + +commit 4037eb715680caa3d80075fb54dbc35d79d5f9ff +Author: Vasundhara Volam +Date: Sat Sep 14 00:01:41 2019 -0400 + + bnxt_en: Add a new BNXT_FW_RESET_STATE_POLL_FW_DOWN state. + + This new state is required when firmware indicates that the error + recovery process requires polling for firmware state to be completely + down before initiating reset. For example, firmware may take some + time to collect the crash dump before it is down and ready to be + reset. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 72e0c9f91238f1f5f22954be6aea535d1d5fbf31 +Author: Michael Chan +Date: Sat Sep 14 00:01:40 2019 -0400 + + bnxt_en: Update firmware interface spec. to 1.10.0.100. + + Some error recovery updates to the spec., among other minor changes. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 57a8730b1f7a0be7bf8a0a0bb665329074ba764f +Author: Vasundhara Volam +Date: Sat Sep 14 00:01:39 2019 -0400 + + bnxt_en: Increase timeout for HWRM_DBG_COREDUMP_XX commands + + Firmware coredump messages take much longer than standard messages, + so increase the timeout accordingly. + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 268d0895f1b9690755d91b6ced60c9d8d17a7567 +Author: Michael Chan +Date: Sat Sep 14 00:01:38 2019 -0400 + + bnxt_en: Don't proceed in .ndo_set_rx_mode() when device is not in open state. + + Check the BNXT_STATE_OPEN flag instead of netif_running() in + bnxt_set_rx_mode(). If the driver is going through any reset, such + as firmware reset or even TX timeout, it may not be ready to set the RX + mode and may crash. The new rx mode settings will be picked up when + the device is opened again later. + + Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit e72cb7d6245380acc11a24b75a865f7104ac8b33 +Author: Michael Chan +Date: Fri Aug 30 19:10:38 2019 -0400 + + bnxt_en: Fix compile error regression with CONFIG_BNXT_SRIOV not set. + + Add a new function bnxt_get_registered_vfs() to handle the work + of getting the number of registered VFs under #ifdef CONFIG_BNXT_SRIOV. + The main code will call this function and will always work correctly + whether CONFIG_BNXT_SRIOV is set or not. + + Fixes: 230d1f0de754 ("bnxt_en: Handle firmware reset.") + Reported-by: kbuild test robot + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit acfb50e4e773c9a5755a3c265c7c20d37a8642e5 +Author: Vasundhara Volam +Date: Thu Aug 29 23:55:05 2019 -0400 + + bnxt_en: Add FW fatal devlink_health_reporter. + + Health show command example and output: + + $ devlink health show pci/0000:af:00.0 reporter fw_fatal + + pci/0000:af:00.0: + name fw_fatal + state healthy error 1 recover 1 grace_period 0 auto_recover true + + Fatal events from firmware or missing periodic heartbeats will + be reported and recovery will be handled. + + We also turn on the support flags when we register with the firmware to + enable this health and recovery feature in the firmware. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d1db9e166bf6a50e1e6713f3fd3b4de6007e3671 +Author: Michael Chan +Date: Thu Aug 29 23:55:04 2019 -0400 + + bnxt_en: Add bnxt_fw_exception() to handle fatal firmware errors. + + This call will handle fatal firmware errors by forcing a reset on the + firmware. The master function driver will carry out the forced reset. + The sequence will go through the same bnxt_fw_reset_task() workqueue. + This fatal reset differs from the non-fatal reset at the beginning + stages. From the BNXT_FW_RESET_STATE_ENABLE_DEV state onwards where + the firmware is coming out of reset, it is practically identical to the + non-fatal reset. 
+ + The next patch will add the periodic heartbeat check and the devlink + reporter to report the fatal event and to initiate the bnxt_fw_exception() + call. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cbb51067a5f5fbae733283b67fc8013881eb4bb1 +Author: Michael Chan +Date: Thu Aug 29 23:55:03 2019 -0400 + + bnxt_en: Add RESET_FW state logic to bnxt_fw_reset_task(). + + This state handles driver initiated chip reset during error recovery. + Only the master function will perform this step during error recovery. + The next patch will add code to initiate this reset from the master + function. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b4fff2079d1080af7dcad8ad0e80cc89e1ee000c +Author: Michael Chan +Date: Thu Aug 29 23:55:02 2019 -0400 + + bnxt_en: Do not send firmware messages if firmware is in error state. + + Add a flag to mark that the firmware has encountered fatal condition. + The driver will not send any more firmware messages and will return + error to the caller. Fix up some clean up functions to continue + and not abort when the firmware message function returns error. + + This is preparation work to fully handle firmware error recovery + under fatal conditions. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2cd8696850450b750f278be06ee56eb51d84621c +Author: Vasundhara Volam +Date: Thu Aug 29 23:55:01 2019 -0400 + + bnxt_en: Retain user settings on a VF after RESET_NOTIFY event. + + Retain the VF MAC address, default VLAN, TX rate control, trust settings + of VFs after firmware reset. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 657a33c8a0a2342e91259b28356838dc89216b19 +Author: Vasundhara Volam +Date: Thu Aug 29 23:55:00 2019 -0400 + + bnxt_en: Add devlink health reset reporter. + + Add devlink health reporter for the firmware reset event. 
Once we get + the notification from firmware about the impending reset, the driver + will report this to devlink and the call to bnxt_fw_reset() will be + initiated to complete the reset sequence. + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 230d1f0de754b483ec6eefc1ca5aaeff2b6b9a4c +Author: Michael Chan +Date: Thu Aug 29 23:54:59 2019 -0400 + + bnxt_en: Handle firmware reset. + + Add the bnxt_fw_reset() main function to handle firmware reset. This + is triggered by firmware to initiate an orderly reset, for example + when a non-fatal exception condition has been detected. bnxt_fw_reset() + will first wait for all VFs to shutdown and then start the + bnxt_fw_reset_task() work queue to go through the sequence of reset, + re-probe, and re-initialization. + + The next patch will add the devlink reporter to start the sequence and + call bnxt_fw_reset(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2151fe0830fdb951f8ecfcfe67306fdef2366aa0 +Author: Michael Chan +Date: Thu Aug 29 23:54:58 2019 -0400 + + bnxt_en: Handle RESET_NOTIFY async event from firmware. + + This event from firmware signals a coordinated reset initiated by the + firmware. It may be triggered by some error conditions encountered + in the firmware or other orderly reset conditions. + + We store the parameters from this event. Subsequent patches will + add logic to handle reset itself using devlink reporters. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6763c779c2d8b568b2e174f3eeeaf644fa38b34d +Author: Vasundhara Volam +Date: Thu Aug 29 23:54:57 2019 -0400 + + bnxt_en: Add new FW devlink_health_reporter + + Create new FW devlink_health_reporter, to know the current health + status of FW. 
+ + Command example and output: + $ devlink health show pci/0000:af:00.0 reporter fw + + pci/0000:af:00.0: + name fw + state healthy error 0 recover 0 + + FW status: Healthy; Reset count: 1 + + Cc: Jiri Pirko + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3bc7d4a352efe5b596883ef16b769055320db1f6 +Author: Michael Chan +Date: Thu Aug 29 23:54:56 2019 -0400 + + bnxt_en: Add BNXT_STATE_IN_FW_RESET state. + + The new flag will be set in subsequent patches when firmware is + going through reset. If bnxt_close() is called while the new flag + is set, the FW reset sequence will have to be aborted because the + NIC is prematurely closed before FW reset has completed. We also + reject SRIOV configurations while FW reset is in progress. + + v2: No longer drop rtnl_lock() in close and wait for FW reset to complete. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7e914027f757b656cd681ba4fe75f3984531ee50 +Author: Michael Chan +Date: Thu Aug 29 23:54:55 2019 -0400 + + bnxt_en: Enable health monitoring. + + Handle the async event from the firmware that enables firmware health + monitoring. Store initial health metrics. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9ffbd67734909ca8bb099e62f06387649b43d5a8 +Author: Michael Chan +Date: Thu Aug 29 23:54:54 2019 -0400 + + bnxt_en: Pre-map the firmware health monitoring registers. + + Pre-map the GRC registers for periodic firmware health monitoring. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 07f83d72d238f5d41b03d6142641129e8a7a0ec4 +Author: Michael Chan +Date: Thu Aug 29 23:54:53 2019 -0400 + + bnxt_en: Discover firmware error recovery capabilities. + + Call the new firmware API HWRM_ERROR_RECOVERY_QCFG if it is supported + to discover the firmware health and recovery capabilities and settings. 
+ This feature allows the driver to reset the chip if firmware crashes and + becomes unresponsive. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ec5d31e3c15d5233b491400133c67f78a320062c +Author: Michael Chan +Date: Thu Aug 29 23:54:52 2019 -0400 + + bnxt_en: Handle firmware reset status during IF_UP. + + During IF_UP, newer firmware has a new status flag that indicates that + firmware has reset. Add new function bnxt_fw_init_one() to re-probe the + firmware and re-setup VF resources on the PF if necessary. If the + re-probe fails, set a flag to prevent bnxt_open() from proceeding again. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 91b9be487001d344a39c453ade6cdbd125e06208 +Author: Vasundhara Volam +Date: Thu Aug 29 23:54:51 2019 -0400 + + bnxt_en: Register buffers for VFs before reserving resources. + + When VFs need to be reconfigured dynamically after firmware reset, the + configuration sequence on the PF needs to be changed to register the VF + buffers first. Otherwise, some VF firmware commands may not succeed as + there may not be PF buffers ready for the re-directed firmware commands. + + This sequencing did not matter much before when we only supported + the normal bring-up of VFs. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 702d5011ab5e7b9afe44058d33a89d1501645a10 +Author: Michael Chan +Date: Thu Aug 29 23:54:50 2019 -0400 + + bnxt_en: Refactor bnxt_sriov_enable(). + + Refactor the hardware/firmware configuration portion in + bnxt_sriov_enable() into a new function bnxt_cfg_hw_sriov(). This + new function can be called after a firmware reset to reconfigure the + VFs previously enabled. + + v2: straight refactor of the code. Reordering done in the next patch. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit ba642ab773db97c32293547485f562d2dfc06666 +Author: Michael Chan +Date: Thu Aug 29 23:54:49 2019 -0400 + + bnxt_en: Prepare bnxt_init_one() to be called multiple times. + + In preparation for the new firmware reset feature, some of the logic + in bnxt_init_one() and related functions will be called again after + firmware has reset. Reset some of the flags and capabilities so that + everything that can change can be re-initialized. Refactor some + functions to probe firmware versions and capabilities. Check some + buffers before allocating as they may have been allocated previously. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5bedb5296e33e889818d77c8ec69040481fab157 +Author: Michael Chan +Date: Thu Aug 29 23:54:48 2019 -0400 + + bnxt_en: Suppress all error messages in hwrm_do_send_msg() in silent mode. + + If the silent parameter is set, suppress all messages when there is + no response from firmware. When polling for firmware to come out of + reset, no response may be normal and we want to suppress the error + messages. Also, don't poll for the firmware DMA response if Bus Master + is disabled. This is in preparation for error recovery when firmware + may be in error or reset state or Bus Master is disabled. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a798302d56f56fb7ad6a01f64f495aeafeb6c0f0 +Author: Michael Chan +Date: Thu Aug 29 23:54:47 2019 -0400 + + bnxt_en: Simplify error checking in the SR-IOV message forwarding functions. + + There are 4 functions handling message forwarding for SR-IOV. They + check for non-zero firmware response code and then return -1. There + is no need to do this anymore. The main messaging function will + now return standard error code. Since we don't need to examine the + response, we can use the hwrm_send_message() variant which will + take the mutex automatically. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit d4f1420d365633490aa134abfe408453d1c7c238 +Author: Michael Chan +Date: Thu Aug 29 23:54:46 2019 -0400 + + bnxt_en: Convert error code in firmware message response to standard code. + + The main firmware messaging function returns the firmware defined error + code and many callers have to convert to standard error code for proper + propagation to userspace. Convert bnxt_hwrm_do_send_msg() to return + standard error code so we can do away with all the special error code + handling by the many callers. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a935cb7ec449bca1adf806d7fb00f5032b63c6e0 +Author: Michael Chan +Date: Thu Aug 29 23:54:45 2019 -0400 + + bnxt_en: Remove the -1 error return code from bnxt_hwrm_do_send_msg(). + + Replace the non-standard -1 code with -EBUSY when there is no firmware + response after waiting for the maximum timeout. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b3b0ddd07e63d564a3d5500938913805d06a1682 +Author: Michael Chan +Date: Thu Aug 29 23:54:44 2019 -0400 + + bnxt_en: Use a common function to print the same ethtool -f error message. + + The same message is printed 3 times in the code, so use a common function + to do that. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbbdbc6473070dcb3ee1d69cf1c49ff78677d716 +Author: Michael Chan +Date: Fri Aug 23 01:51:41 2019 -0400 + + bnxt_en: Fix allocation of zero statistics block size regression. + + Recent commit added logic to determine the appropriate statistics block + size to allocate and the size is stored in bp->hw_ring_stats_size. But + if the firmware spec is older than 1.6.0, it is 0 and not initialized. + This causes the allocation to fail with size 0 and bnxt_open() to + abort. Fix it by always initializing bp->hw_ring_stats_size to the + legacy default size value. 
+ + Fixes: 4e7485066373 ("bnxt_en: Allocate the larger per-ring statistics block for 57500 chips.") + Reported-by: Jonathan Lemon + Signed-off-by: Michael Chan + Tested-by: Jonathan Lemon + Acked-by: Jonathan Lemon + Signed-off-by: David S. Miller + +commit 9bf46566e80fd94845527d01ebd888eb49313551 +Author: Somnath Kotur +Date: Sat Aug 17 17:04:52 2019 -0400 + + bnxt_en: Fix to include flow direction in L2 key + + FW expects the driver to provide unique flow reference handles + for Tx or Rx flows. When a Tx flow and an Rx flow end up sharing + a reference handle, flow offload does not seem to work. + This could happen in the case of 2 flows having their L2 fields + wildcarded but in different direction. + Fix to incorporate the flow direction as part of the L2 key + + v2: Move the dir field to the end of the bnxt_tc_l2_key struct to + fix the warning reported by kbuild test robot . + There is existing code that initializes the structure using + nested initializer and will warn with the new u8 field added to + the beginning. The structure also packs nicer when this new u8 is + added to the end of the structure [MChan]. + + Fixes: abd43a13525d ("bnxt_en: Support for 64-bit flow handle.") + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 685ec6a81bb0d47faf1dba49437d5bdaede2733d +Author: Venkat Duvvuru +Date: Sat Aug 17 17:04:51 2019 -0400 + + bnxt_en: Use correct src_fid to determine direction of the flow + + Direction of the flow is determined using src_fid. For an RX flow, + src_fid is PF's fid and for TX flow, src_fid is VF's fid. Direction + of the flow must be specified, when getting statistics for that flow. + Currently, for DECAP flow, direction is determined incorrectly, i.e., + direction is initialized as TX for DECAP flow, instead of RX. Because + of which, stats are not reported for this DECAP flow, though it is + offloaded and there is traffic for that flow, resulting in flow age out. 
+ + This patch fixes the problem by determining the DECAP flow's direction + using correct fid. Set the flow direction in all cases for consistency + even if 64-bit flow handle is not used. + + Fixes: abd43a13525d ("bnxt_en: Support for 64-bit flow handle.") + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b703ba751dbb4bcd086509ed4b28102bc1670b35 +Author: Vasundhara Volam +Date: Sat Aug 17 17:04:50 2019 -0400 + + bnxt_en: Suppress HWRM errors for HWRM_NVM_GET_VARIABLE command + + For newly added NVM parameters, older firmware may not have the support. + Suppress the error message to avoid the unnecessary error message which is + triggered when devlink calls the driver during initialization. + + Fixes: 782a624d00fa ("bnxt_en: Add bnxt_en initial params table and register it.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dd2ebf3404c7c295014bc025dea23960960ceb1a +Author: Vasundhara Volam +Date: Sat Aug 17 17:04:49 2019 -0400 + + bnxt_en: Fix handling FRAG_ERR when NVM_INSTALL_UPDATE cmd fails + + If FW returns FRAG_ERR in response error code, driver is resending the + command only when HWRM command returns success. Fix the code to resend + NVM_INSTALL_UPDATE command with DEFRAG install flags, if FW returns + FRAG_ERR in its response error code. + + Fixes: cb4d1d626145 ("bnxt_en: Retry failed NVM_INSTALL_UPDATE with defragmentation flag enabled.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e8f267b063208372f7a329c6d5288d58944d873c +Author: Michael Chan +Date: Sat Aug 17 17:04:48 2019 -0400 + + bnxt_en: Improve RX doorbell sequence. + + When both RX buffers and RX aggregation buffers have to be + replenished at the end of NAPI, post the RX aggregation buffers first + before RX buffers. 
Otherwise, we may run into a situation where + there are only RX buffers without RX aggregation buffers for a split + second. This will cause the hardware to abort the RX packet and + report buffer errors, which will cause unnecessary cleanup by the + driver. + + Ringing the Aggregation ring doorbell first before the RX ring doorbell + will prevent some of these buffer errors. Use the same sequence during + ring initialization as well. + + Fixes: 697197e5a173 ("bnxt_en: Re-structure doorbells.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a46ecb116fb7f722fa8cb2da01959c36e4e10c41 +Author: Michael Chan +Date: Sat Aug 17 17:04:47 2019 -0400 + + bnxt_en: Fix VNIC clearing logic for 57500 chips. + + During device shutdown, the VNIC clearing sequence needs to be modified + to free the VNIC first before freeing the RSS contexts. The current + code is doing the reverse and we can get mis-directed RX completions + to CP ring ID 0 when the RSS contexts are freed and zeroed. The clearing + of RSS contexts is not required with the new sequence. + + Refactor the VNIC clearing logic into a new function bnxt_clear_vnic() + and do the chip specific VNIC clearing sequence. + + Fixes: 7b3af4f75b81 ("bnxt_en: Add RSS support for 57500 chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3a131e85043cf538d5e70c0f23f9d69a4dd642b9 +Author: Greg Kroah-Hartman +Date: Sat Aug 10 12:17:20 2019 +0200 + + bnxt: no need to check return value of debugfs_create functions + + When calling debugfs functions, there is no need to ever check the + return value. The function can work or not, but the code logic should + never do something different based on this. + + This cleans up a lot of unneeded code and logic around the debugfs + files, making all of this much simpler and easier to understand. + + Cc: Michael Chan + Cc: "David S. Miller" + Cc: netdev@vger.kernel.org + Signed-off-by: Greg Kroah-Hartman + Signed-off-by: David S. 
Miller + +commit b54c9d5bd6e38edac9ce3a3f95f14a1292b5268d +Author: Jonathan Lemon +Date: Tue Jul 30 07:40:33 2019 -0700 + + net: Use skb_frag_off accessors + + Use accessor functions for skb fragment's page_offset instead + of direct references, in preparation for bvec conversion. + + Signed-off-by: Jonathan Lemon + Signed-off-by: David S. Miller + +commit 49c98421e6ab33665e8ee7901218a712f5b0db2e +Author: Michael Chan +Date: Mon Jul 29 06:10:33 2019 -0400 + + bnxt_en: Add PCI IDs for 57500 series NPAR devices. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1dc88b97a020148c0eea6c595d511a19c2fab347 +Author: Michael Chan +Date: Mon Jul 29 06:10:32 2019 -0400 + + bnxt_en: Support all variants of the 5750X chip family. + + Define the 57508, 57504, and 57502 chip IDs that are all part of the + BNXT_CHIP_P5 family of chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7c3809181468a219aa2abd25910bd3b02b89b0de +Author: Michael Chan +Date: Mon Jul 29 06:10:31 2019 -0400 + + bnxt_en: Refactor bnxt_init_one() and turn on TPA support on 57500 chips. + + With the new TPA feature in the 57500 chips, we need to discover the + feature first before setting up the netdev features. Refactor the + firmware probe and init logic more cleanly into 2 functions and + make these calls before setting up the netdev features. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 78e7b86605b460e8b40622d16d840f9276d58627 +Author: Michael Chan +Date: Mon Jul 29 06:10:30 2019 -0400 + + bnxt_en: Support TPA counters on 57500 chips. + + Support the new expanded TPA v2 counters on 57500 B0 chips for + ethtool -S. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4e7485066373f3e9a87fa063b65d0838990753e5 +Author: Michael Chan +Date: Mon Jul 29 06:10:29 2019 -0400 + + bnxt_en: Allocate the larger per-ring statistics block for 57500 chips. 
+ + The new TPA implementation has additional TPA counters that extend the + per-ring statistics block. Allocate the proper size accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ee79566e65945dcf557bcfb9335e46fac67fb002 +Author: Michael Chan +Date: Mon Jul 29 06:10:28 2019 -0400 + + bnxt_en: Refactor ethtool ring statistics logic. + + The current code assumes that the per ring statistics counters are + fixed. In newer chips that support a newer version of TPA, the + TPA counters are also changed. Refactor the code by defining these + counter names in arrays so that it is easy to add a new array for + a new set of counters supported by the newer chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 67912c366d4bb0a9d108459e7c845cc7ba83f76f +Author: Michael Chan +Date: Mon Jul 29 06:10:27 2019 -0400 + + bnxt_en: Add hardware GRO setup function for 57500 chips. + + Add a more optimized hardware GRO function to setup the SKB on 57500 + chips. Some workaround code is no longer needed on 57500 chips and + the pseudo checksum is also calculated in hardware, so no need to + do the software pseudo checksum in the driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ec4d8e7cf024e42def027531676918048e5c7982 +Author: Michael Chan +Date: Mon Jul 29 06:10:26 2019 -0400 + + bnxt_en: Add TPA ID mapping logic for 57500 chips. + + The new TPA feature on 57500 supports a larger number of concurrent TPAs + (up to 1024) divided among the functions. We need to add some logic to + map the hardware TPA ID to a software index that keeps track of each TPA + in progress. A 1:1 direct mapping without translation would be too + wasteful as we would have to allocate 1024 TPA structures for each RX + ring on each PCI function. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit bfcd8d791ec18496772d117774398e336917f56e +Author: Michael Chan +Date: Mon Jul 29 06:10:25 2019 -0400 + + bnxt_en: Add fast path logic for TPA on 57500 chips. + + With all the previous refactoring, the TPA fast path can now be + modified slightly to support TPA on the new chips. The main + difference is that the agg completions are retrieved differently using + the bnxt_get_tpa_agg_p5() function on the new chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f45b7b78c619cd73c7ca25b68c6ba9653b8e4a0a +Author: Michael Chan +Date: Mon Jul 29 06:10:24 2019 -0400 + + bnxt_en: Set TPA GRO mode flags on 57500 chips properly. + + On 57500 chips, hardware GRO mode cannot be determined from the TPA + end, so we need to check bp->flags to determine if we are in hardware + GRO mode or not. Modify bnxt_set_features so that the TPA flags + in bp->flags don't change until the device is closed. This will ensure + that the fast path can safely rely on bp->flags to determine the + TPA mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bee5a188b71657092dc9eb1a529b4e502fe51444 +Author: Michael Chan +Date: Mon Jul 29 06:10:23 2019 -0400 + + bnxt_en: Refactor tunneled hardware GRO logic. + + The 2 GRO functions to set up the hardware GRO SKB fields for 2 + different hardware chips have practically identical logic for + tunneled packets. Refactor the logic into a separate bnxt_gro_tunnel() + function that can be used by both functions. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8fe88ce7ab3181a11989eb7a8bb00c42a2b7b3b0 +Author: Michael Chan +Date: Mon Jul 29 06:10:22 2019 -0400 + + bnxt_en: Handle standalone RX_AGG completions. + + On the new 57500 chips, these new RX_AGG completions are not coalesced + at the TPA_END completion. Handle these by storing them in the + array in the bnxt_tpa_info struct, as they are seen when processing + the CMPL ring. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 79632e9ba38671215fb193346ef6fb8db582744d +Author: Michael Chan +Date: Mon Jul 29 06:10:21 2019 -0400 + + bnxt_en: Expand bnxt_tpa_info struct to support 57500 chips. + + Add an aggregation array to bnxt_tpa_info struct to keep track of the + aggregation completions. The aggregation completions are not + completed at the TPA_END completion on 57500 chips so we need to + keep track of them. The array is only allocated on the new chips + when required. An agg_count field is also added to keep track of the + number of these completions. + + The maximum concurrent TPA is now discovered from firmware instead of + the hardcoded 64. Add a new bp->max_tpa to keep track of maximum + configured TPA. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4a228a3a5e58e5c05c6ffb5b430e5cb936865a8b +Author: Michael Chan +Date: Mon Jul 29 06:10:20 2019 -0400 + + bnxt_en: Refactor TPA logic. + + Refactor the TPA logic slightly, so that the code can be more easily + extended to support TPA on the new 57500 chips. In particular, the + logic to get the next aggregation completion is refactored into a + new function bnxt_get_agg() so that this operation is made more + generalized. This operation will be different on the new chip in TPA + mode. The logic to recycle the aggregation buffers has a new start + index parameter added for the same purpose. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 218a8a71d91ab9e52205f4098cf1fe121c98850e +Author: Michael Chan +Date: Mon Jul 29 06:10:19 2019 -0400 + + bnxt_en: Add TPA structure definitions for BCM57500 chips. + + The new chips have a slightly modified TPA interface for LRO/GRO_HW. + Modify the TPA structures so that the same structures can also be + used on the new chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2792b5b95ed5f38279da08f467a490687332324d +Author: Michael Chan +Date: Mon Jul 29 06:10:18 2019 -0400 + + bnxt_en: Update firmware interface spec. to 1.10.0.89. + + Among the changes are new CoS discard counters and new ctx_hw_stats_ext + struct for the latest 5750X B0 chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f06d0ca45827a5790d7508de4759aed976933d4d +Author: Yamin Friedman +Date: Tue Jul 23 10:22:47 2019 +0300 + + linux/dim: Fix overflow in dim calculation + + While using net_dim, a dim_sample was used without ever initializing the + comps value. Added use of DIV_ROUND_DOWN_ULL() to prevent potential + overflow, it should not be a problem to save the final result in an int + because after the division by epms the value should not be larger than a + few thousand. + + [ 1040.127124] UBSAN: Undefined behaviour in lib/dim/dim.c:78:23 + [ 1040.130118] signed integer overflow: + [ 1040.131643] 134718714 * 100 cannot be represented in type 'int' + + Fixes: 398c2b05bbee ("linux/dim: Add completions count to dim_sample") + Signed-off-by: Yamin Friedman + Signed-off-by: Leon Romanovsky + Acked-by: Saeed Mahameed + Signed-off-by: David S. Miller + +commit f521eaa9d2ef6d85bc6c318148f019e9f40fc344 +Author: Chuhong Yuan +Date: Tue Jul 23 21:19:29 2019 +0800 + + net: broadcom: Use dev_get_drvdata + + Instead of using to_pci_dev + pci_get_drvdata, + use dev_get_drvdata to make code simpler. + + Signed-off-by: Chuhong Yuan + Signed-off-by: David S. Miller + +commit d7840976e3915669382c62ddd1700960f348328e +Author: Matthew Wilcox (Oracle) +Date: Mon Jul 22 20:08:25 2019 -0700 + + net: Use skb accessors in network drivers + + In preparation for unifying the skb_frag and bio_vec, use the fine + accessors which already exist and use skb_frag_t instead of + struct skb_frag_struct. + + Signed-off-by: Matthew Wilcox (Oracle) + Signed-off-by: David S. 
Miller + +commit 9b3d15e6b05e0b916be5fbd915f90300a403098b +Author: Michael Chan +Date: Wed Jul 17 03:07:23 2019 -0400 + + bnxt_en: Fix VNIC accounting when enabling aRFS on 57500 chips. + + Unlike legacy chips, 57500 chips don't need additional VNIC resources + for aRFS/ntuple. Fix the code accordingly so that we don't reserve + and allocate additional VNICs on 57500 chips. Without this patch, + the driver is failing to initialize when it tries to allocate extra + VNICs. + + Fixes: ac33906c67e2 ("bnxt_en: Add support for aRFS on 57500 chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f9e30088d20016a224d8110d45356da253eaa26a +Author: Pablo Neira Ayuso +Date: Tue Jul 9 22:55:49 2019 +0200 + + net: flow_offload: rename tc_cls_flower_offload to flow_cls_offload + + And any other existing fields in this structure that refer to tc. + Specifically: + + * tc_cls_flower_offload_flow_rule() to flow_cls_offload_flow_rule(). + * TC_CLSFLOWER_* to FLOW_CLS_*. + * tc_cls_common_offload to tc_cls_common_offload. + + Signed-off-by: Pablo Neira Ayuso + Signed-off-by: David S. Miller + +commit 955bcb6ea0df0d9ace89ac475405f1295ced5962 +Author: Pablo Neira Ayuso +Date: Tue Jul 9 22:55:46 2019 +0200 + + drivers: net: use flow block API + + This patch updates flow_block_cb_setup_simple() to use the flow block API. + Several drivers are also adjusted to use it. + + This patch introduces the per-driver list of flow blocks to account for + blocks that are already in use. + + Remove tc_block_offload alias. + + Signed-off-by: Pablo Neira Ayuso + Signed-off-by: David S. Miller + +commit 4e95bc268b915c3a19ec8b9110f61e4ea41a1ed0 +Author: Pablo Neira Ayuso +Date: Tue Jul 9 22:55:39 2019 +0200 + + net: flow_offload: add flow_block_cb_setup_simple() + + Most drivers do the same thing to set up the flow block callbacks, this + patch adds a helper function to do this. 
+ + This preparation patch reduces the number of changes to adapt the + existing drivers to use the flow block callback API. + + This new helper function takes a flow block list per-driver, which is + set to NULL until this driver list is used. + + This patch also introduces the flow_block_command and + flow_block_binder_type enumerations, which are renamed to use + FLOW_BLOCK_* in follow up patches. + + There are three definitions (aliases) in order to reduce the number of + updates in this patch, which go away once drivers are fully adapted to + use this flow block API. + + Signed-off-by: Pablo Neira Ayuso + Reviewed-by: Jakub Kicinski + Signed-off-by: David S. Miller + +commit 12479f627f7c2017e6fcd50b56c2537592674c50 +Author: Michael Chan +Date: Tue Jul 9 03:50:07 2019 -0400 + + bnxt_en: Add page_pool_destroy() during RX ring cleanup. + + Add page_pool_destroy() in bnxt_free_rx_rings() during normal RX ring + cleanup, as Ilias has informed us that the following commit has been + merged: + + 1da4bbeffe41 ("net: core: page_pool: add user refcnt and reintroduce page_pool_destroy") + + The special error handling code to call page_pool_free() can now be + removed. bnxt_free_rx_rings() will always be called during normal + shutdown or any error paths. + + Fixes: 322b87ca55f2 ("bnxt_en: add page_pool support") + Cc: Ilias Apalodimas + Cc: Andy Gospodarek + Signed-off-by: Michael Chan + Acked-by: Andy Gospodarek + Signed-off-by: David S. Miller + +commit 322b87ca55f2f3936ec9f9de438ef9b2115b5c9b +Author: Andy Gospodarek +Date: Mon Jul 8 17:53:04 2019 -0400 + + bnxt_en: add page_pool support + + This removes contention over page allocation for XDP_REDIRECT actions by + adding page_pool support per queue for the driver. The performance for + XDP_REDIRECT actions scales linearly with the number of cores performing + redirect actions when using the page pools instead of the standard page + allocator. 
+ + v2: Fix up the error path from XDP registration, noted by Ilias Apalodimas. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f18c2b77b2e4eec2313d519ba125bd6a069513cf +Author: Andy Gospodarek +Date: Mon Jul 8 17:53:03 2019 -0400 + + bnxt_en: optimized XDP_REDIRECT support + + This adds basic support for XDP_REDIRECT in the bnxt_en driver. Next + patch adds the more optimized page pool support. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c1ba92a86db24dc9ca2648473d4820979a0d8a4c +Author: Michael Chan +Date: Mon Jul 8 17:53:02 2019 -0400 + + bnxt_en: Refactor __bnxt_xmit_xdp(). + + __bnxt_xmit_xdp() is used by XDP_TX and ethtool loopback packet transmit. + Refactor it so that it can be re-used by the XDP_REDIRECT logic. + Restructure the TX interrupt handler logic to cleanly separate XDP_TX + logic in preparation for XDP_REDIRECT. + + Acked-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 52c0609258658ff35b85c654c568a50abd602ac6 +Author: Andy Gospodarek +Date: Mon Jul 8 17:53:01 2019 -0400 + + bnxt_en: rename some xdp functions + + Renaming bnxt_xmit_xdp to __bnxt_xmit_xdp to get ready for XDP_REDIRECT + support and reduce confusion/namespace collision. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4ca5fa39e1aea2f85eb9c4257075c4077c6531da +Author: Michael Chan +Date: Sat Jun 29 11:16:48 2019 -0400 + + bnxt_en: Suppress error messages when querying DSCP DCB capabilities. + + Some firmware versions do not support this so use the silent variant + to send the message to firmware to suppress the harmless error. This + error message is unnecessarily alarming the user. + + Fixes: afdc8a84844a ("bnxt_en: Add DCBNL DSCP application protocol support.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 1dbc59fa4bbaa108b641cd65a54f662b75e4ed36 +Author: Michael Chan +Date: Sat Jun 29 11:16:47 2019 -0400 + + bnxt_en: Cap the returned MSIX vectors to the RDMA driver. + + In an earlier commit to improve NQ reservations on 57500 chips, we + set the resv_irqs on the 57500 VFs to the fixed value assigned by + the PF regardless of how many are actually used. The current + code assumes that resv_irqs minus the ones used by the network driver + must be the ones for the RDMA driver. This is no longer true and + we may return more MSIX vectors than requested, causing inconsistency. + Fix it by capping the value. + + Fixes: 01989c6b69d9 ("bnxt_en: Improve NQ reservations.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d77b1ad8e87dc5a6cd0d9158b097a4817946ca3b +Author: Michael Chan +Date: Sat Jun 29 11:16:46 2019 -0400 + + bnxt_en: Fix statistics context reservation logic for RDMA driver. + + The current logic assumes that the RDMA driver uses one statistics + context adjacent to the ones used by the network driver. This + assumption is not true and the statistics context used by the + RDMA driver is tied to its MSIX base vector. This wrong assumption + can cause RDMA driver failure after changing ethtool rings on the + network side. Fix the statistics reservation logic accordingly. + + Fixes: 780baad44f0f ("bnxt_en: Reserve 1 stat_ctx for RDMA driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d27e2ca1166aefd54d9c48fb6647dee8115a5dfc +Author: Michael Chan +Date: Sat Jun 29 11:16:45 2019 -0400 + + bnxt_en: Fix ethtool selftest crash under error conditions. + + After ethtool loopback packet tests, we re-open the nic for the next + IRQ test. If the open fails, we must not proceed with the IRQ test + or we will crash with NULL pointer dereference. Fix it by checking + the bnxt_open_nic() return code before proceeding. 
+ + Reported-by: Somasundaram Krishnasamy + Fixes: 67fea463fd87 ("bnxt_en: Add interrupt test to ethtool -t selftest.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c20dc142dd7b2884b8570eeab323bcd4a84294fa +Author: Michael Chan +Date: Sat Jun 29 11:16:44 2019 -0400 + + bnxt_en: Disable bus master during PCI shutdown and driver unload. + + Some chips with older firmware can continue to perform DMA read from + context memory even after the memory has been freed. In the PCI shutdown + method, we need to call pci_disable_device() to shutdown DMA to prevent + this DMA before we put the device into D3hot. DMA memory request in + D3hot state will generate PCI fatal error. Similarly, in the driver + remove method, the context memory should only be freed after DMA has + been shutdown for correctness. + + Fixes: 98f04cf0f1fc ("bnxt_en: Check context memory requirements from firmware.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4f75da3666c0c572967729a2401ac650be5581b6 +Author: Tal Gilboa +Date: Thu Jan 10 17:33:17 2019 +0200 + + linux/dim: Move implementation to .c files + + Moved all logic from dim.h and net_dim.h to dim.c and net_dim.c. + This is both more structurally appealing and would allow to only + expose externally used functions. + + Signed-off-by: Tal Gilboa + Signed-off-by: Saeed Mahameed + +commit 8960b38932bee8db0bc9c4d8c135f21df6cdd297 +Author: Tal Gilboa +Date: Thu Jan 31 16:44:48 2019 +0200 + + linux/dim: Rename externally used net_dim members + + Removed 'net' prefix from functions and structs used by external drivers. + + Signed-off-by: Tal Gilboa + Signed-off-by: Saeed Mahameed + +commit e5b6ab02d7aa4118c9a36491633812dcc442acbe +Author: Tal Gilboa +Date: Mon Jan 14 15:32:49 2019 +0200 + + linux/dim: Rename net_dim_sample() to net_dim_update_sample() + + In order to avoid confusion between the function and the similarly + named struct. + In preparation for removing the 'net' prefix from dim members. 
+ + Signed-off-by: Tal Gilboa + Signed-off-by: Saeed Mahameed + +commit c002bd529d719858d4cc233431c88c9efa844053 +Author: Tal Gilboa +Date: Mon Nov 5 12:07:52 2018 +0200 + + linux/dim: Rename externally exposed macros + + Renamed macros in use by external drivers. + + Signed-off-by: Tal Gilboa + Signed-off-by: Saeed Mahameed + +commit 2e9217d1e8b72dde2c7e3e2338cc1830f68cb58d +Author: Vasundhara Volam +Date: Wed May 22 19:12:57 2019 -0400 + + bnxt_en: Device serial number is supported only for PFs. + + Don't read DSN on VFs that do not have the PCI capability. + + Fixes: 03213a996531 ("bnxt: move bp->switch_id initialization to PF probe") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d629522e1d66561f38e5c8d4f52bb6d254ec0707 +Author: Michael Chan +Date: Wed May 22 19:12:56 2019 -0400 + + bnxt_en: Reduce memory usage when running in kdump kernel. + + Skip RDMA context memory allocations, reduce to 1 ring, and disable + TPA when running in the kdump kernel. Without this patch, the driver + fails to initialize with memory allocation errors when running in a + typical kdump kernel. + + Fixes: cf6daed098d1 ("bnxt_en: Increase context memory allocations on 57500 chips for RDMA.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1b3f0b75c39f534278a895c117282014e9d0ae1f +Author: Michael Chan +Date: Wed May 22 19:12:55 2019 -0400 + + bnxt_en: Fix possible BUG() condition when calling pci_disable_msix(). + + When making configuration changes, the driver calls bnxt_close_nic() + and then bnxt_open_nic() for the changes to take effect. A parameter + irq_re_init is passed to the call sequence to indicate if IRQ + should be re-initialized. This irq_re_init parameter needs to + be included in the bnxt_reserve_rings() call. bnxt_reserve_rings() + can only call pci_disable_msix() if the irq_re_init parameter is + true, otherwise it may hit BUG() because some IRQs may not have been + freed yet. 
+ + Fixes: 41e8d7983752 ("bnxt_en: Modify the ring reservation functions for 57500 series chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 296d5b54163964b7ae536b8b57dfbd21d4e868e1 +Author: Michael Chan +Date: Wed May 22 19:12:54 2019 -0400 + + bnxt_en: Fix aggregation buffer leak under OOM condition. + + For every RX packet, the driver replenishes all buffers used for that + packet and puts them back into the RX ring and RX aggregation ring. + In one code path where the RX packet has one RX buffer and one or more + aggregation buffers, we missed recycling the aggregation buffer(s) if + we are unable to allocate a new SKB buffer. This leads to the + aggregation ring slowly running out of buffers over time. Fix it + by properly recycling the aggregation buffers. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Reported-by: Rakesh Hemnani + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 51fec80d3a669cdc3950973cb2a9045adeb0e7f0 +Author: Michael Chan +Date: Sun May 5 07:17:08 2019 -0400 + + bnxt_en: Add device IDs 0x1806 and 0x1752 for 57500 devices. + + 0x1806 and 0x1752 are VF variant and PF variant of the 57500 chip + family. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ac33906c67e22edeabe3f0150ffeb367462e754f +Author: Michael Chan +Date: Sun May 5 07:17:07 2019 -0400 + + bnxt_en: Add support for aRFS on 57500 chips. + + Set RSS ring table index of the RFS destination ring for the NTUPLE + filters on 57500 chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e969ae5bbfcf48e3ff2d159870453121d5a8441d +Author: Michael Chan +Date: Sun May 5 07:17:06 2019 -0400 + + bnxt_en: Query firmware capability to support aRFS on 57500 chips. + + Query support for the aRFS ring table index in the firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 01989c6b69d91a0df0af8d5c6b5f33d82a239ae0 +Author: Michael Chan +Date: Sun May 5 07:17:05 2019 -0400 + + bnxt_en: Improve NQ reservations. + + bnxt_need_reserve_rings() determines if any resources have changed and + requires new reservation with firmware. The NQ checking is currently + just an approximation. Improve the NQ checking logic to make it + accurate. NQ reservation is only needed on 57500 PFs. This fix will + eliminate unnecessary reservations and will reduce NQ reservations + when some NQs have been released on 57500 PFs. + + Fixes: c0b8cda05e1d ("bnxt_en: Fix NQ/CP rings accounting on the new 57500 chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 53579e37d13a7a87430e2ec0171e091ebf2e63a1 +Author: Devesh Sharma +Date: Sun May 5 07:17:04 2019 -0400 + + bnxt_en: Separate RDMA MR/AH context allocation. + + In newer firmware, the context memory for MR (Memory Region) + and AH (Address Handle) to support RDMA are specified separately. + Modify driver to specify and allocate the 2 context memory types + separately when supported by the firmware. + + Signed-off-by: Devesh Sharma + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2730214ddb889c54d5f6a734e2fe584c295cbd9b +Author: Vasundhara Volam +Date: Sun May 5 07:17:03 2019 -0400 + + bnxt_en: read the clause type from the PHY ID + + Currently driver hard code Clause 45 based on speed supported by the + PHY. Instead read the clause type from the PHY ID provided as input + to the mdio ioctl. + + Fixes: 0ca12be99667 ("bnxt_en: Add support for mdio read/write to external PHY") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 691aa62045c2b23152ce3b64feb601502aab97c5 +Author: Vasundhara Volam +Date: Sun May 5 07:17:02 2019 -0400 + + bnxt_en: Read package version from firmware. + + HWRM_VER_GET firmware command returns package name that is running + actively on the adapter. 
Use this version instead of parsing from + the package log in NVRAM. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6154532fe8fe4e5ec5ffb1a71f587015973f8753 +Author: Vasundhara Volam +Date: Sun May 5 07:17:01 2019 -0400 + + bnxt_en: Check new firmware capability to display extended stats. + + Newer firmware now advertises the capability for extended stats + support. Check the new capability in addition to the existing + version check. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 55e4398d4ee578094fb38f25af175629a24675d5 +Author: Vasundhara Volam +Date: Sun May 5 07:17:00 2019 -0400 + + bnxt_en: Add support for PCIe statistics + + Gather periodic PCIe statistics for ethtool -S. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a220eabc8887e3c02d308a9960e92a70cbd00b52 +Author: Vasundhara Volam +Date: Sun May 5 07:16:59 2019 -0400 + + bnxt_en: Refactor bnxt_alloc_stats(). + + Reverse the condition of the large "if" block and return early. This + will simplify the follow up patch to add PCIe statistics. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4a50ddc2d2ea81d3fcbfbe05657d73ac9a9655fd +Author: Michael Chan +Date: Sun May 5 07:16:58 2019 -0400 + + bnxt_en: Update firmware interface to 1.10.0.69. + + PTP API updates for 57500 chips, new RX port stats counters and other + miscellaneous updates. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0b397b17a4120cb80f7bf89eb30587b3dd9b0d1d +Author: Michael Chan +Date: Thu Apr 25 22:31:55 2019 -0400 + + bnxt_en: Fix uninitialized variable usage in bnxt_rx_pkt(). + + In bnxt_rx_pkt(), if the driver encounters BD errors, it will recycle + the buffers and jump to the end where the uninitailized variable "len" + is referenced. 
Fix it by adding a new jump label that will skip + the length update. This is the most correct fix since the length + may not be valid when we get this type of error. + + Fixes: 6a8788f25625 ("bnxt_en: add support for software dynamic interrupt moderation") + Reported-by: Nathan Chancellor + Cc: Nathan Chancellor + Signed-off-by: Michael Chan + Reviewed-by: Nathan Chancellor + Tested-by: Nathan Chancellor + Signed-off-by: David S. Miller + +commit 3f93cd3f098e284c851acb89265ebe35b994a5c8 +Author: Michael Chan +Date: Thu Apr 25 22:31:54 2019 -0400 + + bnxt_en: Fix statistics context reservation logic. + + In an earlier commit that fixes the number of stats contexts to + reserve for the RDMA driver, we added a function parameter to pass in + the number of stats contexts to all the relevant functions. The passed + in parameter should have been used to set the enables field of the + firmware message. + + Fixes: 780baad44f0f ("bnxt_en: Reserve 1 stat_ctx for RDMA driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ad361adf0d08f1135f3845c6b3a36be7cc0bfda5 +Author: Michael Chan +Date: Thu Apr 25 22:31:53 2019 -0400 + + bnxt_en: Pass correct extended TX port statistics size to firmware. + + If driver determines that extended TX port statistics are not supported + or allocation of the data structure fails, make sure to pass 0 TX stats + size to firmware to disable it. The firmware returned TX stats size should + also be set to 0 for consistency. This will prevent + bnxt_get_ethtool_stats() from accessing the NULL TX stats pointer in + case there is mismatch between firmware and driver. + + Fixes: 36e53349b60b ("bnxt_en: Add additional extended port statistics.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1f83391bd6fc48f92f627b0ec0bce686d100c6a5 +Author: Michael Chan +Date: Thu Apr 25 22:31:52 2019 -0400 + + bnxt_en: Fix possible crash in bnxt_hwrm_ring_free() under error conditions. 
+ + If we encounter errors during open and proceed to clean up, + bnxt_hwrm_ring_free() may crash if the rings we try to free have never + been allocated. bnxt_cp_ring_for_rx() or bnxt_cp_ring_for_tx() + may reference pointers that have not been allocated. + + Fix it by checking for valid fw_ring_id first before calling + bnxt_cp_ring_for_rx() or bnxt_cp_ring_for_tx(). + + Fixes: 2c61d2117ecb ("bnxt_en: Add helper functions to get firmware CP ring ID.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f9099d611449836a51a65f40ea7dc9cb5f2f665e +Author: Vasundhara Volam +Date: Thu Apr 25 22:31:51 2019 -0400 + + bnxt_en: Free short FW command HWRM memory in error path in bnxt_init_one() + + In the bnxt_init_one() error path, short FW command request memory + is not freed. This patch fixes it. + + Fixes: e605db801bde ("bnxt_en: Support for Short Firmware Message") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b4e30e8e7ea1d1e35ffd64ca46f7d9a7f227b4bf +Author: Michael Chan +Date: Thu Apr 25 22:31:50 2019 -0400 + + bnxt_en: Improve multicast address setup logic. + + The driver builds a list of multicast addresses and sends it to the + firmware when the driver's ndo_set_rx_mode() is called. In rare + cases, the firmware can fail this call if internal resources to + add multicast addresses are exhausted. In that case, we should + try the call again by setting the ALL_MCAST flag which is more + guaranteed to succeed. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c43f1255b866b423d2381f77eaa2cbc64a9c49aa +Author: Stanislav Fomichev +Date: Mon Apr 22 08:55:48 2019 -0700 + + net: pass net_device argument to the eth_get_headlen + + Update all users of eth_get_headlen to pass network device, fetch + network namespace from it and pass it down to the flow dissector. 
+ This commit is a noop until administrator inserts BPF flow dissector + program. + + Cc: Maxim Krasnyansky + Cc: Saeed Mahameed + Cc: Jeff Kirsher + Cc: intel-wired-lan@lists.osuosl.org + Cc: Yisen Zhuang + Cc: Salil Mehta + Cc: Michael Chan + Cc: Igor Russkikh + Signed-off-by: Stanislav Fomichev + Signed-off-by: Daniel Borkmann + +commit 8e44e96c6c8e8fb80b84a2ca11798a8554f710f2 +Author: Michael Chan +Date: Mon Apr 8 17:39:55 2019 -0400 + + bnxt_en: Reset device on RX buffer errors. + + If the RX completion indicates RX buffers errors, the RX ring will be + disabled by firmware and no packets will be received on that ring from + that point on. Recover by resetting the device. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a1b0e4e684e9c300b9e759b46cb7a0147e61ddff +Author: Michael Chan +Date: Mon Apr 8 17:39:54 2019 -0400 + + bnxt_en: Improve RX consumer index validity check. + + There is logic to check that the RX/TPA consumer index is the expected + index to work around a hardware problem. However, the potentially bad + consumer index is first used to index into an array to reference an entry. + This can potentially crash if the bad consumer index is beyond legal + range. Improve the logic to use the consumer index for dereferencing + after the validity check and log an error message. + + Fixes: fa7e28127a5a ("bnxt_en: Add workaround to detect bad opaque in rx completion (part 2)") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fb24ea52f78e0d595852e09e3a55697c8f442189 +Author: Will Deacon +Date: Fri Feb 22 17:14:59 2019 +0000 + + drivers: Remove explicit invocations of mmiowb() + + mmiowb() is now implied by spin_unlock() on architectures that require + it, so there is no reason to call it from driver code. 
This patch was + generated using coccinelle: + + @mmiowb@ + @@ + - mmiowb(); + + and invoked as: + + $ for d in drivers include/linux/qed sound; do \ + spatch --include-headers --sp-file mmiowb.cocci --dir $d --in-place; done + + NOTE: mmiowb() has only ever guaranteed ordering in conjunction with + spin_unlock(). However, pairing each mmiowb() removal in this patch with + the corresponding call to spin_unlock() is not at all trivial, so there + is a small chance that this change may regress any drivers incorrectly + relying on mmiowb() to order MMIO writes between CPUs using lock-free + synchronisation. If you've ended up bisecting to this commit, you can + reintroduce the mmiowb() calls using wmb() instead, which should restore + the old behaviour on all architectures other than some esoteric ia64 + systems. + + Acked-by: Linus Torvalds + Signed-off-by: Will Deacon + +commit 56d9f4e8f70e6f47ad4da7640753cf95ae51a356 +Author: Jiri Pirko +Date: Wed Apr 3 14:24:22 2019 +0200 + + bnxt: remove ndo_get_port_parent_id implementation for physical ports + + Remove implementation of get_port_parent_id ndo and rely on core calling + into devlink for the information directly. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 6605a226781eb1224c2dcf974a39eea11862b864 +Author: Jiri Pirko +Date: Wed Apr 3 14:24:21 2019 +0200 + + bnxt: pass switch ID through devlink_port_attrs_set() + + Pass the switch ID down the to devlink through devlink_port_attrs_set() + so it can be used by devlink_compat_switch_id_get(). + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 03213a996531e507e03c085d411a313e34357498 +Author: Jiri Pirko +Date: Wed Apr 3 14:24:20 2019 +0200 + + bnxt: move bp->switch_id initialization to PF probe + + Currently the switch_id is being only initialized when switching eswitch + mode from "legacy" to "switchdev". However, nothing prevents the id to + be initialized from the very beginning. 
Physical ports can show it even + in "legacy" mode. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit bec5267cded268acdf679b651778c300d204e9f2 +Author: Jiri Pirko +Date: Wed Apr 3 14:24:16 2019 +0200 + + net: devlink: extend port attrs for switch ID + + Extend devlink_port_attrs_set() to pass switch ID for ports which are + part of switch and store it in port attrs. For other ports, this is + NULL. + + Note that this allows the driver to group devlink ports into one or more + switches according to the actual topology. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c +Author: Florian Westphal +Date: Mon Apr 1 16:42:14 2019 +0200 + + net: move skb->xmit_more hint to softnet data + + There are two reasons for this. + + First, the xmit_more flag conceptually doesn't fit into the skb, as + xmit_more is not a property related to the skb. + Its only a hint to the driver that the stack is about to transmit another + packet immediately. + + Second, it was only done this way to not have to pass another argument + to ndo_start_xmit(). + + We can place xmit_more in the softnet data, next to the device recursion. + The recursion counter is already written to on each transmit. The "more" + indicator is placed right next to it. + + Drivers can use the netdev_xmit_more() helper instead of skb->xmit_more + to check the "more packets coming" hint. + + skb->xmit_more is retained (but always 0) to not cause build breakage. + + This change takes care of the simple s/skb->xmit_more/netdev_xmit_more()/ + conversions. Remaining drivers are converted in the next patches. + + Suggested-by: Eric Dumazet + Signed-off-by: Florian Westphal + Signed-off-by: David S. 
Miller + +commit ab178b058c4354ea16a0b0be28914874f7e2972d +Author: Jiri Pirko +Date: Thu Mar 28 13:56:42 2019 +0100 + + bnxt: remove ndo_get_phys_port_name implementation + + Rely on the previously introduced fallback and let the core + call devlink in order to get the physical port name. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit c9c49a65e53ee5115bb33e3531be66ad261ab675 +Author: Jiri Pirko +Date: Thu Mar 28 13:56:41 2019 +0100 + + bnxt: implement ndo_get_devlink_port + + In order for devlink compat functions to work, implement + ndo_get_devlink_port. Legacy slaves does not have devlink port instances + created for themselves. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit a0e18132ec51301414a5c92e6c258c2e62fdf08f +Author: Jiri Pirko +Date: Sun Mar 24 11:14:27 2019 +0100 + + bnxt: set devlink port attrs properly + + Set the attrs properly so delink has enough info to generate physical + port names. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 477edb7806b652043750aa33c584b9838a7c2123 +Author: Jiri Pirko +Date: Sun Mar 24 11:14:25 2019 +0100 + + bnxt: add missing net/devlink.h include + + devlink functions are in use, so include the related header file. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 2b3c6885386020b1b9d92d45e8349637e27d1f66 +Author: Michael Chan +Date: Wed Feb 27 03:58:53 2019 -0500 + + bnxt_en: Drop oversize TX packets to prevent errors. + + There have been reports of oversize UDP packets being sent to the + driver to be transmitted, causing error conditions. The issue is + likely caused by the dst of the SKB switching between 'lo' with + 64K MTU and the hardware device with a smaller MTU. Patches are + being proposed by Mahesh Bandewar to fix the + issue. + + In the meantime, add a quick length check in the driver to prevent + the error. The driver uses the TX packet size as index to look up an + array to setup the TX BD. 
The array is large enough to support all MTU + sizes supported by the driver. The oversize TX packet causes the + driver to index beyond the array and put garbage values into the + TX BD. Add a simple check to prevent this. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0000b81a063b5f3ab82fa18041c28327ce72c312 +Author: Michael Chan +Date: Wed Feb 20 19:07:32 2019 -0500 + + bnxt_en: Wait longer for the firmware message response to complete. + + The code waits up to 20 usec for the firmware response to complete + once we've seen the valid response header in the buffer. It turns + out that in some scenarios, this wait time is not long enough. + Extend it to 150 usec and use usleep_range() instead of udelay(). + + Fixes: 9751e8e71487 ("bnxt_en: reduce timeout on initial HWRM calls") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 67681d02aaa1db9044a16df4ca9c77cde1221a3e +Author: Michael Chan +Date: Wed Feb 20 19:07:31 2019 -0500 + + bnxt_en: Fix typo in firmware message timeout logic. + + The logic that polls for the firmware message response uses a shorter + sleep interval for the first few passes. But there was a typo so it + was using the wrong counter (larger counter) for these short sleep + passes. The result is a slightly shorter timeout period for these + firmware messages than intended. Fix it by using the proper counter. + + Fixes: 9751e8e71487 ("bnxt_en: reduce timeout on initial HWRM calls") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b2d69122fd627aa3c6c39c99b2b8706703a92634 +Author: Sriharsha Basavapatna +Date: Tue Feb 19 05:31:16 2019 -0500 + + bnxt_en: Return relevant error code when offload fails + + The driver returns -ENOSPC when tc_can_offload() check fails. Since that + routine checks for flow parameters that are not supported by the driver, + we should return the more appropriate -EOPNOTSUPP. 
+ + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0ca12be99667265ef8100860601d510e78f22cea +Author: Vasundhara Volam +Date: Tue Feb 19 05:31:15 2019 -0500 + + bnxt_en: Add support for mdio read/write to external PHY + + Add support for SIOCGMIIREG and SIOCSMIIREG ioctls to + mdio read/write to external PHY. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2a516444434ffa4419e67c5289d5f53272cb9674 +Author: Michael Chan +Date: Tue Feb 19 05:31:14 2019 -0500 + + bnxt_en: Propagate trusted VF attribute to firmware. + + Newer firmware understands the concept of a trusted VF, so propagate the + trusted VF attribute set by the PF admin. to the firmware. Also, check + the firmware trusted setting when considering the VF MAC address change + and reporting the trusted setting to the user. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c6cc32a2133cb1eb5aa28ced1852aab2aeaf357a +Author: Erik Burrows +Date: Tue Feb 19 05:31:13 2019 -0500 + + bnxt_en: Add support for BCM957504 + + Add support for BCM957504 with device ID 1751 + + Signed-off-by: Erik Burrows + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3293ec232123e9a648ec5b02225cd32091ae243f +Author: Michael Chan +Date: Tue Feb 19 05:31:12 2019 -0500 + + bnxt_en: Update firmware interface spec. to 1.10.0.47. + + Firmware error recover is the major change in this spec. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit da203dfa89ce83c55b6623f73560ef7ec742aca4 +Author: Vasundhara Volam +Date: Mon Feb 11 14:46:17 2019 +0530 + + Revert "devlink: Add a generic wake_on_lan port parameter" + + This reverts commit b639583f9e36d044ac1b13090ae812266992cbac. 
+ + As per discussion with Jakub Kicinski and Michal Kubecek, + this will be better addressed by soon-too-come ethtool netlink + API with additional indication that given configuration request + is supposed to be persisted. + + Also, remove the parameter support from bnxt_en driver. + + Cc: Jiri Pirko + Cc: Michael Chan + Cc: Michal Kubecek + Suggested-by: Jakub Kicinski + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit 7c62cfb8c5744b377e9f33806e0db87a00dc6884 +Author: Jiri Pirko +Date: Thu Feb 7 11:22:45 2019 +0000 + + devlink: publish params only after driver init is done + + Currently, user can do dump or get of param values right after the + devlink params are registered. However the driver may not be initialized + which is an issue. The same problem happens during notification + upon param registration. Allow driver to publish devlink params + whenever it is ready to handle get() ops. Note that this cannot + be resolved by init reordering, as the "driverinit" params have + to be available before the driver is initialized (it needs the param + values there). + + Signed-off-by: Jiri Pirko + Cc: Michael Chan + Cc: Tariq Toukan + Signed-off-by: Ido Schimmel + Signed-off-by: David S. Miller + +commit ecb53febfcad565366762b7413b03452874643db +Author: Devesh Sharma +Date: Thu Feb 7 01:31:28 2019 -0500 + + RDMA/bnxt_en: Enable RDMA driver support for 57500 chip + + Re-enabling RDMA driver support on 57500 chips. Removing the forced error + code for 57500 chip. + + Signed-off-by: Michael Chan + Signed-off-by: Devesh Sharma + Signed-off-by: Jason Gunthorpe + +commit 52d5254a2d045bba2a744042319c64e1fe41b5c8 +Author: Florian Fainelli +Date: Wed Feb 6 09:45:36 2019 -0800 + + bnxt: Implement ndo_get_port_parent_id() + + BNXT only supports SWITCHDEV_ATTR_ID_PORT_PARENT_ID, which makes it a + great candidate to be converted to use the ndo_get_port_parent_id() NDO + instead of implementing switchdev_port_attr_get(). 
The conversion is + straight forward here since the PF and VF code use the same getter. + + Since bnxt makes uses of switchdev_port_same_parent_id() convert it to + use netdev_port_same_parent_id(). + + Acked-by: Jiri Pirko + Signed-off-by: Florian Fainelli + Signed-off-by: David S. Miller + +commit 738678817573ce45698e1bb13222f2e53622c555 +Author: Pablo Neira Ayuso +Date: Sat Feb 2 12:50:48 2019 +0100 + + drivers: net: use flow action infrastructure + + This patch updates drivers to use the new flow action infrastructure. + + Signed-off-by: Pablo Neira Ayuso + Acked-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 3b1903ef97c080a80ead3a6a2305f55108e08269 +Author: Pablo Neira Ayuso +Date: Sat Feb 2 12:50:47 2019 +0100 + + flow_offload: add statistics retrieval infrastructure and use it + + This patch provides the flow_stats structure that acts as container for + tc_cls_flower_offload, then we can use to restore the statistics on the + existing TC actions. Hence, tcf_exts_stats_update() is not used from + drivers anymore. + + Signed-off-by: Pablo Neira Ayuso + Acked-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 8f2566225ae2d62d532bb1810ed74fa4bbc5bbdb +Author: Pablo Neira Ayuso +Date: Sat Feb 2 12:50:43 2019 +0100 + + flow_offload: add flow_rule and flow_match structures and use them + + This patch wraps the dissector key and mask - that flower uses to + represent the matching side - around the flow_match structure. + + To avoid a follow up patch that would edit the same LoCs in the drivers, + this patch also wraps this new flow match structure around the flow rule + object. This new structure will also contain the flow actions in follow + up patches. 
+ + This introduces two new interfaces: + + bool flow_rule_match_key(rule, dissector_id) + + that returns true if a given matching key is set on, and: + + flow_rule_match_XYZ(rule, &match); + + To fetch the matching side XYZ into the match container structure, to + retrieve the key and the mask with one single call. + + Signed-off-by: Pablo Neira Ayuso + Acked-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 5e66e35aab335b83d9ffb220d8a3a13986a7a60e +Author: Michael Chan +Date: Thu Jan 31 14:31:48 2019 -0500 + + bnxt_en: Disable interrupts when allocating CP rings or NQs. + + When calling firmware to allocate a CP ring or NQ, an interrupt associated + with that ring may be generated immediately before the doorbell is even + setup after the firmware call returns. When servicing the interrupt, the + driver may crash when trying to access the doorbell. + + Fix it by disabling interrupt on that vector until the doorbell is + set up. + + Fixes: 697197e5a173 ("bnxt_en: Re-structure doorbells.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 782a624d00fa22e7499f5abc29747501ec671313 +Author: Vasundhara Volam +Date: Mon Jan 28 18:00:27 2019 +0530 + + bnxt_en: Add bnxt_en initial port params table and register it + + Register devlink_port with devlink and create initial port params + table for bnxt_en. The table consists of a generic parameter: + + wake_on_lan: Enables Wake on Lan for this port when magic packet + is received with this port's MAC address using ACPI pattern. + If enabled, the controller asserts a wake pin upon reception of + WoL packet. ACPI (Advanced Configuration and Power Interface) is + an industry specification for the efficient handling of power + consumption in desktop and mobile computers. + + v2->v3: + - Modify bnxt_dl_wol_validate(), to throw error message when user gives + value other than DEVLINK_PARAM_WAKE_MAGIC or to disable WOL. 
+ - Use netdev_err() instead of netdev_warn(), when devlink_port_register() + and devlink_port_params_register() returns error. Also, don't log rc + in this message. + + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit 6ef982dec7eda9affa81a2bb84f75441deb56d06 +Author: Michael Chan +Date: Sat Jan 12 00:13:05 2019 -0500 + + bnxt_en: Fix context memory allocation. + + When allocating memory pages for context memory, if the last page table + should be fully populated, the current code will set nr_pages to 0 when + calling bnxt_alloc_ctx_mem_blk(). This will cause the last page table + to be completely blank and causing some RDMA failures. + + Fix it by setting the last page table's nr_pages to the remainder only + if it is non-zero. + + Fixes: 08fe9d181606 ("bnxt_en: Add Level 2 context memory paging support.") + Reported-by: Eric Davis + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0b815023a1d479aa8f8851ee880d5388e53b7ae5 +Author: Michael Chan +Date: Sat Jan 12 00:13:04 2019 -0500 + + bnxt_en: Fix ring checking logic on 57500 chips. + + In bnxt_hwrm_check_pf_rings(), add the proper flag to test the NQ + resources. Without the proper flag, the firmware will change + the NQ resource allocation and remap the IRQ, causing missing + IRQs. This issue shows up when adding MQPRIO TX queues, for example. + + Fixes: 36d65be9a880 ("bnxt_en: Disable MSIX before re-reserving NQs/CMPL rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit abd43a13525db70926999ebe3e272c38119fdfbe +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:52 2018 -0500 + + bnxt_en: Support for 64-bit flow handle. + + Older firmware only supports 16-bit flow handle, because of which the + number of flows that can be offloaded can’t scale beyond a point. + Newer firmware supports 64-bit flow handle enabling the host to scale + upto millions of flows. 
With the new 64-bit flow handle support, driver + has to query flow stats in a different way compared to the older approach. + + This patch adds support for 64-bit flow handle and new way to query + flow stats. + + Signed-off-by: Venkat Duvvuru + Reviewed-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cf6daed098d14760d6605268d28bfdf7b3bfa375 +Author: Michael Chan +Date: Thu Dec 20 03:38:51 2018 -0500 + + bnxt_en: Increase context memory allocations on 57500 chips for RDMA. + + If RDMA is supported on the 57500 chip, increase context memory + allocations for the resources used by RDMA. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 08fe9d1816067170e167867d06009fb0f41cb411 +Author: Michael Chan +Date: Thu Dec 20 03:38:50 2018 -0500 + + bnxt_en: Add Level 2 context memory paging support. + + Add the new functions bnxt_alloc_ctx_pg_tbls()/bnxt_free_ctx_pg_tbls() + to allocate and free pages for context memory. The new functions + will handle the different levels of paging support and allocate/free + the pages accordingly using the existing functions. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4f49b2b8d4167b712ce876bff08bd9606a44f7a6 +Author: Michael Chan +Date: Thu Dec 20 03:38:49 2018 -0500 + + bnxt_en: Enhance bnxt_alloc_ring()/bnxt_free_ring(). + + To support level 2 context page memory structures, enhance the + bnxt_ring_mem_info structure with a "depth" field to specify the page + level and add a flag to specify using full pages for L1 and L2 page + tables. This is needed to support RDMA functionality on 57500 chips + since RDMA requires more context memory. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 760b6d33410cda351f236058585471cb0f417978 +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:48 2018 -0500 + + bnxt_en: Add support for 2nd firmware message channel. 
+ + Earlier, some of the firmware commands (ex: CFA_FLOW_*) which are processed + by KONG processor were sent to the CHIMP processor from the host. This + approach was taken as there was no direct message channel to KONG. + CHIMP in turn used to send them to KONG. Newer firmware supports a new + message channel which the host can send messages directly to the KONG + processor. + + This patch adds support for required changes needed in the driver + to support direct KONG message channel. This speeds up flow related + messages sent to the firmware for CLS_FLOWER offload. + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5c209fc82159f2f71e2772f28cd1d447b821e98d +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:47 2018 -0500 + + bnxt_en: Introduce bnxt_get_hwrm_resp_addr & bnxt_get_hwrm_seq_id routines. + + These routines will be enhanced in the subsequent patch to + return the 2nd firmware comm. channel's hwrm response address & + sequence id respectively. + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 89455017fb8dd5dff0d088ecb82bccf9f9b715cd +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:46 2018 -0500 + + bnxt_en: Avoid arithmetic on void * pointer. + + Typecast hwrm_cmd_resp_addr to (u8 *) from (void *) before doing + arithmetic. + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2e9ee3987719148308ff1794aa91f8314f839ccd +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:45 2018 -0500 + + bnxt_en: Use macros for firmware message doorbell offsets. + + In preparation for adding a 2nd communication channel to firmware. + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fc718bb2d1efa4348995b4811c243ec59f913a59 +Author: Venkat Duvvuru +Date: Thu Dec 20 03:38:44 2018 -0500 + + bnxt_en: Set hwrm_intr_seq_id value to its inverted value. 
+ + Set hwrm_intr_seq_id value to its inverted value instead of + HWRM_SEQ_INVALID, when an hwrm completion of type + CMPL_BASE_TYPE_HWRM_DONE is received. This will enable us to use + the complete 16-bit sequence ID space. + + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3322479e6d17ea17b75848fd7c7702ccf82c9c35 +Author: Michael Chan +Date: Thu Dec 20 03:38:43 2018 -0500 + + bnxt_en: Update firmware interface spec. to 1.10.0.33. + + The major changes are in the flow offload firmware APIs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 84404d5fd5858588bbf694b0300dbc6310d02737 +Author: Michael Chan +Date: Wed Dec 19 13:46:50 2018 -0500 + + bnxt_en: Fix ethtool self-test loopback. + + The current code has 2 problems. It assumes that the RX ring for + the loopback packet is combined with the TX ring. This is not + true if the ethtool channels are set to non-combined mode. The + second problem is that it won't work on 57500 chips without + adjusting the logic to get the proper completion ring (cpr) pointer. + Fix both issues by locating the proper cpr pointer through the RX + ring. + + Fixes: e44758b78ae8 ("bnxt_en: Use bnxt_cp_ring_info struct pointer as parameter for RX path.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 56d374624778652d2a999e18c87a25338b127b41 +Author: Vasundhara Volam +Date: Sun Dec 16 18:46:31 2018 -0500 + + bnxt_en: query force speeds before disabling autoneg mode. + + With autoneg enabled, PHY loopback test fails. To disable autoneg, + driver needs to send a valid forced speed to FW. FW is not sending + async event for invalid speeds. To fix this, query forced speeds + and send the correct speed when disabling autoneg mode. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit fd3ab1c70e0b953f4f772142051d215bffada718 +Author: Michael Chan +Date: Sun Dec 16 18:46:30 2018 -0500 + + bnxt_en: Do not free port statistics buffer when device is down. + + Port statistics which include RDMA counters are useful even when the + netdevice is down. Do not free the port statistics DMA buffers + when the netdevice is down. This is to keep the snapshot of the port + statistics and counters will just continue counting when the + netdevice goes back up. + + Split the bnxt_free_stats() function into 2 functions. The port + statistics buffers will only be freed when the netdevice is + removed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b8875ca356f1c0b17ec68be6666269373a62288e +Author: Michael Chan +Date: Sun Dec 16 18:46:29 2018 -0500 + + bnxt_en: Save ring statistics before reset. + + With the current driver, the statistics reported by .ndo_get_stats64() + are reset when the device goes down. Store a snapshot of the + rtnl_link_stats64 before shutdown. This snapshot is added to the + current counters in .ndo_get_stats64() so that the counters will not + get reset when the device is down. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7c675421afef18253a86ffc383f57bc15ef32ea8 +Author: Vasundhara Volam +Date: Sun Dec 16 18:46:28 2018 -0500 + + bnxt_en: Return linux standard errors in bnxt_ethtool.c + + Currently firmware specific errors are returned directly in flash_device + and reset ethtool hooks. Modify it to return linux standard errors + to userspace when flashing operations fail. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 24654f095ed68eafcdfdbfccbe6324ad1475c1bb +Author: Michael Chan +Date: Sun Dec 16 18:46:27 2018 -0500 + + bnxt_en: Don't set ETS on unused TCs. + + Currently, the code allows ETS bandwidth weight 0 to be set on unused TCs. + We should not set any DCB parameters on unused TCs at all. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e37fed790335631c57477e7bee952a039ccb7a50 +Author: Michael Chan +Date: Sun Dec 16 18:46:26 2018 -0500 + + bnxt_en: Add ethtool -S priority counters. + + Display the CoS counters as additional priority counters by looking up + the priority to CoS queue mapping. If the TX extended port statistics + block size returned by firmware is big enough to cover the CoS counters, + then we will display the new priority counters. We call firmware to get + the up-to-date pri2cos mapping to convert the CoS counters to + priority counters. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b16b68918674af5e7beccb567fe7ebd8b44744b8 +Author: Michael Chan +Date: Sun Dec 16 18:46:25 2018 -0500 + + bnxt_en: Add SR-IOV support for 57500 chips. + + There are some minor differences when assigning VF resources on the + new chips. The MSIX (NQ) resource has to be assigned and ring group + is not needed on the new chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 36d65be9a88052cdfc8524eb591baf0e6c878408 +Author: Michael Chan +Date: Sun Dec 16 18:46:24 2018 -0500 + + bnxt_en: Disable MSIX before re-reserving NQs/CMPL rings. + + When bringing up a device, the code checks to see if the number of + MSIX has changed. pci_disable_msix() should be called first before + changing the number of reserved NQs/CMPL rings. This ensures that + the MSIX vectors associated with the NQs/CMPL rings are still + properly mapped when pci_disable_msix() masks the vectors. + + This patch will prevent errors when RDMA support is added for the new + 57500 chips. When the RDMA driver shuts down, the number of NQs is + decreased and we must use the new sequence to prevent MSIX errors. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 780baad44f0f1e87df5508d1ad5a87d359fb96d1 +Author: Vasundhara Volam +Date: Sun Dec 16 18:46:23 2018 -0500 + + bnxt_en: Reserve 1 stat_ctx for RDMA driver. + + bnxt_en requires same number of stat_ctxs as CP rings but RDMA + requires only 1 stat_ctx. Also add a new parameter resv_stat_ctxs + to better keep track of stat_ctxs reserved including resources used + by RDMA. Add a stat_ctxs parameter to all the relevant resource + reservation functions so we can reserve the correct number of + stat_ctxs. + + Prior to this patch, we were not reserving the extra stat_ctx for + RDMA and RDMA would not work on the new 57500 chips. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f4e896142de8304b433fd94f7eed55750c34defd +Author: Vasundhara Volam +Date: Sun Dec 16 18:46:22 2018 -0500 + + bnxt_en: Do not modify max_stat_ctxs after RDMA driver requests/frees stat_ctxs + + Calling bnxt_set_max_func_stat_ctxs() to modify max stat_ctxs requested + or freed by the RDMA driver is wrong. After introducing reservation of + resources recently, the driver has to keep track of all stat_ctxs + including the ones used by the RDMA driver. This will provide a better + foundation for accurate accounting of the stat_ctxs. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c027c6b4e91f21dfa4feab91e2155c8403f49f5c +Author: Vasundhara Volam +Date: Sun Dec 16 18:46:21 2018 -0500 + + bnxt_en: get rid of num_stat_ctxs variable + + For bnxt_en driver, stat_ctxs created will always be same as + cp_nr_rings. Remove extra variable that duplicates the value. + Also introduce bnxt_get_avail_stat_ctxs_for_en() helper to get + available stat_ctxs and bnxt_get_ulp_stat_ctxs() helper to return + number of stat_ctxs used by RDMA. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit e916b0815a11c6cbc82f2d8510982ea022628880 +Author: Michael Chan +Date: Sun Dec 16 18:46:20 2018 -0500 + + bnxt_en: Add bnxt_get_avail_cp_rings_for_en() helper function. + + The available CP rings are calculated differently on the new 57500 + chips, so add this helper to do this calculation correctly. The + VFs will be assigned these available CP rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f7588cd89331211e2c4f0fdb94bb0ed4f92847be +Author: Michael Chan +Date: Sun Dec 16 18:46:19 2018 -0500 + + bnxt_en: Store the maximum NQs available on the PF. + + The PF has a pool of NQs and MSIX vectors assigned to it based on + NVRAM configurations. The number of usable MSIX vectors on the PF + is the minimum of the NQs and MSIX vectors. Any excess NQs without + associated MSIX may be used for the VFs, so we need to store this + max_nqs value. max_nqs minus the NQs used by the PF will be the + available NQs for the VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2fd527b72bb6f95dfe8a1902e998cb76390c431e +Author: Petr Machata +Date: Wed Dec 12 17:02:48 2018 +0000 + + net: ndo_bridge_setlink: Add extack + + Drivers may not be able to implement a VLAN addition or reconfiguration. + In those cases it's desirable to explain to the user that it was + rejected (and why). + + To that end, add extack argument to ndo_bridge_setlink. Adapt all users + to that change. + + Following patches will use the new argument in the bridge driver. + + Signed-off-by: Petr Machata + Acked-by: Jiri Pirko + Reviewed-by: Ido Schimmel + Signed-off-by: David S. 
Miller + +commit 351cbde969230a072cabca9969c68ab04e58e6b3 +Author: Jonathan Toppins +Date: Wed Dec 12 11:58:51 2018 -0500 + + bnxt: remove printing of hwrm message + + bnxt_en 0000:19:00.0 (unregistered net_device) (uninitialized): hwrm + req_type 0x190 seq id 0x6 error 0xffff + + The message above is commonly seen when a newer driver is used on + hardware with older firmware. The issue is this message means nothing to + anyone except Broadcom. Remove the message to not confuse users as this + message is really not very informative. + + Signed-off-by: Jonathan Toppins + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit e30fbc33190b8ba1d6e8ff4864627f7414b5ca99 +Author: Michael Chan +Date: Sun Dec 9 07:01:02 2018 -0500 + + bnxt_en: Fix _bnxt_get_max_rings() for 57500 chips. + + The CP rings are accounted differently on the new 57500 chips. There + must be enough CP rings for the sum of RX and TX rings on the new + chips. The current logic may be over-estimating the RX and TX rings. + + The output parameter max_cp should be the maximum NQs capped by + MSIX vectors available for networking in the context of 57500 chips. + The existing code which uses CMPL rings capped by the MSIX vectors + works most of the time but is not always correct. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c0b8cda05e1d8151f57a79e525c2c7d51cec2f4e +Author: Michael Chan +Date: Sun Dec 9 07:01:01 2018 -0500 + + bnxt_en: Fix NQ/CP rings accounting on the new 57500 chips. + + The new 57500 chips have introduced the NQ structure in addition to + the existing CP rings in all chips. We need to introduce a new + bnxt_nq_rings_in_use(). On legacy chips, the 2 functions are the + same and one will just call the other. On the new chips, they + refer to the 2 separate ring structures. The new function is now + called to determine the resource (NQ or CP rings) associated with + MSIX that are in use. 
+ + On 57500 chips, the RDMA driver does not use the CP rings so + we don't need to do the subtraction adjustment. + + Fixes: 41e8d7983752 ("bnxt_en: Modify the ring reservation functions for 57500 series chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 75720e6323a1d195ae3ebf1a7b5e17c2e687f552 +Author: Michael Chan +Date: Sun Dec 9 07:01:00 2018 -0500 + + bnxt_en: Keep track of reserved IRQs. + + The new 57500 chips use 1 NQ per MSIX vector, whereas legacy chips use + 1 CP ring per MSIX vector. To better unify this, add a resv_irqs + field to struct bnxt_hw_resc. On legacy chips, we initialize resv_irqs + with resv_cp_rings. On new chips, we initialize it with the allocated + MSIX resources. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 804fba4e9f508c8004a4bfbdf3f300ca237c56df +Author: Michael Chan +Date: Sun Dec 9 07:00:59 2018 -0500 + + bnxt_en: Fix CNP CoS queue regression. + + Recent changes to support the 57500 devices have created this + regression. The bnxt_hwrm_queue_qportcfg() call was moved to be + called earlier before the RDMA support was determined, causing + the CoS queues configuration to be set before knowing whether RDMA + was supported or not. Fix it by moving it to the right place right + after RDMA support is determined. + + Fixes: 98f04cf0f1fc ("bnxt_en: Check context memory requirements from firmware.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8dc5ae2d48976764cf3498e97963fa06befefb0e +Author: Vasundhara Volam +Date: Thu Nov 15 03:25:42 2018 -0500 + + bnxt_en: Fix filling time in bnxt_fill_coredump_record() + + Fix the year and month offset while storing it in + bnxt_fill_coredump_record(). + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 83eb5c5cff32681f3769f502cb5589c7d7509bfe +Author: Michael Chan +Date: Thu Nov 15 03:25:41 2018 -0500 + + bnxt_en: Add software "missed_irqs" counter. + + To keep track of the number of times the workaround code for 57500 A0 + has been triggered. This is a per NQ counter. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ffd77621700ec3adcf859681e24910c38e0931f5 +Author: Michael Chan +Date: Thu Nov 15 03:25:40 2018 -0500 + + bnxt_en: Workaround occasional TX timeout on 57500 A0. + + Hardware can sometimes not generate NQ MSIX with a single pending + CP ring entry. This seems to always happen at the last entry of + the CP ring before it wraps. Add logic to check all the CP rings for + pending entries without the CP ring consumer index advancing. Calling + HWRM_DBG_RING_INFO_GET to read the context of the CP ring will flush + out the NQ entry and MSIX. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d19819297d9284bd990e22116b8b43d0abcbf488 +Author: Michael Chan +Date: Thu Nov 15 03:25:38 2018 -0500 + + bnxt_en: Fix rx_l4_csum_errors counter on 57500 devices. + + The software counter structure is defined in both the CP ring's structure + and the NQ ring's structure on the new devices. The legacy code adds the + counter to the CP ring's structure and the counter won't get displayed + since the ethtool code is looking at the NQ ring's structure. + + Since all other counters are contained in the NQ ring's structure, it + makes more sense to count rx_l4_csum_errors in the NQ. + + Fixes: 50e3ab7836b5 ("bnxt_en: Allocate completion ring structures for 57500 series chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6ba990384e924476b5eed1734f3bcca0df6fd77e +Author: Michael Chan +Date: Thu Nov 15 03:25:37 2018 -0500 + + bnxt_en: Fix RSS context allocation. + + Recent commit has added the reservation of RSS context. 
This requires + bnxt_hwrm_vnic_qcaps() to be called before allocating any RSS contexts. + The bnxt_hwrm_vnic_qcaps() call sets up proper flags that will + determine how many RSS contexts to allocate to support NTUPLE. + + This causes a regression that too many RSS contexts are being reserved + and causing resource shortage when enabling many VFs. Fix it by calling + bnxt_hwrm_vnic_qcaps() earlier. + + Fixes: 41e8d7983752 ("bnxt_en: Modify the ring reservation functions for 57500 series chips.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 35b842f25b603028fab7ff11b54a4410ee345c8a +Author: Dan Carpenter +Date: Thu Oct 18 11:02:39 2018 +0300 + + bnxt_en: Copy and paste bug in extended tx_stats + + The struct type was copied from the line before but it should be "tx" + instead of "rx". I have reviewed the code and I can't immediately see + that this bug causes a runtime issue. + + Fixes: 36e53349b60b ("bnxt_en: Add additional extended port statistics.") + Signed-off-by: Dan Carpenter + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1ab968d2f1d6d654052dbbf95f9461a6428a5487 +Author: Michael Chan +Date: Sun Oct 14 07:02:59 2018 -0400 + + bnxt_en: Add PCI ID for BCM57508 device. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0fcec9854ab478551debaef11e9c7875fc837906 +Author: Michael Chan +Date: Sun Oct 14 07:02:58 2018 -0400 + + bnxt_en: Add new NAPI poll function for 57500 chips. + + Add a new poll function that polls for NQ events. If the NQ event is + a CQ notification, we locate the CP ring from the cq_handle and call + __bnxt_poll_work() to handle RX/TX events on the CP ring. + + Add a new has_more_work field in struct bnxt_cp_ring_info to indicate + budget has been reached. __bnxt_poll_cqs_done() is called to update or + ARM the CP rings if budget has not been reached or not. If budget + has been reached, the next bnxt_poll_p5() call will continue to poll + from the CQ rings directly. 
Otherwise, the NQ will be ARMed for the + next IRQ. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3675b92fa70ece4c9263b812fc8cbf3bd606398c +Author: Michael Chan +Date: Sun Oct 14 07:02:57 2018 -0400 + + bnxt_en: Refactor bnxt_poll_work(). + + Separate the CP ring polling logic in bnxt_poll_work() into 2 separate + functions __bnxt_poll_work() and __bnxt_poll_work_done(). Since the logic + is separated, we need to add tx_pkts and events fields to struct bnxt_napi + to keep track of the events to handle between the 2 functions. We also + add had_work_done field to struct bnxt_cp_ring_info to indicate whether + some work was performed on the CP ring. + + This is needed to better support the 57500 chips. We need to poll up to + 2 separate CP rings before we update or ARM the CP rings on the 57500 chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 58590c8d9044dd6ff1757d9e9af63a253cc15101 +Author: Michael Chan +Date: Sun Oct 14 07:02:56 2018 -0400 + + bnxt_en: Add coalescing setup for 57500 chips. + + On legacy chips, the CP ring may be shared between RX and TX and so only + setup the RX coalescing parameters in such a case. On 57500 chips, we + always have a dedicated CP ring for TX so we can always set up the + TX coalescing parameters in bnxt_hwrm_set_coal(). + + Also, the min_timer coalescing parameter applies to the NQ on the new + chips and a separate firmware call needs to be made to set it up. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e44758b78ae81461a3269e7958653f1af0f78989 +Author: Michael Chan +Date: Sun Oct 14 07:02:55 2018 -0400 + + bnxt_en: Use bnxt_cp_ring_info struct pointer as parameter for RX path. + + In the RX code path, we current use the bnxt_napi struct pointer to + identify the associated RX/CP rings. Change it to use the struct + bnxt_cp_ring_info pointer instead since there are now up to 2 + CP rings per MSIX. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7b3af4f75b81bec06ed5bf411039fbd0646da44e +Author: Michael Chan +Date: Sun Oct 14 07:02:54 2018 -0400 + + bnxt_en: Add RSS support for 57500 chips. + + RSS context allocation and RSS indirection table setup are very different + on the new chip. Refactor bnxt_setup_vnic() to call 2 different functions + to set up RSS for the vnic based on chip type. On the new chip, the + number of RSS contexts and the indirection table size depends on the + number of RX rings. Each indirection table entry is also different + on the new chip since ring groups are no longer used. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 44c6f72a4c30496c7378a62e13605b217e49f991 +Author: Michael Chan +Date: Sun Oct 14 07:02:53 2018 -0400 + + bnxt_en: Increase RSS context array count and skip ring groups on 57500 chips. + + On the new 57500 chips, we need to allocate one RSS context for every + 64 RX rings. In previous chips, only one RSS context per vnic is + required regardless of the number of RX rings. So increase the max + RSS context array count to 8. + + Hardware ring groups are not used on the new chips. Note that the + software ring group structure is still maintained in the driver to + keep track of the rings associated with the vnic. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3e08b1841bc8debf6b3d722b9d355093a1537b1e +Author: Michael Chan +Date: Sun Oct 14 07:02:52 2018 -0400 + + bnxt_en: Allocate/Free CP rings for 57500 series chips. + + On the new 57500 chips, we allocate/free one CP ring for each RX ring or + TX ring separately. Using separate CP rings for RX/TX is an improvement + as TX events will no longer be stuck behind RX events. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 23aefdd761599e42d7f1f6504ff38c558e18de2a +Author: Michael Chan +Date: Sun Oct 14 07:02:51 2018 -0400 + + bnxt_en: Modify bnxt_ring_alloc_send_msg() to support 57500 chips. + + Firmware ring allocation semantics are slightly different for most + ring types on 57500 chips. Allocation/deallocation for NQ rings are + also added for the new chips. + + A CP ring handle is also added so that from the NQ interrupt event, + we can locate the CP ring. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2c61d2117ecb065aaad9e9ea9bc8bc4a6c30ec24 +Author: Michael Chan +Date: Sun Oct 14 07:02:50 2018 -0400 + + bnxt_en: Add helper functions to get firmware CP ring ID. + + On the new 57500 chips, getting the associated CP ring ID associated with + an RX ring or TX ring is different than before. On the legacy chips, + we find the associated ring group and look up the CP ring ID. On the + 57500 chips, each RX ring and TX ring has a dedicated CP ring even if + they share the MSIX. Use these helper functions at appropriate places + to get the CP ring ID. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 50e3ab7836b5efdc25e935316b3a156de3ff972e +Author: Michael Chan +Date: Sun Oct 14 07:02:49 2018 -0400 + + bnxt_en: Allocate completion ring structures for 57500 series chips. + + On 57500 chips, the original bnxt_cp_ring_info struct now refers to the + NQ. bp->cp_nr_rings refer to the number of NQs on 57500 chips. There + are now 2 pointers for the CP rings associated with RX and TX rings. + Modify bnxt_alloc_cp_rings() and bnxt_free_cp_rings() accordingly. + + With multiple CP rings per NAPI, we need to add a pointer in + bnxt_cp_ring_info struct to point back to the bnxt_napi struct. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 41e8d7983752f2a0ada01fac11cbac7413e7beec +Author: Michael Chan +Date: Sun Oct 14 07:02:48 2018 -0400 + + bnxt_en: Modify the ring reservation functions for 57500 series chips. + + The ring reservation functions have to be modified for P5 chips in the + following ways: + + - bnxt_cp_ring_info structs map to internal NQs as well as CP rings. + - Ring groups are not used. + - 1 CP ring must be available for each RX or TX ring. + - number of RSS contexts to reserve is multiples of 64 RX rings. + - RFS currently not supported. + + Also, RX AGG rings are only used for jumbo frames, so we need to + unconditionally call bnxt_reserve_rings() in __bnxt_open_nic() + to see if we need to reserve AGG rings in case MTU has changed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9c1fabdf424f27370790f1e0868b35cf63b70981 +Author: Michael Chan +Date: Sun Oct 14 07:02:47 2018 -0400 + + bnxt_en: Adjust MSIX and ring groups for 57500 series chips. + + Store the maximum MSIX capability in PCIe config. space earlier. When + we call firmware to query capability, we need to compare the PCIe + MSIX max count with the firmware count and use the smaller one as + the MSIX count for 57500 (P5) chips. + + The new chips don't use ring groups. But previous chips do and + the existing logic limits the available rings based on resource + calculations including ring groups. Setting the max ring groups to + the max rx rings will work on the new chips without changing the + existing logic. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 697197e5a1735325c0e1ef85dde2b2986354beb9 +Author: Michael Chan +Date: Sun Oct 14 07:02:46 2018 -0400 + + bnxt_en: Re-structure doorbells. + + The 57500 series chips have a new 64-bit doorbell format. Use a new + bnxt_db_info structure to unify the new and the old 32-bit doorbells. + Add a new bnxt_set_db() function to set up the doorbell addreses and + doorbell keys ahead of time. 
Modify and introduce new doorbell + helpers to help abstract and unify the old and new doorbells. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e38287b72ec5455eb1e16b1de0ba4ed54e2a748c +Author: Michael Chan +Date: Sun Oct 14 07:02:45 2018 -0400 + + bnxt_en: Add 57500 new chip ID and basic structures. + + 57500 series is a new chip class (P5) that requires some driver changes + in the next several patches. This adds basic chip ID, doorbells, and + the notification queue (NQ) structures. Each MSIX is associated with an + NQ instead of a CP ring in legacy chips. Each NQ has up to 2 associated + CP rings for RX and TX. The same bnxt_cp_ring_info struct will be used + for the NQ. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1b9394e5a2ad48be7906557ea6a500c5e8e91ee1 +Author: Michael Chan +Date: Sun Oct 14 07:02:44 2018 -0400 + + bnxt_en: Configure context memory on new devices. + + Call firmware to configure the DMA addresses of all context memory + pages on new devices requiring context memory. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 98f04cf0f1fc26ee8401e7c06b422508acc8374c +Author: Michael Chan +Date: Sun Oct 14 07:02:43 2018 -0400 + + bnxt_en: Check context memory requirements from firmware. + + New device requires host context memory as a backing store. Call + firmware to check for context memory requirements and store the + parameters. Allocate host pages accordingly. + + We also need to move the call bnxt_hwrm_queue_qportcfg() earlier + so that all the supported hardware queues and the IDs are known + before checking and allocating context memory. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 66cca20abcf742e2e39ec437144e7787ecefa037 +Author: Michael Chan +Date: Sun Oct 14 07:02:42 2018 -0400 + + bnxt_en: Add new flags to setup new page table PTE bits on newer devices. 
+ + Newer chips require the PTU_PTE_VALID bit to be set for every page + table entry for context memory and rings. Additional bits are also + required for page table entries for all rings. Add a flags field to + bnxt_ring_mem_info struct to specify these additional bits to be used + when setting up the pages tables as needed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fe19886858f1c7f96a5da3420bd040c58bcaca6 +Author: Michael Chan +Date: Sun Oct 14 07:02:41 2018 -0400 + + bnxt_en: Refactor bnxt_ring_struct. + + Move the DMA page table and vmem fields in bnxt_ring_struct to a new + bnxt_ring_mem_info struct. This will allow context memory management + for a new device to re-use some of the existing infrastructure. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 74706afa712d3d2aa497585af44f11319b2b6348 +Author: Michael Chan +Date: Sun Oct 14 07:02:40 2018 -0400 + + bnxt_en: Update interrupt coalescing logic. + + New firmware spec. allows interrupt coalescing parameters, such as + maximums, timer units, supported features to be queried. Update + the driver to make use of the new call to query these parameters + and provide the legacy defaults if the call is not available. + + Replace the hard-coded values with these parameters. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1dfddc41ae4990b991059de8dfc8e802ab67244c +Author: Michael Chan +Date: Sun Oct 14 07:02:39 2018 -0400 + + bnxt_en: Add maximum extended request length fw message support. + + Support the max_ext_req_len field from the HWRM_VER_GET_RESPONSE. + If this field is valid and greater than the mailbox size, use the + short command format to send firmware messages greater than the + mailbox size. Newer devices use this method to send larger messages + to the firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 36e53349b60bc6c629949e041567a376c9c42228 +Author: Michael Chan +Date: Sun Oct 14 07:02:38 2018 -0400 + + bnxt_en: Add additional extended port statistics. + + Latest firmware spec. has some additional rx extended port stats and new + tx extended port stats added. We now need to check the size of the + returned rx and tx extended stats and determine how many counters are + valid. New counters added include CoS byte and packet counts for rx + and tx. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 31d357c0697dadb760bf682bf1ffd9fd7053ed59 +Author: Michael Chan +Date: Sun Oct 14 07:02:37 2018 -0400 + + bnxt_en: Update firmware interface spec. to 1.10.0.3. + + Among the new changes are trusted VF support, 200Gbps support, and new + API to dump ring information on the new chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5fc7c12ffa7fe463c4da2295cdc199da9663dce5 +Author: Gustavo A. R. Silva +Date: Fri Oct 5 22:12:09 2018 +0200 + + bnxt_en: Remove unnecessary unsigned integer comparison and initialize variable + + There is no need to compare *val.vu32* with < 0 because + such variable is of type u32 (32 bits, unsigned), making it + impossible to hold a negative value. Fix this by removing + such comparison. + + Also, initialize variable *max_val* to -1, just in case + it is not initialized to either BNXT_MSIX_VEC_MAX or + BNXT_MSIX_VEC_MIN_MAX before using it in a comparison + with val.vu32 at line 159: + + if (val.vu32 > max_val) + + Addresses-Coverity-ID: 1473915 ("Unsigned compared against 0") + Addresses-Coverity-ID: 1473920 ("Uninitialized scalar variable") + Signed-off-by: Gustavo A. R. Silva + Signed-off-by: David S. Miller + +commit c78fe058879bdea919d44f23e21da26f603e9166 +Author: Vasundhara Volam +Date: Fri Oct 5 00:26:03 2018 -0400 + + bnxt_en: get the reduced max_irqs by the ones used by RDMA + + When getting the max rings supported, get the reduced max_irqs + by the ones used by RDMA. 
+ + If the number MSIX is the limiting factor, this bug may cause the + max ring count to be higher than it should be when RDMA driver is + loaded and may result in ring allocation failures. + + Fixes: 30f529473ec9 ("bnxt_en: Do not modify max IRQ count after RDMA driver requests/frees IRQs.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a2bf74f4e1b82395dad2b08d2a911d9151db71c1 +Author: Venkat Duvvuru +Date: Fri Oct 5 00:26:02 2018 -0400 + + bnxt_en: free hwrm resources, if driver probe fails. + + When the driver probe fails, all the resources that were allocated prior + to the failure must be freed. However, hwrm dma response memory is not + getting freed. + + This patch fixes the problem described above. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5db0e0969af6501ad45fe0494039d3b9c797822b +Author: Vasundhara Volam +Date: Fri Oct 5 00:26:01 2018 -0400 + + bnxt_en: Fix enables field in HWRM_QUEUE_COS2BW_CFG request + + In HWRM_QUEUE_COS2BW_CFG request, enables field should have the bits + set only for the queue ids which are having the valid parameters. + + This causes firmware to return error when the TC to hardware CoS queue + mapping is not 1:1 during DCBNL ETS setup. + + Fixes: 2e8ef77ee0ff ("bnxt_en: Add TC to hardware QoS queue mapping logic.") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dbe80d446c859873820eedfff4abc61c71f1927b +Author: Michael Chan +Date: Fri Oct 5 00:26:00 2018 -0400 + + bnxt_en: Fix VNIC reservations on the PF. + + The enables bit for VNIC was set wrong when calling the HWRM_FUNC_CFG + firmware call to reserve VNICs. This has the effect that the firmware + will keep a large number of VNICs for the PF, and having very few for + VFs. 
DPDK driver running on the VFs, which requires more VNICs, may not + work properly as a result. + + Fixes: 674f50a5b026 ("bnxt_en: Implement new method to reserve rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2dc0865e9ac7150e2d3b29afdc32d04b99f37902 +Author: Vasundhara Volam +Date: Thu Oct 4 11:13:50 2018 +0530 + + bnxt_en: Add a driver specific gre_ver_check devlink parameter. + + This patch adds following driver-specific permanent mode boolean + parameter. + + gre_ver_check - Generic Routing Encapsulation(GRE) version check + will be enabled in the device. If disabled, device skips version + checking for GRE packets. + + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit f399e8497826d35e4d83785cdff33ad779ffa62f +Author: Vasundhara Volam +Date: Thu Oct 4 11:13:49 2018 +0530 + + bnxt_en: Use msix_vec_per_pf_max and msix_vec_per_pf_min devlink params. + + This patch adds support for following generic permanent mode + devlink parameters. They can be modified using devlink param + commands. + + msix_vec_per_pf_max - This param sets the number of MSIX vectors + that the device requests from the host on driver initialization. + This value is set in the device which limits MSIX vectors per PF. + + msix_vec_per_pf_min - This param sets the number of minimal MSIX + vectors required for the device initialization. Value 0 indicates + a default value is selected. This value is set in the device which + limits MSIX vectors per PF. + + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit 3a1d52a54a6a4030b294e5f5732f0bfbae0e3815 +Author: Vasundhara Volam +Date: Thu Oct 4 11:13:48 2018 +0530 + + bnxt_en: return proper error when FW returns HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED + + Return proper error code when Firmware returns + HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED for HWRM_NVM_GET/SET_VARIABLE + commands. 
+ + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit 7d859234875d236a363101e90aaaf52a790f4590 +Author: Vasundhara Volam +Date: Thu Oct 4 11:13:47 2018 +0530 + + bnxt_en: Use ignore_ari devlink parameter + + This patch adds support for ignore_ari generic permanent mode + devlink parameter. This parameter is disabled by default. It can be + enabled using devlink param commands. + + ignore_ari - If enabled, device ignores ARI(Alternate Routing ID) + capability, even when platforms has the support and creates same number + of partitions when platform does not support ARI capability. + + Cc: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit db7ff19e7b119adb4618fbc6410b441d1c3b55c5 +Author: Eli Britstein +Date: Wed Aug 15 16:02:18 2018 +0300 + + devlink: Add extack for eswitch operations + + Add extack argument to the eswitch related operations. + + Signed-off-by: Eli Britstein + Reviewed-by: Or Gerlitz + Reviewed-by: Roi Dayan + Signed-off-by: Saeed Mahameed + +commit 62b36c3ea664b34004b9d29bf541b6c6ce30e33c +Author: Oza Pawandeep +Date: Fri Sep 28 13:00:56 2018 -0500 + + PCI/AER: Remove pci_cleanup_aer_uncorrect_error_status() calls + + After bfcb79fca19d ("PCI/ERR: Run error recovery callbacks for all affected + devices"), AER errors are always cleared by the PCI core and drivers don't + need to do it themselves. + + Remove calls to pci_cleanup_aer_uncorrect_error_status() from device + driver error recovery functions. + + Signed-off-by: Oza Pawandeep + [bhelgaas: changelog, remove PCI core changes, remove unused variables] + Signed-off-by: Bjorn Helgaas + +commit 73f21c653f930f438d53eed29b5e4c65c8a0f906 +Author: Michael Chan +Date: Wed Sep 26 00:41:04 2018 -0400 + + bnxt_en: Fix TX timeout during netpoll. + + The current netpoll implementation in the bnxt_en driver has problems + that may miss TX completion events. 
bnxt_poll_work() in effect is + only handling at most 1 TX packet before exiting. In addition, + there may be in flight TX completions that ->poll() may miss even + after we fix bnxt_poll_work() to handle all visible TX completions. + netpoll may not call ->poll() again and HW may not generate IRQ + because the driver does not ARM the IRQ when the budget (0 for netpoll) + is reached. + + We fix it by handling all TX completions and to always ARM the IRQ + when we exit ->poll() with 0 budget. + + Also, the logic to ACK the completion ring in case it is almost filled + with TX completions need to be adjusted to take care of the 0 budget + case, as discussed with Eric Dumazet + + Reported-by: Song Liu + Reviewed-by: Song Liu + Tested-by: Song Liu + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 58e0e22bff638055278ea73e34d0d07a95260790 +Author: Eric Dumazet +Date: Fri Sep 21 15:27:50 2018 -0700 + + bnxt: remove ndo_poll_controller + + As diagnosed by Song Liu, ndo_poll_controller() can + be very dangerous on loaded hosts, since the cpu + calling ndo_poll_controller() might steal all NAPI + contexts (for all RX/TX queues of the NIC). This capture + can last for unlimited amount of time, since one + cpu is generally not able to drain all the queues under load. + + bnxt uses NAPI for TX completions, so we better let core + networking stack call the napi->poll() to avoid the capture. + + Signed-off-by: Eric Dumazet + Cc: Michael Chan + Signed-off-by: David S. Miller + +commit 8c6ec3613e7b0aade20a3196169c0bab32ed3e3f +Author: Davide Caratti +Date: Wed Sep 19 19:01:37 2018 +0200 + + bnxt_en: don't try to offload VLAN 'modify' action + + bnxt offload code currently supports only 'push' and 'pop' operation: let + .ndo_setup_tc() return -EOPNOTSUPP if VLAN 'modify' action is configured. + + Fixes: 2ae7408fedfe ("bnxt_en: bnxt: add TC flower filter offload support") + Signed-off-by: Davide Caratti + Acked-by: Sathya Perla + Signed-off-by: David S. 
Miller + +commit 65fac4fe9080714df80d430888834ce87c6716ba +Author: zhong jiang +Date: Tue Sep 18 15:15:44 2018 +0800 + + net: bnxt: Fix a uninitialized variable warning. + + Fix the following compile warning: + + drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c:49:5: warning: ‘nvm_param.dir_type’ may be used uninitialized in this function [-Wmaybe-uninitialized] + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + + Signed-off-by: zhong jiang + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 28ea334bd1657f3c43485b4a8592672fc6835fac +Author: Michael Chan +Date: Fri Sep 14 15:41:29 2018 -0400 + + bnxt_en: Fix VF mac address regression. + + The recent commit to always forward the VF MAC address to the PF for + approval may not work if the PF driver or the firmware is older. This + will cause the VF driver to fail during probe: + + bnxt_en 0000:00:03.0 (unnamed net_device) (uninitialized): hwrm req_type 0xf seq id 0x5 error 0xffff + bnxt_en 0000:00:03.0 (unnamed net_device) (uninitialized): VF MAC address 00:00:17:02:05:d0 not approved by the PF + bnxt_en 0000:00:03.0: Unable to initialize mac address. + bnxt_en: probe of 0000:00:03.0 failed with error -99 + + We fix it by treating the error as fatal only if the VF MAC address is + locally generated by the VF. + + Fixes: 707e7e966026 ("bnxt_en: Always forward VF MAC address to the PF.") + Reported-by: Seth Forshee + Reported-by: Siwei Liu + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 38bb4ac91bd8322cfed9d9b922ef2d8f5daa5700 +Author: YueHaibing +Date: Wed Sep 5 11:44:10 2018 +0000 + + bnxt_en: remove set but not used variable 'addr_type' + + Fixes gcc '-Wunused-but-set-variable' warning: + + drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c: In function 'bnxt_tc_parse_flow': + drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c:186:6: warning: + variable 'addr_type' set but not used [-Wunused-but-set-variable] + + Signed-off-by: YueHaibing + Signed-off-by: David S. 
Miller + +commit 00fe9c326d2027f2437dea38ef0e82f9d02d94c0 +Author: Michael Chan +Date: Mon Sep 3 04:23:19 2018 -0400 + + bnxt_en: Do not adjust max_cp_rings by the ones used by RDMA. + + Currently, the driver adjusts the bp->hw_resc.max_cp_rings by the number + of MSIX vectors used by RDMA. There is one code path in open that needs + to check the true max_cp_rings including any used by RDMA. This code + is now checking for the reduced max_cp_rings which will fail when the + number of cp rings is very small. + + To fix this in a clean way, we don't adjust max_cp_rings anymore. + Instead, we add a helper bnxt_get_max_func_cp_rings_for_en() to get the + reduced max_cp_rings when appropriate. + + Fixes: ec86f14ea506 ("bnxt_en: Add ULP calls to stop and restart IRQs.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ad95c27bdb930105f3eea02621bda157caf2862d +Author: Michael Chan +Date: Mon Sep 3 04:23:18 2018 -0400 + + bnxt_en: Clean up unused functions. + + Remove unused bnxt_subtract_ulp_resources(). Change + bnxt_get_max_func_irqs() to static since it is only locally used. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6b95c3e9697254dab0c8eafc6ab9d5e10d2eca4e +Author: Michael Chan +Date: Mon Sep 3 04:23:17 2018 -0400 + + bnxt_en: Fix firmware signaled resource change logic in open. + + When the driver detects that resources have changed during open, it + should reset the rx and tx rings to 0. This will properly setup the + init sequence to initialize the default rings again. We also need + to signal the RDMA driver to stop and clear its interrupts. We then + call the RoCE driver to restart if a new set of default rings is + successfully reserved. + + Fixes: 25e1acd6b92b ("bnxt_en: Notify firmware about IF state changes.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 181ab62311c62fdd8c450969d0b822e1b89de42a +Author: YueHaibing +Date: Fri Aug 31 04:08:01 2018 +0000 + + bnxt_en: remove set but not used variable 'rx_stats' + + Fixes gcc '-Wunused-but-set-variable' warning: + + drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c: In function 'bnxt_vf_rep_rx': + drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c:212:28: warning: + variable 'rx_stats' set but not used [-Wunused-but-set-variable] + struct bnxt_vf_rep_stats *rx_stats; + + Signed-off-by: YueHaibing + Signed-off-by: David S. Miller + +commit 244cd96adb5f5ab39551081fb1f9009a54bb12ee +Author: Cong Wang +Date: Sun Aug 19 12:22:09 2018 -0700 + + net_sched: remove list_head from tc_action + + After commit 90b73b77d08e, list_head is no longer needed. + Now we just need to convert the list iteration to array + iteration for drivers. + + Fixes: 90b73b77d08e ("net: sched: change action API to use array of pointers to actions") + Cc: Jiri Pirko + Cc: Vlad Buslov + Signed-off-by: Cong Wang + Signed-off-by: David S. Miller + +commit 1bbf3aed25e0fc256e825da1f5c45d7b4daa828e +Author: Arnd Bergmann +Date: Tue Aug 14 00:12:45 2018 +0200 + + bnxt_en: take coredump_record structure off stack + + The bnxt_coredump_record structure is very long, causing a warning + about possible stack overflow on 32-bit architectures: + + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c: In function 'bnxt_get_coredump': + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2989:1: error: the frame size of 1188 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] + + I could not see any reason to operate on an on-stack copy of the + structure before copying it back into the caller-provided buffer, which + also simplifies the code here. + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Signed-off-by: Arnd Bergmann + Signed-off-by: David S. 
Miller + +commit 3d46eee5a5f2f22ca04e2139e8c9a16b81d16073 +Author: Arnd Bergmann +Date: Mon Aug 13 23:26:54 2018 +0200 + + bnxt_en: avoid string overflow for record->system_name + + The utsname()->nodename string may be 64 bytes long, and it gets + copied without the trailing nul byte into the shorter record->system_name, + as gcc now warns: + + In file included from include/linux/bitmap.h:9, + from include/linux/ethtool.h:16, + from drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:13: + In function 'strncpy', + inlined from 'bnxt_fill_coredump_record' at drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2863:2: + include/linux/string.h:254:9: error: '__builtin_strncpy' output truncated before terminating nul copying as many bytes from a string as its length [-Werror=stringop-truncation] + + Using strlcpy() at least avoids overflowing the destination buffer + and adds proper nul-termination. It may still truncate long names + though, which probably can't be solved here. + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Signed-off-by: Arnd Bergmann + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 8605212a7c37b5d786544263bae5e697c1aaaa16 +Author: Vasundhara Volam +Date: Fri Aug 10 18:24:43 2018 -0400 + + bnxt_en: Fix strcpy() warnings in bnxt_ethtool.c + + This patch fixes following smatch warnings: + + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2826 bnxt_fill_coredump_seg_hdr() error: strcpy() '"sEgM"' too large for 'seg_hdr->signature' (5 vs 4) + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2858 bnxt_fill_coredump_record() error: strcpy() '"cOrE"' too large for 'record->signature' (5 vs 4) + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c:2879 bnxt_fill_coredump_record() error: strcpy() 'utsname()->sysname' too large for 'record->os_name' (65 vs 32) + + Fixes: 6c5657d085ae ("bnxt_en: Add support for ethtool get dump.") + Reported-by: Dan Carpenter + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bc171e87a7e29a41b119fdfccd378f7179b39c23 +Author: Gustavo A. R. Silva +Date: Tue Aug 7 18:11:14 2018 -0500 + + bnx2x: Mark expected switch fall-thoughs + + In preparation to enabling -Wimplicit-fallthrough, mark switch cases + where we are expecting to fall through. + + Addresses-Coverity-ID: 114878 ("Missing break in switch") + Signed-off-by: Gustavo A. R. Silva + Signed-off-by: David S. Miller + +commit aabfc016e9a6db2a8c2da815fc84bfd5a2e8d221 +Author: Michael Chan +Date: Sun Aug 5 16:51:58 2018 -0400 + + bnxt_en: Do not use the CNP CoS queue for networking traffic. + + The CNP CoS queue is reserved for internal RDMA Congestion Notification + Packets (CNP) and should not be used for a TC. Modify the CoS queue + discovery code to skip over the CNP CoS queue and to reduce + bp->max_tc accordingly. However, if RDMA is disabled in NVRAM, the + the CNP CoS queue can be used for a TC. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit afdc8a84844a2163e25ad735f9f69d220ae02529 +Author: Michael Chan +Date: Sun Aug 5 16:51:57 2018 -0400 + + bnxt_en: Add DCBNL DSCP application protocol support. + + Expand the .ieee_setapp() and ieee_delapp() DCBNL methods to support + DSCP. This allows DSCP values to user priority mappings instead + of using VLAN priorities. Each DSCP mapping is added or deleted one + entry at a time using the firmware API. The firmware call can only be + made from a PF. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cde49a42a9bbba18d7f33550fd70037930c14e97 +Author: Vasundhara Volam +Date: Sun Aug 5 16:51:56 2018 -0400 + + bnxt_en: Add hwmon sysfs support to read temperature + + Export temperature sensor reading via hwmon sysfs. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 25e1acd6b92bde36c03273d883c44c4d0e8995e6 +Author: Michael Chan +Date: Sun Aug 5 16:51:55 2018 -0400 + + bnxt_en: Notify firmware about IF state changes. + + Use latest firmware API to notify firmware about IF state changes. + Firmware has the option to clean up resources during IF down and + to require the driver to reserve resources again during IF up. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 97381a1831124c95801fbfaba8436b4abc7d03f5 +Author: Michael Chan +Date: Sun Aug 5 16:51:54 2018 -0400 + + bnxt_en: Move firmware related flags to a new fw_cap field in struct bnxt. + + The flags field is almost getting full. Move firmware capability flags + to a new fw_cap field to better organize these firmware flags. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f1ca94de0d8760726dc615e8b4f9801f7ad9cf3b +Author: Michael Chan +Date: Sun Aug 5 16:51:53 2018 -0400 + + bnxt_en: Add BNXT_NEW_RM() macro. + + The BNXT_FLAG_NEW_RM flag is checked a lot in the code to determine if + the new resource manager is in effect. Define a macro to perform + this check. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6c5657d085ae8c13a8565b98e6a23fe68f0bede4 +Author: Vasundhara Volam +Date: Sun Aug 5 16:51:52 2018 -0400 + + bnxt_en: Add support for ethtool get dump. + + Add support to collect live firmware coredump via ethtool. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 50f011b63d8caab7f40de52ca6cf4807aea7a941 +Author: Michael Chan +Date: Sun Aug 5 16:51:51 2018 -0400 + + bnxt_en: Update RSS setup and GRO-HW logic according to the latest spec. + + Set the default hash mode flag in HWRM_VNIC_RSS_CFG to signal to the + firmware that the driver is compliant with the latest spec. With + that, the firmware can return expanded RSS profile IDs that the driver + checks to setup the proper gso_type for GRO-HW packets. But instead + of checking for the new profile IDs, we check the IP_TYPE flag + in TPA_START which is more straight forward than checking a list of + profile IDs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bf82736da3c376c03a42c74ea6fa971e89740d7a +Author: Michael Chan +Date: Sun Aug 5 16:51:50 2018 -0400 + + bnxt_en: Add new VF resource allocation strategy mode. + + The new mode is "minimal-static" to be used when resources are more + limited to support a large number of VFs, for example The PF driver + will provision guaranteed minimum resources of 0. Each VF has no + guranteed resources until it tries to reserve resources during device + open. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a1ef4a7920549d015128a8a49d7c9e654d197c98 +Author: Michael Chan +Date: Sun Aug 5 16:51:49 2018 -0400 + + bnxt_en: Add PHY retry logic. + + During hotplug, the driver's open function can be called almost + immediately after power on reset. The PHY may not be ready and the + firmware may return failure when the driver tries to update PHY + settings. 
Add retry logic fired from the driver's timer to retry + the operation for 5 seconds. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 55fd0cf320c3051f8dcb88c07ddd1e4c54b82cba +Author: Michael Chan +Date: Sun Aug 5 16:51:48 2018 -0400 + + bnxt_en: Add external loopback test to ethtool selftest. + + Add code to detect firmware support for external loopback and the extra + test entry for external loopback. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e795892e93b6ccbda7e0f0fc476d0d4629b44f84 +Author: Michael Chan +Date: Sun Aug 5 16:51:47 2018 -0400 + + bnxt_en: Adjust timer based on ethtool stats-block-usecs settings. + + The driver gathers statistics using 2 mechanisms. Some stats are DMA'ed + directly from hardware and others are polled from the driver's timer. + Currently, we only adjust the DMA frequency based on the ethtool + stats-block-usecs setting. This patch adjusts the driver's timer + frequency as well to make everything consistent. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fc92c33854b7844745ce424e1cb8029c06d1cf1 +Author: Michael Chan +Date: Sun Aug 5 16:51:46 2018 -0400 + + bnxt_en: Update firmware interface version to 1.9.2.25. + + New interface has firmware core dump support, new extended port + statistics, and IF state change notifications to the firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fd544c897d98bc6f185da215f1585dc144218cc +Author: YueHaibing +Date: Fri Aug 3 16:48:56 2018 +0800 + + bnxt_en: combine 'else if' and 'else' into single branch + + The else-if branch and else branch set mac_ok to true similarly, + so combine the two into single else branch. + + Also add comments to explain the two conditions, which + from Michael Chan and Vasundhara Volam. + + Signed-off-by: YueHaibing + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 6b8675897338f874c41612655a85d8e10cdb23d8 +Author: Jakub Kicinski +Date: Wed Jul 11 20:36:39 2018 -0700 + + xdp: don't make drivers report attachment mode + + prog_attached of struct netdev_bpf should have been superseded + by simply setting prog_id long time ago, but we kept it around + to allow offloading drivers to communicate attachment mode (drv + vs hw). Subsequently drivers were also allowed to report back + attachment flags (prog_flags), and since nowadays only programs + attached will XDP_FLAGS_HW_MODE can get offloaded, we can tell + the attachment mode from the flags driver reports. Remove + prog_attached member. + + Signed-off-by: Jakub Kicinski + Reviewed-by: Quentin Monnet + Signed-off-by: Daniel Borkmann + +commit c58387ab1614f6d7fb9e244f214b61e7631421fc +Author: Vikas Gupta +Date: Mon Jul 9 02:24:52 2018 -0400 + + bnxt_en: Fix for system hang if request_irq fails + + Fix bug in the error code path when bnxt_request_irq() returns failure. + bnxt_disable_napi() should not be called in this error path because + NAPI has not been enabled yet. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Vikas Gupta + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 30f529473ec962102e8bcd33a6a04f1e1b490ae2 +Author: Michael Chan +Date: Mon Jul 9 02:24:51 2018 -0400 + + bnxt_en: Do not modify max IRQ count after RDMA driver requests/frees IRQs. + + Calling bnxt_set_max_func_irqs() to modify the max IRQ count requested or + freed by the RDMA driver is flawed. The max IRQ count is checked when + re-initializing the IRQ vectors and this can happen multiple times + during ifup or ethtool -L. If the max IRQ is reduced and the RDMA + driver is operational, we may not initailize IRQs correctly. This + problem shows up on VFs with very small number of MSIX. + + There is no other logic that relies on the IRQ count excluding the ones + used by RDMA. 
So we fix it by just removing the call to subtract or + add the IRQs used by RDMA. + + Fixes: a588e4580a7e ("bnxt_en: Add interface to support RDMA driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 30e338487a476aff2f12f440d1190a71c245b99c +Author: Michael Chan +Date: Mon Jul 9 02:24:50 2018 -0400 + + bnxt_en: Support clearing of the IFF_BROADCAST flag. + + Currently, the driver assumes IFF_BROADCAST is always set and always sets + the broadcast filter. Modify the code to set or clear the broadcast + filter according to the IFF_BROADCAST flag. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 78f058a4aa0f2280dc4d45d2c4a95728398ef857 +Author: Michael Chan +Date: Mon Jul 9 02:24:49 2018 -0400 + + bnxt_en: Always set output parameters in bnxt_get_max_rings(). + + The current code returns -ENOMEM and does not bother to set the output + parameters to 0 when no rings are available. Some callers, such as + bnxt_get_channels() will display garbage ring numbers when that happens. + Fix it by always setting the output parameters. + + Fixes: 6e6c5a57fbe1 ("bnxt_en: Modify bnxt_get_max_rings() to support shared or non shared rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 07f4fde53d12eb8d921b465bb298e964e0bdc38c +Author: Michael Chan +Date: Mon Jul 9 02:24:48 2018 -0400 + + bnxt_en: Fix inconsistent BNXT_FLAG_AGG_RINGS logic. + + If there aren't enough RX rings available, the driver will attempt to + use a single RX ring without the aggregation ring. If that also + fails, the BNXT_FLAG_AGG_RINGS flag is cleared but the other ring + parameters are not set consistently to reflect that. If more RX + rings become available at the next open, the RX rings will be in + an inconsistent state and may crash when freeing the RX rings. + + Fix it by restoring the BNXT_FLAG_AGG_RINGS if not enough RX rings are + available to run without aggregation rings. 
+ + Fixes: bdbd1eb59c56 ("bnxt_en: Handle no aggregation ring gracefully.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e32d4e60b350124065e0ffd9c91ac13a439aee9a +Author: Venkat Duvvuru +Date: Mon Jul 9 02:24:47 2018 -0400 + + bnxt_en: Fix the vlan_tci exact match check. + + It is possible that OVS may set don’t care for DEI/CFI bit in + vlan_tci mask. Hence, checking for vlan_tci exact match will endup + in a vlan flow rejection. + + This patch fixes the problem by checking for vlan_pcp and vid + separately, instead of checking for the entire vlan_tci. + + Fixes: e85a9be93cf1 (bnxt_en: do not allow wildcard matches for L2 flows) + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6354b95eb871beee89b8679a1f576fccc132cf90 +Author: Vasundhara Volam +Date: Wed Jul 4 14:30:37 2018 +0300 + + bnxt_en: Add bnxt_en initial params table and register it. + + Create initial devlink parameters table for bnxt_en. + Table consists of a permanent generic parameter. + + enable_sriov - Enables Single-Root Input/Output Virtualization(SR-IOV) + characteristic of the device. + + Reviewed-by: Jiri Pirko + Reviewed-by: Michael Chan + Signed-off-by: Vasundhara Volam + Signed-off-by: David S. Miller + +commit 83607344d667315687e1a5ddd2ad2fbbff22cc43 +Author: Gustavo A. R. Silva +Date: Wed Jun 27 20:32:23 2018 -0500 + + bnx2x: Mark expected switch fall-throughs + + In preparation to enabling -Wimplicit-fallthrough, mark switch cases + where we are expecting to fall through. + + Signed-off-by: Gustavo A. R. Silva + Signed-off-by: David S. Miller + +commit 60513bd82c825b659c05957e4f8106ba06f0797f +Author: John Hurley +Date: Mon Jun 25 14:30:04 2018 -0700 + + net: sched: pass extack pointer to block binds and cb registration + + Pass the extact struct from a tc qdisc add to the block bind function and, + in turn, to the setup_tc ndo of binding device via the tc_block_offload + struct. 
Pass this back to any block callback registrations to allow + netlink logging of fails in the bind process. + + Signed-off-by: John Hurley + Signed-off-by: Jakub Kicinski + Acked-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 83741bb0430465ea9f0654d4772c03d694b33ad7 +Author: Jiri Pirko +Date: Sun Jun 24 10:38:37 2018 +0200 + + bnxt: simplify cls_flower command switch and handle default case + + Currently the default case is not handled, which with future command + introductions would introduce a warning. So handle it and make the + switch a bit simplier removing unneeded "rc" variable. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 6da2ec56059c3c7a7e5f729e6349e74ace1e5c57 +Author: Kees Cook +Date: Tue Jun 12 13:55:00 2018 -0700 + + treewide: kmalloc() -> kmalloc_array() + + The kmalloc() function has a 2-factor argument form, kmalloc_array(). This + patch replaces cases of: + + kmalloc(a * b, gfp) + + with: + kmalloc_array(a * b, gfp) + + as well as handling cases of: + + kmalloc(a * b * c, gfp) + + with: + + kmalloc(array3_size(a, b, c), gfp) + + as it's slightly less ugly than: + + kmalloc_array(array_size(a, b), c, gfp) + + This does, however, attempt to ignore constant size factors like: + + kmalloc(4 * 1024, gfp) + + though any constants defined via macros get caught up in the conversion. + + Any factors with a sizeof() of "unsigned char", "char", and "u8" were + dropped, since they're redundant. + + The tools/ directory was manually excluded, since it has its own + implementation of kmalloc(). + + The Coccinelle script used for this was: + + // Fix redundant parens around sizeof(). + @@ + type TYPE; + expression THING, E; + @@ + + ( + kmalloc( + - (sizeof(TYPE)) * E + + sizeof(TYPE) * E + , ...) + | + kmalloc( + - (sizeof(THING)) * E + + sizeof(THING) * E + , ...) + ) + + // Drop single-byte sizes and redundant parens. 
+ @@ + expression COUNT; + typedef u8; + typedef __u8; + @@ + + ( + kmalloc( + - sizeof(u8) * (COUNT) + + COUNT + , ...) + | + kmalloc( + - sizeof(__u8) * (COUNT) + + COUNT + , ...) + | + kmalloc( + - sizeof(char) * (COUNT) + + COUNT + , ...) + | + kmalloc( + - sizeof(unsigned char) * (COUNT) + + COUNT + , ...) + | + kmalloc( + - sizeof(u8) * COUNT + + COUNT + , ...) + | + kmalloc( + - sizeof(__u8) * COUNT + + COUNT + , ...) + | + kmalloc( + - sizeof(char) * COUNT + + COUNT + , ...) + | + kmalloc( + - sizeof(unsigned char) * COUNT + + COUNT + , ...) + ) + + // 2-factor product with sizeof(type/expression) and identifier or constant. + @@ + type TYPE; + expression THING; + identifier COUNT_ID; + constant COUNT_CONST; + @@ + + ( + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * (COUNT_ID) + + COUNT_ID, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * COUNT_ID + + COUNT_ID, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * (COUNT_CONST) + + COUNT_CONST, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * COUNT_CONST + + COUNT_CONST, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * (COUNT_ID) + + COUNT_ID, sizeof(THING) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * COUNT_ID + + COUNT_ID, sizeof(THING) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * (COUNT_CONST) + + COUNT_CONST, sizeof(THING) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * COUNT_CONST + + COUNT_CONST, sizeof(THING) + , ...) + ) + + // 2-factor product, only identifiers. + @@ + identifier SIZE, COUNT; + @@ + + - kmalloc + + kmalloc_array + ( + - SIZE * COUNT + + COUNT, SIZE + , ...) + + // 3-factor product with 1 sizeof(type) or sizeof(expression), with + // redundant parens removed. 
+ @@ + expression THING; + identifier STRIDE, COUNT; + type TYPE; + @@ + + ( + kmalloc( + - sizeof(TYPE) * (COUNT) * (STRIDE) + + array3_size(COUNT, STRIDE, sizeof(TYPE)) + , ...) + | + kmalloc( + - sizeof(TYPE) * (COUNT) * STRIDE + + array3_size(COUNT, STRIDE, sizeof(TYPE)) + , ...) + | + kmalloc( + - sizeof(TYPE) * COUNT * (STRIDE) + + array3_size(COUNT, STRIDE, sizeof(TYPE)) + , ...) + | + kmalloc( + - sizeof(TYPE) * COUNT * STRIDE + + array3_size(COUNT, STRIDE, sizeof(TYPE)) + , ...) + | + kmalloc( + - sizeof(THING) * (COUNT) * (STRIDE) + + array3_size(COUNT, STRIDE, sizeof(THING)) + , ...) + | + kmalloc( + - sizeof(THING) * (COUNT) * STRIDE + + array3_size(COUNT, STRIDE, sizeof(THING)) + , ...) + | + kmalloc( + - sizeof(THING) * COUNT * (STRIDE) + + array3_size(COUNT, STRIDE, sizeof(THING)) + , ...) + | + kmalloc( + - sizeof(THING) * COUNT * STRIDE + + array3_size(COUNT, STRIDE, sizeof(THING)) + , ...) + ) + + // 3-factor product with 2 sizeof(variable), with redundant parens removed. + @@ + expression THING1, THING2; + identifier COUNT; + type TYPE1, TYPE2; + @@ + + ( + kmalloc( + - sizeof(TYPE1) * sizeof(TYPE2) * COUNT + + array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2)) + , ...) + | + kmalloc( + - sizeof(TYPE1) * sizeof(THING2) * (COUNT) + + array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2)) + , ...) + | + kmalloc( + - sizeof(THING1) * sizeof(THING2) * COUNT + + array3_size(COUNT, sizeof(THING1), sizeof(THING2)) + , ...) + | + kmalloc( + - sizeof(THING1) * sizeof(THING2) * (COUNT) + + array3_size(COUNT, sizeof(THING1), sizeof(THING2)) + , ...) + | + kmalloc( + - sizeof(TYPE1) * sizeof(THING2) * COUNT + + array3_size(COUNT, sizeof(TYPE1), sizeof(THING2)) + , ...) + | + kmalloc( + - sizeof(TYPE1) * sizeof(THING2) * (COUNT) + + array3_size(COUNT, sizeof(TYPE1), sizeof(THING2)) + , ...) + ) + + // 3-factor product, only identifiers, with redundant parens removed. 
+ @@ + identifier STRIDE, SIZE, COUNT; + @@ + + ( + kmalloc( + - (COUNT) * STRIDE * SIZE + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - COUNT * (STRIDE) * SIZE + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - COUNT * STRIDE * (SIZE) + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - (COUNT) * (STRIDE) * SIZE + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - COUNT * (STRIDE) * (SIZE) + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - (COUNT) * STRIDE * (SIZE) + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - (COUNT) * (STRIDE) * (SIZE) + + array3_size(COUNT, STRIDE, SIZE) + , ...) + | + kmalloc( + - COUNT * STRIDE * SIZE + + array3_size(COUNT, STRIDE, SIZE) + , ...) + ) + + // Any remaining multi-factor products, first at least 3-factor products, + // when they're not all constants... + @@ + expression E1, E2, E3; + constant C1, C2, C3; + @@ + + ( + kmalloc(C1 * C2 * C3, ...) + | + kmalloc( + - (E1) * E2 * E3 + + array3_size(E1, E2, E3) + , ...) + | + kmalloc( + - (E1) * (E2) * E3 + + array3_size(E1, E2, E3) + , ...) + | + kmalloc( + - (E1) * (E2) * (E3) + + array3_size(E1, E2, E3) + , ...) + | + kmalloc( + - E1 * E2 * E3 + + array3_size(E1, E2, E3) + , ...) + ) + + // And then all remaining 2 factors products when they're not all constants, + // keeping sizeof() as the second factor argument. + @@ + expression THING, E1, E2; + type TYPE; + constant C1, C2, C3; + @@ + + ( + kmalloc(sizeof(THING) * C2, ...) + | + kmalloc(sizeof(TYPE) * C2, ...) + | + kmalloc(C1 * C2 * C3, ...) + | + kmalloc(C1 * C2, ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * (E2) + + E2, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(TYPE) * E2 + + E2, sizeof(TYPE) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * (E2) + + E2, sizeof(THING) + , ...) + | + - kmalloc + + kmalloc_array + ( + - sizeof(THING) * E2 + + E2, sizeof(THING) + , ...) 
+ | + - kmalloc + + kmalloc_array + ( + - (E1) * E2 + + E1, E2 + , ...) + | + - kmalloc + + kmalloc_array + ( + - (E1) * (E2) + + E1, E2 + , ...) + | + - kmalloc + + kmalloc_array + ( + - E1 * E2 + + E1, E2 + , ...) + ) + + Signed-off-by: Kees Cook + +commit af125b754e2f09e6061e65db8f4eda0f7730011d +Author: Bjorn Helgaas +Date: Fri Mar 30 14:09:54 2018 -0500 + + bnxt_en: Report PCIe link properties with pcie_print_link_status() + + Previously the driver used pcie_get_minimum_link() to warn when the NIC + is in a slot that can't supply as much bandwidth as the NIC could use. + + pcie_get_minimum_link() can be misleading because it finds the slowest link + and the narrowest link (which may be different links) without considering + the total bandwidth of each link. For a path with a 16 GT/s x1 link and a + 2.5 GT/s x16 link, it returns 2.5 GT/s x1, which corresponds to 250 MB/s of + bandwidth, not the true available bandwidth of about 1969 MB/s for a + 16 GT/s x1 link. + + Use pcie_print_link_status() to report PCIe link speed and possible + limitations instead of implementing this in the driver itself. This finds + the slowest link in the path to the device by computing the total bandwidth + of each link and compares that with the capabilities of the device. + + The dmesg change is: + + - PCIe: Speed %s Width x%d + + %u.%03u Gb/s available PCIe bandwidth (%s x%d link) + + Signed-off-by: Bjorn Helgaas + +commit 707e7e96602675beb5e09bb994195663da6eb56d +Author: Michael Chan +Date: Tue May 8 03:18:41 2018 -0400 + + bnxt_en: Always forward VF MAC address to the PF. + + The current code already forwards the VF MAC address to the PF, except + in one case. If the VF driver gets a valid MAC address from the firmware + during probe time, it will not forward the MAC address to the PF, + incorrectly assuming that the PF already knows the MAC address. This + causes "ip link show" to show zero VF MAC addresses for this case. + + This assumption is not correct. 
Newer firmware remembers the VF MAC + address last used by the VF and provides it to the VF driver during + probe. So we need to always forward the VF MAC address to the PF. + + The forwarded MAC address may now be the PF assigned MAC address and so we + need to make sure we approve it for this case. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7328a23c063a9ecf56314fb9631889c1820bd0ce +Author: Vasundhara Volam +Date: Tue May 8 03:18:40 2018 -0400 + + bnxt_en: Read phy eeprom A2h address only when optical diagnostics is supported. + + For SFP+ modules, 0xA2 page is available only when Diagnostic Monitoring + Type [Address A0h, Byte 92] is implemented. Extend bnxt_get_module_info(), + to read optical diagnostics support at offset 92(0x5c) and set eeprom_len + length to ETH_MODULE_SFF_8436_LEN (to exclude A2 page), if dianostics is + not supported. + + Also in bnxt_get_module_info(), module id is read from offset 0x5e which + is not correct. It was working by accident, as offset was not effective + without setting enables flag in the firmware request. SFP module id is + present at location 0. Fix this by removing the offset and read it + from location 0. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dac0490718bd17df5e3995ffca14255e5f9ed22d +Author: Michael Chan +Date: Tue May 8 03:18:39 2018 -0400 + + bnxt_en: Check unsupported speeds in bnxt_update_link() on PF only. + + Only non-NPAR PFs need to actively check and manage unsupported link + speeds. NPAR functions and VFs do not control the link speed and + should skip the unsupported speed detection logic, to avoid warning + messages from firmware rejecting the unsupported firmware calls. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cc559c1ac250a6025bd4a9528e424b8da250655b +Author: Michael Chan +Date: Tue May 8 03:18:38 2018 -0400 + + bnxt_en: Fix firmware message delay loop regression. 
+ + A recent change to reduce delay granularity waiting for firmware + reponse has caused a regression. With a tighter delay loop, + the driver may see the beginning part of the response faster. + The original 5 usec delay to wait for the rest of the message + is not long enough and some messages are detected as invalid. + + Increase the maximum wait time from 5 usec to 20 usec. Also, fix + the debug message that shows the total delay time for the response + when the message times out. With the new logic, the delay time + is not fixed per iteration of the loop, so we define a macro to + show the total delay time. + + Fixes: 9751e8e71487 ("bnxt_en: reduce timeout on initial HWRM calls") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 47558acd56a74c1ac598093930a5559270bf8c09 +Author: Michael Chan +Date: Thu Apr 26 17:44:44 2018 -0400 + + bnxt_en: Reserve rings at driver open if none was reserved at probe time. + + Add logic to reserve default rings at driver open time if none was + reserved during probe time. This will happen when the PF driver did + not provision minimum rings to the VF, due to more limited resources. + + Driver open will only succeed if some minimum rings can be reserved. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 86c3380d9b1e2a3fcc87d34cea12991b81032b9f +Author: Michael Chan +Date: Thu Apr 26 17:44:43 2018 -0400 + + bnxt_en: Reserve RSS and L2 contexts for VF. + + For completeness and correctness, the VF driver needs to reserve these + RSS and L2 contexts. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2773dfb201e18722265c38dacdea6ecadf933064 +Author: Michael Chan +Date: Thu Apr 26 17:44:42 2018 -0400 + + bnxt_en: Don't reserve rings on VF when min rings were not provisioned by PF. + + When rings are more limited and the PF has not provisioned minimum + guaranteed rings to the VF, do not reserve rings during driver probe. 
+ Wait till device open before reserving rings when they will be used. + Device open will succeed if some minimum rings can be successfully + reserved and allocated. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d8c09f19accb89fc08b246339abb005455e4c846 +Author: Michael Chan +Date: Thu Apr 26 17:44:41 2018 -0400 + + bnxt_en: Reserve rings in bnxt_set_channels() if device is down. + + The current code does not reserve rings during ethtool -L when the device + is down. The rings will be reserved when the device is later opened. + + Change it to reserve rings during ethtool -L when the device is down. + This provides a better guarantee that the device open will be successful + when the rings are reserved ahead of time. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cabfb09d87bd7980cb4e39bd2ce679a788eb7e7a +Author: Andy Gospodarek +Date: Thu Apr 26 17:44:40 2018 -0400 + + bnxt_en: add debugfs support for DIM + + This adds debugfs support for bnxt_en with the purpose of allowing users + to examine the current DIM profile in use for each receive queue. This + was instrumental in debugging issues found with DIM and ensuring that + the profiles we expect to use are the profiles being used. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9751e8e714872aa650b030e52a9fafbb694a3714 +Author: Andy Gospodarek +Date: Thu Apr 26 17:44:39 2018 -0400 + + bnxt_en: reduce timeout on initial HWRM calls + + Testing with DIM enabled on older kernels indicated that firmware calls + were slower than expected. More detailed analysis indicated that the + default 25us delay was higher than necessary. Reducing the time spend in + usleep_range() for the first several calls would reduce the overall + latency of firmware calls on newer Intel processors. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 05abe4ddf0010e15419f5a6758b5bf44b7790982 +Author: Andy Gospodarek +Date: Thu Apr 26 17:44:38 2018 -0400 + + bnxt_en: Increase RING_IDLE minimum threshold to 50 + + This keeps the RING_IDLE flag set in hardware for higher coalesce + settings by default and improved latency. + + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4cebbaca12514986039b2ac7d30e36ecd2222f64 +Author: Michael Chan +Date: Thu Apr 26 17:44:37 2018 -0400 + + bnxt_en: Do not allow VF to read EEPROM. + + Firmware does not allow the operation and would return failure, causing + a warning in dmesg. So check for VF and disallow it in the driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 20c1d28e106c0b526ae015fcac8e1e254bff091c +Author: Vasundhara Volam +Date: Thu Apr 26 17:44:36 2018 -0400 + + bnxt_en: Display function level rx/tx_discard_pkts via ethtool + + Add counters to display sum of rx/tx_discard_pkts of all rings as + function level statistics via ethtool. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2727c888f2f8bef071e9a07d6e2f018840d0a834 +Author: Michael Chan +Date: Thu Apr 26 17:44:35 2018 -0400 + + bnxt_en: Simplify ring alloc/free error messages. + + Replace switch statements printing different messages for every ring type + with a common message. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ca2c39e2ec04e78ca6eb5162621cb9a5b897ca16 +Author: Michael Chan +Date: Thu Apr 26 17:44:34 2018 -0400 + + bnxt_en: Do not set firmware time from VF driver on older firmware. + + Older firmware will reject this call and cause an error message to + be printed by the VF driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 59895f596b13b4b09f739bf8470a5028a5ff2b9a +Author: Michael Chan +Date: Thu Apr 26 17:44:33 2018 -0400 + + bnxt_en: Check the lengths of encapsulated firmware responses. + + Firmware messages that are forwarded from PF to VFs are encapsulated. + The size of these encapsulated messages must not exceed the maximum + defined message size. Add appropriate checks to avoid oversize + messages. Firmware messages may be expanded in future specs and + this will provide some guardrails to avoid data corruption. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d31cd579a45c44ede9e56c2f6d33537ba395a49b +Author: Michael Chan +Date: Thu Apr 26 17:44:32 2018 -0400 + + bnxt_en: Remap TC to hardware queues when configuring PFC. + + Initially, the MQPRIO TCs are mapped 1:1 directly to the hardware + queues. Some of these hardware queues are configured to be lossless. + When PFC is enabled on one of more TCs, we now need to remap the + TCs that have PFC enabled to the lossless hardware queues. + + After remapping, we need to close and open the NIC for the new + mapping to take effect. We also need to reprogram all ETS parameters. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2e8ef77ee0ff1117251a48f79d2d57d65afd0495 +Author: Michael Chan +Date: Thu Apr 26 17:44:31 2018 -0400 + + bnxt_en: Add TC to hardware QoS queue mapping logic. + + The current driver maps MQPRIO traffic classes directly 1:1 to the + internal hardware queues (TC0 maps to hardware queue 0, etc). This + direct mapping requires the internal hardware queues to be reconfigured + from lossless to lossy and vice versa when necessary. This + involves reconfiguring internal buffer thresholds which is + disruptive and not always reliable. + + Implement a new scheme to map TCs to internal hardware queues by + matching up their PFC requirements. This will eliminate the need + to reconfigure a hardware queue internal buffers at run time. 
After + remapping, the NIC is closed and opened for the new TC to hardware + queues to take effect. + + This patch only adds the basic mapping logic. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 026a807c2de37aa826748c2ffa1969fc778406b2 +Author: Tal Gilboa +Date: Tue Apr 24 13:36:01 2018 +0300 + + net/dim: Rename *_get_profile() functions to *_get_rx_moderation() + + Preparation for introducing adaptive TX to net DIM. + + Signed-off-by: Tal Gilboa + Reviewed-by: Tariq Toukan + Signed-off-by: David S. Miller + +commit a60faa60da891e311e19fd3e88d611863f431130 +Author: Vasundhara Volam +Date: Thu Apr 19 03:16:16 2018 -0400 + + bnxt_en: Fix memory fault in bnxt_ethtool_init() + + In some firmware images, the length of BNX_DIR_TYPE_PKG_LOG nvram type + could be greater than the fixed buffer length of 4096 bytes allocated by + the driver. This was causing HWRM_NVM_READ to copy more data to the buffer + than the allocated size, causing general protection fault. + + Fix the issue by allocating the exact buffer length returned by + HWRM_NVM_FIND_DIR_ENTRY, instead of 4096. Move the kzalloc() call + into the bnxt_get_pkgver() function. + + Fixes: 3ebf6f0a09a2 ("bnxt_en: Add installed-package firmware version reporting via Ethtool GDRVINFO") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b968e735c79767a3c91217fbae691581aa557d8d +Author: Nikita V. Shirokov +Date: Tue Apr 17 21:42:16 2018 -0700 + + bpf: make bnxt compatible w/ bpf_xdp_adjust_tail + + w/ bpf_xdp_adjust_tail helper xdp's data_end pointer could be changed as + well (only "decrease" of pointer's location is going to be supported). + changing of this pointer will change packet's size. + for bnxt driver we will just calculate packet's length unconditionally + + Acked-by: Alexei Starovoitov + Signed-off-by: Nikita V. 
Shirokov + Acked-by: Michael Chan + Signed-off-by: Daniel Borkmann + +commit cb98526bf9b985866d648dbb9c983ba9eb59daba +Author: Michael Chan +Date: Wed Apr 11 11:50:18 2018 -0400 + + bnxt_en: Fix NULL pointer dereference at bnxt_free_irq(). + + When open fails during ethtool -L ring change, for example, the driver + may crash at bnxt_free_irq() because bp->bnapi is NULL. + + If we fail to allocate all the new rings, bnxt_open_nic() will free + all the memory including bp->bnapi. Subsequent call to bnxt_close_nic() + will try to dereference bp->bnapi in bnxt_free_irq(). + + Fix it by checking for !bp->bnapi in bnxt_free_irq(). + + Fixes: e5811b8c09df ("bnxt_en: Add IRQ remapping logic.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 11c3ec7bb940b6fa3f87f05f01b7f45eef08dfbb +Author: Michael Chan +Date: Wed Apr 11 11:50:17 2018 -0400 + + bnxt_en: Need to include RDMA rings in bnxt_check_rings(). + + With recent changes to reserve both L2 and RDMA rings, we need to include + the RDMA rings in bnxt_check_rings(). Otherwise we will under-estimate + the rings we need during ethtool -L and may lead to failure. + + Fixes: fbcfc8e46741 ("bnxt_en: Reserve completion rings and MSIX for bnxt_re RDMA driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9d96465b111edd6c4f94345783e6e01db7f435d6 +Author: Sriharsha Basavapatna +Date: Wed Apr 11 11:50:16 2018 -0400 + + bnxt_en: Support max-mtu with VF-reps + + While a VF is configured with a bigger mtu (> 1500), any packets that + are punted to the VF-rep (slow-path) get dropped by OVS kernel-datapath + with the following message: "dropped over-mtu packet". Fix this by + returning the max-mtu value for a VF-rep derived from its corresponding VF. + VF-rep's mtu can be changed using 'ip' command as shown in this example: + + $ ip link set bnxt0_pf0vf0 mtu 9000 + + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 479ca3bf91da971fcefc003cf5773e8d7db24794 +Author: Sriharsha Basavapatna +Date: Wed Apr 11 11:50:15 2018 -0400 + + bnxt_en: Ignore src port field in decap filter nodes + + The driver currently uses src port field (along with other fields) in the + decap tunnel key, while looking up and adding tunnel nodes. This leads to + redundant cfa_decap_filter_alloc() requests to the FW and flow-miss in the + flow engine. Fix this by ignoring the src port field in decap tunnel nodes. + + Fixes: f484f6782e01 ("bnxt_en: add hwrm FW cmds for cfa_encap_record and decap_filter") + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e85a9be93cf144623a823a0a60e4eda6ee337aef +Author: Andy Gospodarek +Date: Wed Apr 11 11:50:14 2018 -0400 + + bnxt_en: do not allow wildcard matches for L2 flows + + Before this patch the following commands would succeed as far as the + user was concerned: + + $ tc qdisc add dev p1p1 ingress + $ tc filter add dev p1p1 parent ffff: protocol all \ + flower skip_sw action drop + $ tc filter add dev p1p1 parent ffff: protocol ipv4 \ + flower skip_sw src_mac 00:02:00:00:00:01/44 action drop + + The current flow offload infrastructure used does not support wildcard + matching for ethernet headers, so do not allow the second or third + commands to succeed. If a user wants to drop traffic on that interface + the protocol and MAC addresses need to be specified explicitly: + + $ tc qdisc add dev p1p1 ingress + $ tc filter add dev p1p1 parent ffff: protocol arp \ + flower skip_sw action drop + $ tc filter add dev p1p1 parent ffff: protocol ipv4 \ + flower skip_sw action drop + ... + $ tc filter add dev p1p1 parent ffff: protocol ipv4 \ + flower skip_sw src_mac 00:02:00:00:00:01 action drop + $ tc filter add dev p1p1 parent ffff: protocol ipv4 \ + flower skip_sw src_mac 00:02:00:00:00:02 action drop + ... 
+ + There are also checks for VLAN parameters in this patch as other callers + may wildcard those parameters even if tc does not. Using different + flow infrastructure could allow this to work in the future for L2 flows, + but for now it does not. + + Fixes: 2ae7408fedfe ("bnxt_en: bnxt: add TC flower filter offload support") + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7991cb9cfbce1b60ac1cff819350b05de4d902e1 +Author: Michael Chan +Date: Wed Apr 11 11:50:13 2018 -0400 + + bnxt_en: Fix ethtool -x crash when device is down. + + Fix ethtool .get_rxfh() crash by checking for valid indirection table + address before copying the data. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ec86f14ea5064e36ee111297bdb376dda4cba264 +Author: Michael Chan +Date: Sat Mar 31 13:54:21 2018 -0400 + + bnxt_en: Add ULP calls to stop and restart IRQs. + + When the driver needs to re-initailize the IRQ vectors, we make the + new ulp_irq_stop() call to tell the RDMA driver to disable and free + the IRQ vectors. After IRQ vectors have been re-initailized, we + make the ulp_irq_restart() call to tell the RDMA driver that + IRQs can be restarted. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbcfc8e4674156cb7eb3d8054bd4332142d2cc58 +Author: Michael Chan +Date: Sat Mar 31 13:54:20 2018 -0400 + + bnxt_en: Reserve completion rings and MSIX for bnxt_re RDMA driver. + + Add additional logic to reserve completion rings for the bnxt_re driver + when it requests MSIX vectors. The function bnxt_cp_rings_in_use() + will return the total number of completion rings used by both drivers + that need to be reserved. If the network interface in up, we will + close and open the NIC to reserve the new set of completion rings and + re-initialize the vectors. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 4e41dc5deb6e5c36ac5f2e49575485920037b2aa +Author: Michael Chan +Date: Sat Mar 31 13:54:19 2018 -0400 + + bnxt_en: Refactor bnxt_need_reserve_rings(). + + Refactor bnxt_need_reserve_rings() slightly so that __bnxt_reserve_rings() + can call it and remove some duplicated code. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e5811b8c09df9bc80eabc95339fceded23f16289 +Author: Michael Chan +Date: Sat Mar 31 13:54:18 2018 -0400 + + bnxt_en: Add IRQ remapping logic. + + Add remapping logic so that bnxt_en can use any arbitrary MSIX vectors. + This will allow the driver to reserve one range of MSIX vectors to be + used by both bnxt_en and bnxt_re. bnxt_en can now skip over the MSIX + vectors used by bnxt_re. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 08654eb213a8066b30c41e22067a9f066b40c80f +Author: Michael Chan +Date: Sat Mar 31 13:54:17 2018 -0400 + + bnxt_en: Change IRQ assignment for RDMA driver. + + In the current code, the range of MSIX vectors allocated for the RDMA + driver is disjoint from the network driver. This creates a problem + for the new firmware ring reservation scheme. The new scheme requires + the reserved completion rings/MSIX vectors to be in a contiguous + range. + + Change the logic to allocate RDMA MSIX vectors to be contiguous with + the vectors used by bnxt_en on new firmware using the new scheme. + The new function bnxt_get_num_msix() calculates the exact number of + vectors needed by both drivers. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9899bb59ff08a50aef033b4d388d223adca58a7f +Author: Michael Chan +Date: Sat Mar 31 13:54:16 2018 -0400 + + bnxt_en: Improve ring allocation logic. + + Currently, the driver code makes some assumptions about the group index + and the map index of rings. This makes the code more difficult to + understand and less flexible. 
+ + Improve it by adding the grp_idx and map_idx fields explicitly to the + bnxt_ring_struct as a union. The grp_idx is initialized for each tx ring + and rx agg ring during init. time. We do the same for the map_idx for + each cmpl ring. + + The grp_idx ties the tx ring to the ring group. The map_idx is the + doorbell index of the ring. With this new infrastructure, we can change + the ring index mapping scheme easily in the future. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 845adfe40c2a75e67ddae6639fc2b987338b7983 +Author: Michael Chan +Date: Sat Mar 31 13:54:15 2018 -0400 + + bnxt_en: Improve valid bit checking in firmware response message. + + When firmware sends a DMA response to the driver, the last byte of the + message will be set to 1 to indicate that the whole response is valid. + The driver waits for the message to be valid before reading the message. + + The firmware spec allows these response messages to increase in + length by adding new fields to the end of these messages. The + older spec's valid location may become a new field in a newer + spec. To guarantee compatibility, the driver should zero the valid + byte before interpreting the entire message so that any new fields not + implemented by the older spec will be read as zero. + + For messages that are forwarded to VFs, we need to set the length + and re-instate the valid bit so the VF will see the valid response. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 596f9d55feebdf31c03172fcc82cdec62bb969ea +Author: Michael Chan +Date: Sat Mar 31 13:54:14 2018 -0400 + + bnxt_en: Improve resource accounting for SRIOV. + + When VFs are created, the current code subtracts the maximum VF + resources from the PF's pool. This under-estimates the resources + remaining in the PF pool. Instead, we should subtract the minimum + VF resources. The VF minimum resources are guaranteed to the VFs + and only these should be subtracted from the PF's pool. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit db4723b3cd2d836ae44382d16e6a4418ae8929dc +Author: Michael Chan +Date: Sat Mar 31 13:54:13 2018 -0400 + + bnxt_en: Check max_tx_scheduler_inputs value from firmware. + + When checking for the maximum pre-set TX channels for ethtool -l, we + need to check the current max_tx_scheduler_inputs parameter from firmware. + This parameter specifies the max input for the internal QoS nodes currently + available to this function. The function's TX rings will be capped by this + parameter. By adding this logic, we provide a more accurate pre-set max + TX channels to the user. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 00db3cba35211cd7d458d378a5931fadfa86a17c +Author: Vasundhara Volam +Date: Sat Mar 31 13:54:12 2018 -0400 + + bnxt_en: Add extended port statistics support + + Gather periodic extended port statistics, if the device is PF and + link is up. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 699efed00df0631e39a639b49e3b8e27e62e6c89 +Author: Vasundhara Volam +Date: Sat Mar 31 13:54:11 2018 -0400 + + bnxt_en: Include additional hardware port statistics in ethtool -S. + + Include additional hardware port statistics in ethtool -S, which + are useful for debugging. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 746df139646ea7fd11c26f88fd95a247d2a7c94b +Author: Vasundhara Volam +Date: Sat Mar 31 13:54:10 2018 -0400 + + bnxt_en: Add support for ndo_set_vf_trust + + Trusted VFs are allowed to modify MAC address, even when PF + has assigned one. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2373d8d6a7932d28b8e31ea2a70bf6c002d97ac8 +Author: Scott Branden +Date: Sat Mar 31 13:54:09 2018 -0400 + + bnxt_en: fix clear flags in ethtool reset handling + + Clear flags when reset command processed successfully for components + specified. + + Fixes: 6502ad5963a5 ("bnxt_en: Add ETH_RESET_AP support") + Signed-off-by: Scott Branden + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit abe93ad2e06e3c16562b5de2787e7442fa088895 +Author: Michael Chan +Date: Sat Mar 31 13:54:08 2018 -0400 + + bnxt_en: Use a dedicated VNIC mode for RDMA. + + If the RDMA driver is registered, use a new VNIC mode that allows + RDMA traffic to be seen on the netdev in promiscuous mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1d3ef13dd48da9177e417379644be9003bc459cc +Author: Michael Chan +Date: Sat Mar 31 13:54:07 2018 -0400 + + bnxt_en: Adjust default rings for multi-port NICs. + + Change the default ring logic to select default number of rings to be up to + 8 per port if the default rings x NIC ports <= total CPUs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d4f52de02f04f0a7dfcb1d0228a2ff58b06aa230 +Author: Michael Chan +Date: Sat Mar 31 13:54:06 2018 -0400 + + bnxt_en: Update firmware interface to 1.9.1.15. + + Minor changes, such as new extended port statistics. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fd141fa47c03018aa1f77c335b0f444493e145d5 +Author: Sinan Kaya +Date: Sun Mar 25 10:39:20 2018 -0400 + + bnxt_en: Eliminate duplicate barriers on weakly-ordered archs + + Code includes wmb() followed by writel(). writel() already has a barrier on + some architectures like arm64. + + This ends up CPU observing two barriers back to back before executing the + register write. + + Create a new wrapper function with relaxed write operator. Use the new + wrapper when a write is following a wmb(). 
+ + Since code already has an explicit barrier call, changing writel() to + writel_relaxed(). + + Also add mmiowb() so that write code doesn't move outside of scope. + + Signed-off-by: Sinan Kaya + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c4fe80b32c685bdc02b280814d0cfe80d441c72 +Author: Michael Chan +Date: Fri Mar 9 23:46:10 2018 -0500 + + bnxt_en: Check valid VNIC ID in bnxt_hwrm_vnic_set_tpa(). + + During initialization, if we encounter errors, there is a code path that + calls bnxt_hwrm_vnic_set_tpa() with invalid VNIC ID. This may cause a + warning in firmware logs. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1a037782e79047ec3386d8ba94c103cbdfb851d0 +Author: Venkat Duvvuru +Date: Fri Mar 9 23:46:09 2018 -0500 + + bnxt_en: close & open NIC, only when the interface is in running state. + + bnxt_restore_pf_fw_resources routine frees PF resources by calling + close_nic and allocates the resources back, by doing open_nic. However, + this is not needed, if the PF is already in closed state. + + This bug causes the driver to call open the device and call request_irq() + when it is not needed. Ultimately, pci_disable_msix() will crash + when bnxt_en is unloaded. + + This patch fixes the problem by skipping __bnxt_close_nic and + __bnxt_open_nic inside bnxt_restore_pf_fw_resources routine, if the + interface is not running. + + Fixes: 80fcaf46c092 ("bnxt_en: Restore MSIX after disabling SRIOV.") + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6ae777eab2f53b50d84a5d75a48d2d149f787da8 +Author: Venkat Duvvuru +Date: Fri Mar 9 23:46:08 2018 -0500 + + bnxt_en: Return standard Linux error codes for hwrm flow cmds. + + Currently, internal error value is returned by the driver, when + hwrm_cfa_flow_alloc() fails due lack of resources. We should be returning + Linux errno value -ENOSPC instead. 
+ + This patch also converts other similar command errors to standard Linux errno + code (-EIO) in bnxt_tc.c + + Fixes: db1d36a27324 ("bnxt_en: add TC flower offload flow_alloc/free FW cmds") + Signed-off-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 832aed16ce7af2a43dafe9d4bc9080322e042cde +Author: Michael Chan +Date: Fri Mar 9 23:46:07 2018 -0500 + + bnxt_en: Fix regressions when setting up MQPRIO TX rings. + + Recent changes added the bnxt_init_int_mode() call in the driver's open + path whenever ring reservations are changed. This call was previously + only called in the probe path. In the open path, if MQPRIO TC has been + setup, the bnxt_init_int_mode() call would reset and mess up the MQPRIO + per TC rings. + + Fix it by not re-initilizing bp->tx_nr_rings_per_tc in + bnxt_init_int_mode(). Instead, initialize it in the probe path only + after the bnxt_init_int_mode() call. + + Fixes: 674f50a5b026 ("bnxt_en: Implement new method to reserve rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ed7bc602f60a653e5dea488e6917d9a75d6ac0dd +Author: Michael Chan +Date: Fri Mar 9 23:46:06 2018 -0500 + + bnxt_en: Pass complete VLAN TCI to the stack. + + When receiving a packet with VLAN tag, pass the entire 16-bit TCI to the + stack when calling __vlan_hwaccel_put_tag(). The current code is only + passing the 12-bit tag and it is missing the priority bits. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b9ecc3400bc418af3ba9e56ea852f4ad69c23454 +Author: Sriharsha Basavapatna +Date: Fri Mar 9 23:46:05 2018 -0500 + + bnxt_en: Remove unwanted ovs-offload messages in some conditions + + In some conditions when the driver fails to add a flow in HW and returns + an error back to the stack, the stack continues to invoke get_flow_stats() + and/or del_flow() on it. The driver fails these APIs with an error message + "no flow_node for cookie". 
The message gets logged repeatedly as long as + the stack keeps invoking these functions. + + Fix this by removing the corresponding netdev_info() calls from these + functions. + + Fixes: d7bc73053024 ("bnxt_en: add code to query TC flower offload stats") + Signed-off-by: Sriharsha Basavapatna + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6fc2ffdf1001ae4fb485b3ba95ff757ae54565c9 +Author: Eddie Wai +Date: Fri Mar 9 23:46:04 2018 -0500 + + bnxt_en: Fix vnic accounting in the bnxt_check_rings() path. + + The number of vnics to check must be determined ahead of time because + only standard RX rings require vnics to support RFS. The logic is + similar to the ring reservation logic and we can now use the + refactored common functions to do most of the work in setting up + the firmware message. + + Fixes: 8f23d638b36b ("bnxt_en: Expand bnxt_check_rings() to check all resources.") + Signed-off-by: Eddie Wai + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4ed50ef4da4d113fe65d9f9d049c1ce7468e3ac1 +Author: Michael Chan +Date: Fri Mar 9 23:46:03 2018 -0500 + + bnxt_en: Refactor the functions to reserve hardware rings. + + The bnxt_hwrm_reserve_{pf|vf}_rings() functions are very similar to + the bnxt_hwrm_check_{pf|vf}_rings() functions. Refactor the former + so that the latter can make use of common code in the next patch. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0bc0b97fca73fca19edd0bd1463972144b44abaa +Author: Andy Gospodarek +Date: Fri Jan 26 10:27:47 2018 -0500 + + bnxt_en: cleanup DIM work on device shutdown + + Make sure to cancel any pending work that might update driver coalesce + settings when taking down an interface. + + Fixes: 6a8788f25625 ("bnxt_en: add support for software dynamic interrupt moderation") + Signed-off-by: Andy Gospodarek + Cc: Michael Chan + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 312324f1248b47a2640469039811a04ab2f5be34 +Author: Jakub Kicinski +Date: Thu Jan 25 14:00:48 2018 -0800 + + bnxt: use tc_cls_can_offload_and_chain0() + + Make use of tc_cls_can_offload_and_chain0() to set extack msg in case + ethtool tc offload flag is not set or chain unsupported. + + Signed-off-by: Jakub Kicinski + Reviewed-by: Simon Horman + Signed-off-by: David S. Miller + +commit dd4ea1da12495e1b3c400a28df11528892199f68 +Author: Sathya Perla +Date: Wed Jan 17 03:21:16 2018 -0500 + + bnxt_en: export a common switchdev PARENT_ID for all reps of an adapter + + Currently the driver exports different switchdev PARENT_IDs for + representors belonging to different SR-IOV PF-pools of an adapter. + This is not correct as the adapter can switch across all vports + of an adapter. This patch fixes this by exporting a common switchdev + PARENT_ID for all reps of an adapter. The PCIE DSN is used as the id. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c3480a603773cfc5d8aa44dbbee6c96e0f9d4d9d +Author: Michael Chan +Date: Wed Jan 17 03:21:15 2018 -0500 + + bnxt_en: Add cache line size setting to optimize performance. + + The chip supports 64-byte and 128-byte cache line size for more optimal + DMA performance when matched to the CPU cache line size. The default is 64. + If the system is using 128-byte cache line size, set it to 128. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 91cdda40714178497cbd182261b2ea6ec5cb9276 +Author: Vasundhara Volam +Date: Wed Jan 17 03:21:14 2018 -0500 + + bnxt_en: Forward VF MAC address to the PF. + + Forward hwrm_func_vf_cfg command from VF to PF driver, to store + VF MAC address in PF's context. This will allow "ip link show" + to display all VF MAC addresses. + + Maintain 2 locations of MAC address in VF info structure, one for + a PF assigned MAC and one for VF assigned MAC. 
+ + Display VF assigned MAC in "ip link show", only if PF assigned MAC is + not valid. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 92abef361bd233ea2a99db9e9a637626f523f82e +Author: Vasundhara Volam +Date: Wed Jan 17 03:21:13 2018 -0500 + + bnxt_en: Add BCM5745X NPAR device IDs + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8f23d638b36b4ff0fe5785cf01f9bdc41afb9c06 +Author: Michael Chan +Date: Wed Jan 17 03:21:12 2018 -0500 + + bnxt_en: Expand bnxt_check_rings() to check all resources. + + bnxt_check_rings() is called by ethtool, XDP setup, and ndo_setup_tc() + to see if there are enough resources to support the new configuration. + Expand the call to test all resources if the firmware supports the new + API. With the more flexible resource allocation scheme, this call must + be made to check that all resources are available before committing to + allocate the resources. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4673d66468b80dc37abd1159a4bd038128173d48 +Author: Michael Chan +Date: Wed Jan 17 03:21:11 2018 -0500 + + bnxt_en: Implement new method for the PF to assign SRIOV resources. + + Instead of the old method of evenly dividing the resources to the VFs, + use the new firmware API to specify min and max resources for each VF. + This way, there is more flexibility for each VF to allocate more or less + resources. + + The min is the absolute minimum for each VF to function. The max is the + global resources minus the resources used by the PF. Each VF is + guaranteed the min. Up to max resources may be available for some VFs. + + The PF driver can use one of 2 strategies specified in NVRAM to assign + the resources. The old legacy strategy of evenly dividing the resources + or the new flexible strategy. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 6a1eef5b9079742ecfad647892669bd5fe6b0e3f +Author: Michael Chan +Date: Wed Jan 17 03:21:10 2018 -0500 + + bnxt_en: Reserve resources for RFS. + + In bnxt_rfs_capable(), add call to reserve vnic resources to support + NTUPLE. Return true if we can successfully reserve enough vnics. + Otherwise, reserve the minimum 1 VNIC for normal operations not + supporting NTUPLE and return false. + + Also, suppress warning message about not enough resources for NTUPLE when + only 1 RX ring is in use. NTUPLE filters by definition require multiple + RX rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 674f50a5b026151f4109992cb594d89f5334adde +Author: Michael Chan +Date: Wed Jan 17 03:21:09 2018 -0500 + + bnxt_en: Implement new method to reserve rings. + + The new method will call firmware to reserve the desired tx, rx, cmpl + rings, ring groups, stats context, and vnic resources. A second query + call will check the actual resources that firmware is able to reserve. + The driver will then trim and adjust based on the actual resources + provided by firmware. The driver will then reserve the final resources + in use. + + This method is a more flexible way of using hardware resources. The + resources are not fixed and can by adjusted by firmware. The driver + adapts to the available resources that the firmware can reserve for + the driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 58ea801ac4c166cdcaa399ce7f9b3e9095ff2842 +Author: Michael Chan +Date: Wed Jan 17 03:21:08 2018 -0500 + + bnxt_en: Set initial default RX and TX ring numbers the same in combined mode. + + In combined mode, the driver is currently not setting RX and TX ring + numbers the same when firmware can allocate more RX than TX or vice versa. + This will confuse the user as the ethtool convention assumes they are the + same in combined mode. 
Fix it by adding bnxt_trim_dflt_sh_rings() to trim + RX and TX ring numbers to be the same as the completion ring number in + combined mode. + + Note that if TCs are enabled and/or XDP is enabled, the number of TX rings + will not be the same as RX rings in combined mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit be0dd9c4100c9549fe50258e3d928072e6c31590 +Author: Michael Chan +Date: Wed Jan 17 03:21:07 2018 -0500 + + bnxt_en: Add the new firmware API to query hardware resources. + + The new API HWRM_FUNC_RESOURCE_QCAPS provides min and max hardware + resources. Use the new API when it is supported by firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6a4f29470569c5a158c1871a2f752ca22e433420 +Author: Michael Chan +Date: Wed Jan 17 03:21:06 2018 -0500 + + bnxt_en: Refactor hardware resource data structures. + + In preparation for new firmware APIs to allocate hardware resources, + add a new struct bnxt_hw_resc to hold various min, max and reserved + resources. This new structure is common for PFs and VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 80fcaf46c09262a71f32bb577c976814c922f864 +Author: Michael Chan +Date: Wed Jan 17 03:21:05 2018 -0500 + + bnxt_en: Restore MSIX after disabling SRIOV. + + After SRIOV has been enabled and disabled, the MSIX vectors assigned to + the VFs have to be re-initialized. Otherwise they cannot be re-used by + the PF. For example, increasing the number of PF rings after disabling + SRIOV may fail if the PF uses MSIX vectors previously assigned to the VFs. + + To fix this, we add logic in bnxt_restore_pf_fw_resources() to close the + NIC, clear and re-init MSIX, and re-open the NIC. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 86e953db0114f396f916344395160aa267bf2627 +Author: Michael Chan +Date: Wed Jan 17 03:21:04 2018 -0500 + + bnxt_en: Refactor bnxt_close_nic(). 
+ + Add a new __bnxt_close_nic() function to do all the work previously done + in bnxt_close_nic() except waiting for SRIOV configuration. The new + function will be used in the next patch as part of SRIOV cleanup. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 894aa69a90932907f3de9d849ab9970884151d0e +Author: Michael Chan +Date: Wed Jan 17 03:21:03 2018 -0500 + + bnxt_en: Update firmware interface to 1.9.0. + + The version has new firmware APIs to allocate PF/VF resources more + flexibly. + + New toolchains were used to generate this file, resulting in a one-time + large diffstat. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e7e70fa6784b48a811fdd4253c41fc7195300570 +Author: Colin Ian King +Date: Tue Jan 16 10:22:50 2018 +0000 + + bnxt_en: don't update cpr->rx_bytes with uninitialized length len + + Currently in the cases where cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP or + CMP_TYPE_RX_L2_TPA_END_CMP the exit path updates cpr->rx_bytes with an + uninitialized length len. Fix this by adding a new exit path that does + not update the cpr stats with the bogus length len and remove the unused + label next_rx_no_prod. + + Detected by CoverityScan, CID#1463807 ("Uninitialized scalar variable") + Fixes: 6a8788f25625 ("bnxt_en: add support for software dynamic interrupt moderation") + Signed-off-by: Colin Ian King + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6a8788f25625eab31ffa624da2db758ecae6151d +Author: Andy Gospodarek +Date: Tue Jan 9 16:06:20 2018 -0500 + + bnxt_en: add support for software dynamic interrupt moderation + + This implements the changes needed for the bnxt_en driver to add support + for dynamic interrupt moderation per ring. + + This does add additional counters in the receive path, but testing shows + that any additional instructions are offset by throughput gain when the + default configuration is for low latency. 
+ + Signed-off-by: Andy Gospodarek + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 78f300049335ae81a5cc6b4b232481dc5e1f9d41 +Author: Venkat Duvvuru +Date: Thu Jan 4 18:46:55 2018 -0500 + + bnxt_en: Fix the 'Invalid VF' id check in bnxt_vf_ndo_prep routine. + + In bnxt_vf_ndo_prep (which is called by bnxt_get_vf_config ndo), there is a + check for "Invalid VF id". Currently, the check is done against max_vfs. + However, the user doesn't always create max_vfs. So, the check should be + against the created number of VFs. The number of bnxt_vf_info structures + that are allocated in bnxt_alloc_vf_resources routine is the "number of + requested VFs". So, if an "invalid VF id" falls between the requested + number of VFs and the max_vfs, the driver will be dereferencing an invalid + pointer. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Venkat Devvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7deea450eb912f269d999de62c8ab922d1461748 +Author: Sunil Challa +Date: Thu Jan 4 18:46:54 2018 -0500 + + bnxt_en: Fix population of flow_type in bnxt_hwrm_cfa_flow_alloc() + + flow_type in HWRM_FLOW_ALLOC is not being populated correctly due to + incorrect passing of pointer and size of l3_mask argument of is_wildcard(). + Fixed this. + + Fixes: db1d36a27324 ("bnxt_en: add TC flower offload flow_alloc/free FW cmds") + Signed-off-by: Sunil Challa + Reviewed-by: Sathya Perla + Reviewed-by: Venkat Duvvuru + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 96a8604f95fa216b9ddfd15c687eed42a2f56901 +Author: Jesper Dangaard Brouer +Date: Wed Jan 3 11:25:44 2018 +0100 + + bnxt_en: setup xdp_rxq_info + + Driver hook points for xdp_rxq_info: + * reg : bnxt_alloc_rx_rings + * unreg: bnxt_free_rx_rings + + This driver should be updated to re-register when changing + allocation mode of RX rings. + + Tested on actual hardware. 
+ + Cc: Andy Gospodarek + Cc: Michael Chan + Signed-off-by: Jesper Dangaard Brouer + Signed-off-by: Alexei Starovoitov + +commit aa006d1ad0a58f5dad0065b25263a73365319996 +Author: Himanshu Jha +Date: Sat Dec 30 21:14:57 2017 +0530 + + ethernet/broadcom: Use zeroing memory allocator than allocator/memset + + Use dma_zalloc_coherent for allocating zeroed + memory and remove unnecessary memset function. + + Done using Coccinelle. + Generated-by: scripts/coccinelle/api/alloc/kzalloc-simple.cocci + 0-day tested with no failures. + + Suggested-by: Luis R. Rodriguez + Signed-off-by: Himanshu Jha + Signed-off-by: David S. Miller + +commit 1054aee82321483dceabbb9b9e5d6512e8fe684b +Author: Michael Chan +Date: Sat Dec 16 03:09:42 2017 -0500 + + bnxt_en: Use NETIF_F_GRO_HW. + + Advertise NETIF_F_GRO_HW in hw_features if hardware GRO is supported. + In bnxt_fix_features(), disable GRO_HW and LRO if current hardware + configuration does not allow it. GRO_HW depends on GRO. GRO_HW is + also mutually exclusive with LRO. XDP setup will now rely on + bnxt_fix_features() to turn off aggregation. During chip init, turn on + or off hardware GRO based on NETIF_F_GRO_HW in features flag. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 97a6ec4ac021f7fbec05c15a3aa0c4aaf0461af5 +Author: Tom Herbert +Date: Mon Dec 4 10:31:41 2017 -0800 + + rhashtable: Change rhashtable_walk_start to return void + + Most callers of rhashtable_walk_start don't care about a resize event + which is indicated by a return value of -EAGAIN. So calls to + rhashtable_walk_start are wrapped wih code to ignore -EAGAIN. Something + like this is common: + + ret = rhashtable_walk_start(rhiter); + if (ret && ret != -EAGAIN) + goto out; + + Since zero and -EAGAIN are the only possible return values from the + function this check is pointless. The condition never evaluates to true. + + This patch changes rhashtable_walk_start to return void. 
This simplifies + code for the callers that ignore -EAGAIN. For the few cases where the + caller cares about the resize event, particularly where the table can be + walked in mulitple parts for netlink or seq file dump, the function + rhashtable_walk_start_check has been added that returns -EAGAIN on a + resize event. + + Signed-off-by: Tom Herbert + Acked-by: Herbert Xu + Signed-off-by: David S. Miller + +commit 2edbdb3159d6f6bd3a9b6e7f789f2b879699a519 +Author: Calvin Owens +Date: Fri Dec 8 09:05:26 2017 -0800 + + bnxt_en: Fix sources of spurious netpoll warnings + + After applying 2270bc5da3497945 ("bnxt_en: Fix netpoll handling") and + 903649e718f80da2 ("bnxt_en: Improve -ENOMEM logic in NAPI poll loop."), + we still see the following WARN fire: + + ------------[ cut here ]------------ + WARNING: CPU: 0 PID: 1875170 at net/core/netpoll.c:165 netpoll_poll_dev+0x15a/0x160 + bnxt_poll+0x0/0xd0 exceeded budget in poll + + Call Trace: + [] dump_stack+0x4d/0x70 + [] __warn+0xd3/0xf0 + [] warn_slowpath_fmt+0x4f/0x60 + [] netpoll_poll_dev+0x15a/0x160 + [] netpoll_send_skb_on_dev+0x168/0x250 + [] netpoll_send_udp+0x2dc/0x440 + [] write_ext_msg+0x20e/0x250 + [] call_console_drivers.constprop.23+0xa5/0x110 + [] console_unlock+0x339/0x5b0 + [] vprintk_emit+0x2c8/0x450 + [] vprintk_default+0x1f/0x30 + [] printk+0x48/0x50 + [] edac_raw_mc_handle_error+0x563/0x5c0 [edac_core] + [] edac_mc_handle_error+0x42b/0x6e0 [edac_core] + [] sbridge_mce_output_error+0x410/0x10d0 [sb_edac] + [] sbridge_check_error+0xac/0x130 [sb_edac] + [] edac_mc_workq_function+0x3c/0x90 [edac_core] + [] process_one_work+0x19b/0x480 + [] worker_thread+0x6a/0x520 + [] kthread+0xe4/0x100 + [] ret_from_fork+0x22/0x40 + + This happens because we increment rx_pkts on -ENOMEM and -EIO, resulting + in rx_pkts > 0. Fix this by only bumping rx_pkts if we were actually + given a non-zero budget. + + Signed-off-by: Calvin Owens + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit a8168b6cee6e9334dfebb4b9108e8d73794f6088 +Author: Michael Chan +Date: Wed Dec 6 17:31:22 2017 -0500 + + bnxt_en: Don't print "Link speed -1 no longer supported" messages. + + On some dual port NICs, the 2 ports have to be configured with compatible + link speeds. Under some conditions, a port's configured speed may no + longer be supported. The firmware will send a message to the driver + when this happens. + + Improve this logic that prints out the warning by only printing it if + we can determine the link speed that is no longer supported. If the + speed is unknown or it is in autoneg mode, skip the warning message. + + Reported-by: Thomas Bogendoerfer + Signed-off-by: Michael Chan + Tested-by: Thomas Bogendoerfer + Signed-off-by: David S. Miller + +commit 9f8a739e72f1546fb0f8c518af1193522c45be12 +Author: Cong Wang +Date: Tue Dec 5 16:17:26 2017 -0800 + + act_mirred: get rid of tcfm_ifindex from struct tcf_mirred + + tcfm_dev always points to the correct netdev and we already + hold a refcnt, so no need to use tcfm_ifindex to lookup again. + + If we would support moving target netdev across netns, using + pointer would be better than ifindex. + + This also fixes dumping obsolete ifindex, now after the + target device is gone we just dump 0 as ifindex. + + Cc: Jiri Pirko + Cc: Jamal Hadi Salim + Signed-off-by: Cong Wang + Acked-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 92425c40676d498efccae6fecdb8f8e4dcf7e4a4 +Author: Dan Carpenter +Date: Tue Dec 5 17:37:52 2017 +0300 + + bnxt_en: Uninitialized variable in bnxt_tc_parse_actions() + + Smatch warns that: + + drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c:160 bnxt_tc_parse_actions() + error: uninitialized symbol 'rc'. + + "rc" is either uninitialized or set to zero here so we can just remove + the check. + + Fixes: 8c95f773b4a3 ("bnxt_en: add support for Flower based vxlan encap/decap offload") + Signed-off-by: Dan Carpenter + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit ebd5818cc5d4847897d7fe872e2d9799d7b7fcbb +Author: Vasundhara Volam +Date: Fri Dec 1 03:13:05 2017 -0500 + + bnxt_en: Fix a variable scoping in bnxt_hwrm_do_send_msg() + + short_input variable is assigned to another data pointer which is + referred out of its scope. Fix it by moving short_input definition + to the beginning of bnxt_hwrm_do_send_msg() function. + + No failure has been reported so far due to this issue. + + Fixes: e605db801bde ("bnxt_en: Support for Short Firmware Message") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e9ecc731a87912d209d6e9b4ed20ed70451c08cb +Author: Sathya Perla +Date: Fri Dec 1 03:13:04 2017 -0500 + + bnxt_en: fix dst/src fid for vxlan encap/decap actions + + For flows that involve a vxlan encap action, the vxlan sock + interface may be specified as the outgoing interface. The driver + must resolve the outgoing PF interface used by this socket and + use the dst_fid of the PF in the hwrm_cfa_encap_record_alloc cmd. + + Similarily for flows that have a vxlan decap action, the + fid of the incoming PF interface must be used as the src_fid in + the hwrm_cfa_decap_filter_alloc cmd. + + Fixes: 8c95f773b4a3 ("bnxt_en: add support for Flower based vxlan encap/decap offload") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c8fb7b8259c67b86cd93a71c85e78b34d2c96fdc +Author: Sunil Challa +Date: Fri Dec 1 03:13:03 2017 -0500 + + bnxt_en: wildcard smac while creating tunnel decap filter + + While creating a decap filter the tunnel smac need not (and must not) be + specified as we cannot ascertain the neighbor in the recv path. 'ttl' + match is also not needed for the decap filter and must be wild-carded. + + Fixes: f484f6782e01 ("bnxt_en: add hwrm FW cmds for cfa_encap_record and decap_filter") + Signed-off-by: Sunil Challa + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit a7f3f939dd7d8398acebecd1ceb2e9e7ffbe91d2 +Author: Ray Jui +Date: Fri Dec 1 03:13:02 2017 -0500 + + bnxt_en: Need to unconditionally shut down RoCE in bnxt_shutdown + + The current 'bnxt_shutdown' implementation only invokes + 'bnxt_ulp_shutdown' to shut down RoCE in the case when the system is in + the path of power off (SYSTEM_POWER_OFF). While this may work in most + cases, it does not work in the smart NIC case, when Linux 'reboot' + command is initiated from the Linux that runs on the ARM cores of the + NIC card. In this particular case, Linux 'reboot' results in a system + 'L3' level reset where the entire ARM and associated subsystems are + being reset, but at the same time, Nitro core is being kept in sane state + (to allow external PCIe connected servers to continue to work). Without + properly shutting down RoCE and freeing all associated resources, it + results in the ARM core to hang immediately after the 'reboot' + + By always invoking 'bnxt_ulp_shutdown' in 'bnxt_shutdown', it fixes the + above issue + + Fixes: 0efd2fc65c92 ("bnxt_en: Add a callback to inform RDMA driver during PCI shutdown.") + + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bd0b2e7fe611953470ec7c533b455fb2abd382cd +Author: Jakub Kicinski +Date: Fri Dec 1 15:08:57 2017 -0800 + + net: xdp: make the stack take care of the tear down + + Since day one of XDP drivers had to remember to free the program + on the remove path. This leads to code duplication and is error + prone. Make the stack query the installed programs on unregister + and if something is installed, remove the program. Freeing of + program attached to XDP generic is moved from free_netdev() as well. + + Because the remove will now be called before notifiers are + invoked, BPF offload state of the program will not get destroyed + before uninstall. 
+ + Signed-off-by: Jakub Kicinski + Reviewed-by: Simon Horman + Reviewed-by: Quentin Monnet + Signed-off-by: Daniel Borkmann + +commit 6502ad5963a5307089bed395f63173e34cb251ea +Author: Scott Branden +Date: Thu Nov 30 11:36:00 2017 -0800 + + bnxt_en: Add ETH_RESET_AP support + + Add ETH_RESET_AP support handling to reset the internal + Application Processor(s) of the SmartNIC card. + + Signed-off-by: Scott Branden + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit dea521a2b9f96e905fa2bb2f95e23ec00c2ec436 +Author: Christophe JAILLET +Date: Tue Nov 21 20:46:49 2017 +0100 + + bnxt_en: Fix an error handling path in 'bnxt_get_module_eeprom()' + + Error code returned by 'bnxt_read_sfp_module_eeprom_info()' is handled a + few lines above when reading the A0 portion of the EEPROM. + The same should be done when reading the A2 portion of the EEPROM. + + In order to correctly propagate an error, update 'rc' in this 2nd call as + well, otherwise 0 (success) is returned. + + Signed-off-by: Christophe JAILLET + Signed-off-by: David S. Miller + +commit e99e88a9d2b067465adaa9c111ada99a041bef9a +Author: Kees Cook +Date: Mon Oct 16 14:43:17 2017 -0700 + + treewide: setup_timer() -> timer_setup() + + This converts all remaining cases of the old setup_timer() API into using + timer_setup(), where the callback argument is the structure already + holding the struct timer_list. These should have no behavioral changes, + since they just change which pointer is passed into the callback with + the same available pointers after conversion. It handles the following + examples, in addition to some other variations. + + Casting from unsigned long: + + void my_callback(unsigned long data) + { + struct something *ptr = (struct something *)data; + ... + } + ... + setup_timer(&ptr->my_timer, my_callback, ptr); + + and forced object casts: + + void my_callback(struct something *ptr) + { + ... + } + ... 
+ setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr); + + become: + + void my_callback(struct timer_list *t) + { + struct something *ptr = from_timer(ptr, t, my_timer); + ... + } + ... + timer_setup(&ptr->my_timer, my_callback, 0); + + Direct function assignments: + + void my_callback(unsigned long data) + { + struct something *ptr = (struct something *)data; + ... + } + ... + ptr->my_timer.function = my_callback; + + have a temporary cast added, along with converting the args: + + void my_callback(struct timer_list *t) + { + struct something *ptr = from_timer(ptr, t, my_timer); + ... + } + ... + ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback; + + And finally, callbacks without a data assignment: + + void my_callback(unsigned long data) + { + ... + } + ... + setup_timer(&ptr->my_timer, my_callback, 0); + + have their argument renamed to verify they're unused during conversion: + + void my_callback(struct timer_list *unused) + { + ... + } + ... + timer_setup(&ptr->my_timer, my_callback, 0); + + The conversion is done with the following Coccinelle script: + + spatch --very-quiet --all-includes --include-headers \ + -I ./arch/x86/include -I ./arch/x86/include/generated \ + -I ./include -I ./arch/x86/include/uapi \ + -I ./arch/x86/include/generated/uapi -I ./include/uapi \ + -I ./include/generated/uapi --include ./include/linux/kconfig.h \ + --dir . \ + --cocci-file ~/src/data/timer_setup.cocci + + @fix_address_of@ + expression e; + @@ + + setup_timer( + -&(e) + +&e + , ...) + + // Update any raw setup_timer() usages that have a NULL callback, but + // would otherwise match change_timer_function_usage, since the latter + // will update all function assignments done in the face of a NULL + // function initialization in setup_timer(). 
+ @change_timer_function_usage_NULL@ + expression _E; + identifier _timer; + type _cast_data; + @@ + + ( + -setup_timer(&_E->_timer, NULL, _E); + +timer_setup(&_E->_timer, NULL, 0); + | + -setup_timer(&_E->_timer, NULL, (_cast_data)_E); + +timer_setup(&_E->_timer, NULL, 0); + | + -setup_timer(&_E._timer, NULL, &_E); + +timer_setup(&_E._timer, NULL, 0); + | + -setup_timer(&_E._timer, NULL, (_cast_data)&_E); + +timer_setup(&_E._timer, NULL, 0); + ) + + @change_timer_function_usage@ + expression _E; + identifier _timer; + struct timer_list _stl; + identifier _callback; + type _cast_func, _cast_data; + @@ + + ( + -setup_timer(&_E->_timer, _callback, _E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, &_callback, _E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, _callback, (_cast_data)_E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, &_callback, (_cast_data)_E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, (_cast_func)_callback, _E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, (_cast_func)&_callback, _E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E._timer, _callback, (_cast_data)_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, _callback, (_cast_data)&_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, &_callback, (_cast_data)_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, &_callback, (_cast_data)&_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, 
(_cast_func)_callback, (_cast_data)&_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E); + +timer_setup(&_E._timer, _callback, 0); + | + _E->_timer@_stl.function = _callback; + | + _E->_timer@_stl.function = &_callback; + | + _E->_timer@_stl.function = (_cast_func)_callback; + | + _E->_timer@_stl.function = (_cast_func)&_callback; + | + _E._timer@_stl.function = _callback; + | + _E._timer@_stl.function = &_callback; + | + _E._timer@_stl.function = (_cast_func)_callback; + | + _E._timer@_stl.function = (_cast_func)&_callback; + ) + + // callback(unsigned long arg) + @change_callback_handle_cast + depends on change_timer_function_usage@ + identifier change_timer_function_usage._callback; + identifier change_timer_function_usage._timer; + type _origtype; + identifier _origarg; + type _handletype; + identifier _handle; + @@ + + void _callback( + -_origtype _origarg + +struct timer_list *t + ) + { + ( + ... when != _origarg + _handletype *_handle = + -(_handletype *)_origarg; + +from_timer(_handle, t, _timer); + ... when != _origarg + | + ... when != _origarg + _handletype *_handle = + -(void *)_origarg; + +from_timer(_handle, t, _timer); + ... when != _origarg + | + ... when != _origarg + _handletype *_handle; + ... when != _handle + _handle = + -(_handletype *)_origarg; + +from_timer(_handle, t, _timer); + ... when != _origarg + | + ... when != _origarg + _handletype *_handle; + ... when != _handle + _handle = + -(void *)_origarg; + +from_timer(_handle, t, _timer); + ... 
when != _origarg + ) + } + + // callback(unsigned long arg) without existing variable + @change_callback_handle_cast_no_arg + depends on change_timer_function_usage && + !change_callback_handle_cast@ + identifier change_timer_function_usage._callback; + identifier change_timer_function_usage._timer; + type _origtype; + identifier _origarg; + type _handletype; + @@ + + void _callback( + -_origtype _origarg + +struct timer_list *t + ) + { + + _handletype *_origarg = from_timer(_origarg, t, _timer); + + + ... when != _origarg + - (_handletype *)_origarg + + _origarg + ... when != _origarg + } + + // Avoid already converted callbacks. + @match_callback_converted + depends on change_timer_function_usage && + !change_callback_handle_cast && + !change_callback_handle_cast_no_arg@ + identifier change_timer_function_usage._callback; + identifier t; + @@ + + void _callback(struct timer_list *t) + { ... } + + // callback(struct something *handle) + @change_callback_handle_arg + depends on change_timer_function_usage && + !match_callback_converted && + !change_callback_handle_cast && + !change_callback_handle_cast_no_arg@ + identifier change_timer_function_usage._callback; + identifier change_timer_function_usage._timer; + type _handletype; + identifier _handle; + @@ + + void _callback( + -_handletype *_handle + +struct timer_list *t + ) + { + + _handletype *_handle = from_timer(_handle, t, _timer); + ... + } + + // If change_callback_handle_arg ran on an empty function, remove + // the added handler. + @unchange_callback_handle_arg + depends on change_timer_function_usage && + change_callback_handle_arg@ + identifier change_timer_function_usage._callback; + identifier change_timer_function_usage._timer; + type _handletype; + identifier _handle; + identifier t; + @@ + + void _callback(struct timer_list *t) + { + - _handletype *_handle = from_timer(_handle, t, _timer); + } + + // We only want to refactor the setup_timer() data argument if we've found + // the matching callback. 
This undoes changes in change_timer_function_usage. + @unchange_timer_function_usage + depends on change_timer_function_usage && + !change_callback_handle_cast && + !change_callback_handle_cast_no_arg && + !change_callback_handle_arg@ + expression change_timer_function_usage._E; + identifier change_timer_function_usage._timer; + identifier change_timer_function_usage._callback; + type change_timer_function_usage._cast_data; + @@ + + ( + -timer_setup(&_E->_timer, _callback, 0); + +setup_timer(&_E->_timer, _callback, (_cast_data)_E); + | + -timer_setup(&_E._timer, _callback, 0); + +setup_timer(&_E._timer, _callback, (_cast_data)&_E); + ) + + // If we fixed a callback from a .function assignment, fix the + // assignment cast now. + @change_timer_function_assignment + depends on change_timer_function_usage && + (change_callback_handle_cast || + change_callback_handle_cast_no_arg || + change_callback_handle_arg)@ + expression change_timer_function_usage._E; + identifier change_timer_function_usage._timer; + identifier change_timer_function_usage._callback; + type _cast_func; + typedef TIMER_FUNC_TYPE; + @@ + + ( + _E->_timer.function = + -_callback + +(TIMER_FUNC_TYPE)_callback + ; + | + _E->_timer.function = + -&_callback + +(TIMER_FUNC_TYPE)_callback + ; + | + _E->_timer.function = + -(_cast_func)_callback; + +(TIMER_FUNC_TYPE)_callback + ; + | + _E->_timer.function = + -(_cast_func)&_callback + +(TIMER_FUNC_TYPE)_callback + ; + | + _E._timer.function = + -_callback + +(TIMER_FUNC_TYPE)_callback + ; + | + _E._timer.function = + -&_callback; + +(TIMER_FUNC_TYPE)_callback + ; + | + _E._timer.function = + -(_cast_func)_callback + +(TIMER_FUNC_TYPE)_callback + ; + | + _E._timer.function = + -(_cast_func)&_callback + +(TIMER_FUNC_TYPE)_callback + ; + ) + + // Sometimes timer functions are called directly. Replace matched args. 
+ @change_timer_function_calls + depends on change_timer_function_usage && + (change_callback_handle_cast || + change_callback_handle_cast_no_arg || + change_callback_handle_arg)@ + expression _E; + identifier change_timer_function_usage._timer; + identifier change_timer_function_usage._callback; + type _cast_data; + @@ + + _callback( + ( + -(_cast_data)_E + +&_E->_timer + | + -(_cast_data)&_E + +&_E._timer + | + -_E + +&_E->_timer + ) + ) + + // If a timer has been configured without a data argument, it can be + // converted without regard to the callback argument, since it is unused. + @match_timer_function_unused_data@ + expression _E; + identifier _timer; + identifier _callback; + @@ + + ( + -setup_timer(&_E->_timer, _callback, 0); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, _callback, 0L); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E->_timer, _callback, 0UL); + +timer_setup(&_E->_timer, _callback, 0); + | + -setup_timer(&_E._timer, _callback, 0); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, _callback, 0L); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_E._timer, _callback, 0UL); + +timer_setup(&_E._timer, _callback, 0); + | + -setup_timer(&_timer, _callback, 0); + +timer_setup(&_timer, _callback, 0); + | + -setup_timer(&_timer, _callback, 0L); + +timer_setup(&_timer, _callback, 0); + | + -setup_timer(&_timer, _callback, 0UL); + +timer_setup(&_timer, _callback, 0); + | + -setup_timer(_timer, _callback, 0); + +timer_setup(_timer, _callback, 0); + | + -setup_timer(_timer, _callback, 0L); + +timer_setup(_timer, _callback, 0); + | + -setup_timer(_timer, _callback, 0UL); + +timer_setup(_timer, _callback, 0); + ) + + @change_callback_unused_data + depends on match_timer_function_unused_data@ + identifier match_timer_function_unused_data._callback; + type _origtype; + identifier _origarg; + @@ + + void _callback( + -_origtype _origarg + +struct timer_list *unused + ) + { + 
... when != _origarg + } + + Signed-off-by: Kees Cook + +commit 7dfaa7bc99498da1c6c4a48bee8d2d5265161a8c +Author: Arnd Bergmann +Date: Mon Nov 6 15:04:39 2017 +0100 + + bnxt: fix bnxt_hwrm_fw_set_time for y2038 + + On 32-bit architectures, rtc_time_to_tm() returns incorrect results + in 2038 or later, and do_gettimeofday() is broken for the same reason. + + This changes the code to use ktime_get_real_seconds() and time64_to_tm() + instead, both of them are 2038-safe, and we can also get rid of the + CONFIG_RTC_LIB dependency that way. + + Signed-off-by: Arnd Bergmann + Signed-off-by: David S. Miller + +commit 42ca728b829b8fee8ac85adb79eaffd36f0b4e06 +Author: Dan Carpenter +Date: Mon Nov 6 14:43:01 2017 +0300 + + bnxt: delete some unreachable code + + We return on the previous line so this "return 0;" statement should just + be deleted. + + Signed-off-by: Dan Carpenter + Signed-off-by: David S. Miller + +commit 575ed7d39e2fbe602a3894bc766a8cb49af83bd3 +Author: Nogah Frankel +Date: Mon Nov 6 07:23:42 2017 +0100 + + net_sch: mqprio: Change TC_SETUP_MQPRIO to TC_SETUP_QDISC_MQPRIO + + Change TC_SETUP_MQPRIO to TC_SETUP_QDISC_MQPRIO to match the new + convention. + + Signed-off-by: Nogah Frankel + Signed-off-by: Jiri Pirko + Reviewed-by: Simon Horman + Signed-off-by: David S. Miller + +commit f4e63525ee35f9c02e9f51f90571718363e9a9a9 +Author: Jakub Kicinski +Date: Fri Nov 3 13:56:16 2017 -0700 + + net: bpf: rename ndo_xdp to ndo_bpf + + ndo_xdp is a control path callback for setting up XDP in the + driver. We can reuse it for other forms of communication + between the eBPF stack and the drivers. Rename the callback + and associated structures and definitions. + + Signed-off-by: Jakub Kicinski + Reviewed-by: Simon Horman + Reviewed-by: Quentin Monnet + Signed-off-by: David S. Miller + +commit b153cbc507946f52d5aa687fd64f45d82cb36a3b +Author: Michael Chan +Date: Fri Nov 3 03:32:39 2017 -0400 + + bnxt_en: Fix IRQ coalescing regression. 
+ + Recent IRQ coalescing clean up has removed a guard-rail for the max DMA + buffer coalescing value. This is a 6-bit value and must not be 0. We + already have a check for 0 but 64 is equivalent to 0 and will cause + non-stop interrupts. Fix it by adding the proper check. + + Fixes: f8503969d27b ("bnxt_en: Refactor and simplify coalescing code.") + Reported-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit de4a10ef6eff0eb0ced97a39dc3edd0d3101b6ed +Author: Andy Gospodarek +Date: Fri Nov 3 03:32:38 2017 -0400 + + bnxt_en: fix typo in bnxt_set_coalesce + + Recent refactoring of coalesce settings contained a typo that prevents + receive settings from being set properly. + + Fixes: 18775aa8a91f ("bnxt_en: Reorganize the coalescing parameters.") + Signed-off-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 44ae12a768b7212976a362c590075716a77e8f28 +Author: Jiri Pirko +Date: Wed Nov 1 11:47:39 2017 +0100 + + net: sched: move the can_offload check from binding phase to rule insertion phase + + This restores the original behaviour before the block callbacks were + introduced. Allow the drivers to do binding of block always, no matter + if the NETIF_F_HW_TC feature is on or off. Move the check to the block + callback which is called for rule insertion. + + Reported-by: Alexander Duyck + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 952c5719aac6587f1e0add97dca79f9e73887f9b +Author: Michael Chan +Date: Sat Oct 28 01:56:10 2017 -0400 + + bnxt_en: Fix randconfig build errors. + + Fix undefined symbols when CONFIG_VLAN_8021Q or CONFIG_INET is not set. + + Fixes: 8c95f773b4a3 ("bnxt_en: add support for Flower based vxlan encap/decap offload") + Reported-by: Jakub Kicinski + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit cd66358e52f74585f043ef63089727273b3421d3 +Author: Sathya Perla +Date: Thu Oct 26 11:51:32 2017 -0400 + + bnxt_en: alloc tc_info{} struct only when tc flower is enabled + + TC flower is not enabled on VFs and when there's no FW support. + Alloc the tc_info{} struct at init time only when TC flower is being + enabled. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5a84acbebb22f93dfc9ce1e5f0427c45c94acb33 +Author: Sathya Perla +Date: Thu Oct 26 11:51:31 2017 -0400 + + bnxt_en: query cfa flow stats periodically to compute 'lastused' attribute + + This patch implements periodic querying of cfa flow stats + in batches to compute the 'lastused' attribute of TC flow stats. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f484f6782e013138946122ae09c100c9e4b547e3 +Author: Sathya Perla +Date: Thu Oct 26 11:51:30 2017 -0400 + + bnxt_en: add hwrm FW cmds for cfa_encap_record and decap_filter + + Add routines for issuing the hwrm_cfa_encap_record_alloc/free + and hwrm_cfa_decap_filter_alloc/free FW cmds needed for + supporting vxlan encap/decap offload. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8c95f773b4a367f7b9bcca7ab5f85675cfc812e9 +Author: Sathya Perla +Date: Thu Oct 26 11:51:29 2017 -0400 + + bnxt_en: add support for Flower based vxlan encap/decap offload + + This patch adds IPv4 vxlan encap/decap action support to TC-flower + offload. + + For vxlan encap, the driver maintains a tunnel encap hash-table. + When a new flow with a tunnel encap action arrives, this table + is looked up; if an encap entry exists, it uses the already + programmed encap_record_handle as the tunnel_handle in the + hwrm_cfa_flow_alloc cmd. Else, a new encap node is added and the + L2 header fields are queried via a route lookup. 
+ hwrm_cfa_encap_record_alloc cmd is used to create a new encap + record and the encap_record_handle is used as the tunnel_handle + while adding the flow. + + For vxlan decap, the driver maintains a tunnel decap hash-table. + When a new flow with a tunnel decap action arrives, this table + is looked up; if a decap entry exists, it uses the already + programmed decap_filter_handle as the tunnel_handle in the + hwrm_cfa_flow_alloc cmd. Else, a new decap node is added and + a decap_filter_handle is alloc'd via the hwrm_cfa_decap_filter_alloc + cmd. This handle is used as the tunnel_handle while adding the flow. + + The code to issue the HWRM FW cmds is introduced in a follow-up patch. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f8503969d27b2b26ff0adbce4b7d7cf4ba5e43c2 +Author: Michael Chan +Date: Thu Oct 26 11:51:28 2017 -0400 + + bnxt_en: Refactor and simplify coalescing code. + + The mapping of the ethtool coalescing parameters to hardware parameters + is now done in bnxt_hwrm_set_coal_params(). The same function can + handle both RX and TX settings. The code is now more clear. Some + adjustments have been made to get better hardware settings. The + coal_frames setting is now accurately set in hardware. The max_timer + is set to coal_ticks value. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 18775aa8a91fcd4cd07c722d575b4b852e3624c3 +Author: Michael Chan +Date: Thu Oct 26 11:51:27 2017 -0400 + + bnxt_en: Reorganize the coalescing parameters. + + The current IRQ coalescing logic is a little messy. The ethtool + parameters are mapped to hardware parameters in a way that is difficult + to understand. The first step is to better organize the parameters + by adding the new structure bnxt_coal. The structure is used by both + the RX and TX sets of coalescing parameters. + + Adjust the default coal_ticks to 14 us and 28 us for RX and TX. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 49f7972fd16407b3d1f03c2d447d2f1e1b95e9ba +Author: Vasundhara Volam +Date: Thu Oct 26 11:51:26 2017 -0400 + + bnxt_en: Add ethtool reset method + + This is a firmware internal reset after driver is unloaded. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7eb9bb3a0c7c29741df2249cc3b99f06a7978d61 +Author: Michael Chan +Date: Thu Oct 26 11:51:25 2017 -0400 + + bnxt_en: Check maximum supported MTU from firmware. + + Some NICs have a firmware enforced maximum MTU setting by management + firmware. Set up netdev->max_mtu accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c1a7bdff17247332ecff7f243e42d269b3f74c65 +Author: Michael Chan +Date: Thu Oct 26 11:51:24 2017 -0400 + + bnxt_en: Optimize .ndo_set_mac_address() for VFs. + + No need to call bnxt_approve_mac() which will send a message to the + PF if the MAC address hasn't changed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 431aa1eb20d8ae2674723292adb832b968da868e +Author: Michael Chan +Date: Thu Oct 26 11:51:23 2017 -0400 + + bnxt_en: Get firmware package version one time. + + The current code retrieves the firmware package version from firmware + everytime ethtool -i is run. There is no reason to do that as the + firmware will not change while the driver is loaded. Get the version + once at init time. + + Also, display the full 4-part firmware version string and remove the + less useful interface spec version. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e0ad8fc5980b362028cfd63ec037f4b491e726c6 +Author: Michael Chan +Date: Thu Oct 26 11:51:22 2017 -0400 + + bnxt_en: Check for zero length value in bnxt_get_nvram_item(). + + Return -EINVAL if the length is zero and not proceed to do essentially + nothing. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 618784e3ee1870e43e50e1c7922cc123cc050566 +Author: Rob Miller +Date: Thu Oct 26 11:51:21 2017 -0400 + + bnxt_en: adding PCI ID for SMARTNIC VF support + + Signed-off-by: Rob Miller + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8ed693b7bbd179949f6947adaae5eff2e386a534 +Author: Ray Jui +Date: Thu Oct 26 11:51:20 2017 -0400 + + bnxt_en: Add PCIe device ID for bcm58804 + + Add new PCIe device ID and chip number for bcm58804 + + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 57922b0a2f7ef9effbcdbbf7d1f8dad95aa567f7 +Author: Michael Chan +Date: Thu Oct 26 11:51:19 2017 -0400 + + bnxt_en: Update firmware interface to 1.8.3.1 + + Vxlan encap/decap filters are added to this firmware spec. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c467bf399106030d5a97d844ee119caec04e817 +Author: Steve Lin +Date: Thu Oct 19 10:45:56 2017 -0400 + + bnxt: Move generic devlink code to new file + + Moving generic devlink code (registration) out of VF-R code + into new bnxt_devlink file, in preparation for future work + to add additional devlink functionality to bnxt. + + Signed-off-by: Steve Lin + Acked-by: Andy Gospodarek + Signed-off-by: David S. Miller + +commit 8d26d5636dff9fca30816579910aaa9a55b4d96d +Author: Jiri Pirko +Date: Thu Oct 19 15:50:46 2017 +0200 + + net: sched: avoid ndo_setup_tc calls for TC_SETUP_CLS* + + All drivers are converted to use block callbacks for TC_SETUP_CLS*. + So it is now safe to remove the calls to ndo_setup_tc from cls_* + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 9e0fd15dd6c981931a9e9f11dc0c940d17d6e051 +Author: Jiri Pirko +Date: Thu Oct 19 15:50:39 2017 +0200 + + bnxt: Convert ndo_setup_tc offloads to block callbacks + + Benefit from the newly introduced block callback infrastructure and + convert ndo_setup_tc calls for flower offloads to block callbacks. 
+ + Signed-off-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit 5b1e1a9ce06fd94b563d6c3dd896589231995d89 +Author: Sankar Patchineelam +Date: Fri Oct 13 21:09:34 2017 -0400 + + bnxt_en: Fix possible corruption in DCB parameters from firmware. + + hwrm_send_message() is replaced with _hwrm_send_message(), and + hwrm_cmd_lock mutex lock is grabbed for the whole period of + firmware call until the firmware DCB parameters have been copied. + This will prevent possible corruption of the firmware data. + + Fixes: 7df4ae9fe855 ("bnxt_en: Implement DCBNL to support host-based DCBX.") + Signed-off-by: Sankar Patchineelam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cc72f3b1feb4fd38d33ab7a013d5ab95041cb8ba +Author: Michael Chan +Date: Fri Oct 13 21:09:33 2017 -0400 + + bnxt_en: Fix possible corrupted NVRAM parameters from firmware response. + + In bnxt_find_nvram_item(), it is copying firmware response data after + releasing the mutex. This can cause the firmware response data + to be corrupted if the next firmware response overwrites the response + buffer. The rare problem shows up when running ethtool -i repeatedly. + + Fix it by calling the new variant _hwrm_send_message_silent() that requires + the caller to take the mutex and to release it after the response data has + been copied. + + Fixes: 3ebf6f0a09a2 ("bnxt_en: Add installed-package version reporting via Ethtool GDRVINFO") + Reported-by: Sarveswara Rao Mygapula + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 021570793d8cd86cb62ac038c535f4450586b454 +Author: Michael Chan +Date: Fri Oct 13 21:09:32 2017 -0400 + + bnxt_en: Fix VF resource checking. + + In bnxt_sriov_enable(), we calculate to see if we have enough hardware + resources to enable the requested number of VFs. The logic to check + for minimum completion rings and statistics contexts is missing. Add + the required checks so that VF configuration won't fail. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7ab0760f5178169c4c218852f51646ea90817d7c +Author: Vasundhara Volam +Date: Fri Oct 13 21:09:31 2017 -0400 + + bnxt_en: Fix VF PCIe link speed and width logic. + + PCIE PCIE_EP_REG_LINK_STATUS_CONTROL register is only defined in PF + config space, so we must read it from the PF. + + Fixes: 90c4f788f6c0 ("bnxt_en: Report PCIe link speed and width during driver load") + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e2dc9b6e38fa3919e63d6d7905da70ca41cbf908 +Author: Michael Chan +Date: Fri Oct 13 21:09:30 2017 -0400 + + bnxt_en: Don't use rtnl lock to protect link change logic in workqueue. + + As a further improvement to the PF/VF link change logic, use a private + mutex instead of the rtnl lock to protect link change logic. With the + new mutex, we don't have to take the rtnl lock in the workqueue when + we have to handle link related functions. If the VF and PF drivers + are running on the same host and both take the rtnl lock and one is + waiting for the other, it will cause timeout. This patch fixes these + timeouts. + + Fixes: 90c694bb7181 ("bnxt_en: Fix RTNL lock usage on bnxt_update_link().") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c213eae8d3cd4c026f348ce4fd64f4754b3acf2b +Author: Michael Chan +Date: Fri Oct 13 21:09:29 2017 -0400 + + bnxt_en: Improve VF/PF link change logic. + + Link status query firmware messages originating from the VFs are forwarded + to the PF. The driver handles these interactions in a workqueue for the + VF and PF. The VF driver waits for the response from the PF in the + workqueue. If the PF and VF driver are running on the same host and the + work for both PF and VF are queued on the same workqueue, the VF driver + may not get the response if the PF work item is queued behind it on the + same workqueue. This will lead to the VF link query message timing out. 
+ + To prevent this, we create a private workqueue for PFs instead of using + the common workqueue. The VF query and PF response will never be on + the same workqueue. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit de8f3a83b0a0fddb2cf56e7a718127e9619ea3da +Author: Daniel Borkmann +Date: Mon Sep 25 02:25:51 2017 +0200 + + bpf: add meta pointer for direct access + + This work enables generic transfer of metadata from XDP into skb. The + basic idea is that we can make use of the fact that the resulting skb + must be linear and already comes with a larger headroom for supporting + bpf_xdp_adjust_head(), which mangles xdp->data. Here, we base our work + on a similar principle and introduce a small helper bpf_xdp_adjust_meta() + for adjusting a new pointer called xdp->data_meta. Thus, the packet has + a flexible and programmable room for meta data, followed by the actual + packet data. struct xdp_buff is therefore laid out that we first point + to data_hard_start, then data_meta directly prepended to data followed + by data_end marking the end of packet. bpf_xdp_adjust_head() takes into + account whether we have meta data already prepended and if so, memmove()s + this along with the given offset provided there's enough room. + + xdp->data_meta is optional and programs are not required to use it. The + rationale is that when we process the packet in XDP (e.g. as DoS filter), + we can push further meta data along with it for the XDP_PASS case, and + give the guarantee that a clsact ingress BPF program on the same device + can pick this up for further post-processing. 
Since we work with skb + there, we can also set skb->mark, skb->priority or other skb meta data + out of BPF, thus having this scratch space generic and programmable + allows for more flexibility than defining a direct 1:1 transfer of + potentially new XDP members into skb (it's also more efficient as we + don't need to initialize/handle each of such new members). The facility + also works together with GRO aggregation. The scratch space at the head + of the packet can be multiple of 4 byte up to 32 byte large. Drivers not + yet supporting xdp->data_meta can simply be set up with xdp->data_meta + as xdp->data + 1 as bpf_xdp_adjust_meta() will detect this and bail out, + such that the subsequent match against xdp->data for later access is + guaranteed to fail. + + The verifier treats xdp->data_meta/xdp->data the same way as we treat + xdp->data/xdp->data_end pointer comparisons. The requirement for doing + the compare against xdp->data is that it hasn't been modified from it's + original address we got from ctx access. It may have a range marking + already from prior successful xdp->data/xdp->data_end pointer comparisons + though. + + Signed-off-by: Daniel Borkmann + Acked-by: Alexei Starovoitov + Acked-by: John Fastabend + Signed-off-by: David S. Miller + +commit 1fac4b2fdbccab69cb781aae68f540be94d5549e +Author: Tobias Klauser +Date: Tue Sep 26 15:12:26 2017 +0200 + + bnxt_en: Remove redundant unlikely() + + IS_ERR() already implies unlikely(), so it can be omitted. + + Signed-off-by: Tobias Klauser + Signed-off-by: David S. Miller + +commit 6c43824477c2ac722325ba460c2ce683c48fb76b +Author: Allen Pais +Date: Thu Sep 21 22:35:08 2017 +0530 + + drivers: net: bnxt: use setup_timer() helper. + + Use setup_timer function instead of initializing timer with the + function and data fields. + + Signed-off-by: Allen Pais + Signed-off-by: David S. 
Miller + +commit 1e3c5ec66119783440ed211ae527674651affa9b +Author: Sathya Perla +Date: Mon Sep 18 17:05:37 2017 +0530 + + bnxt_en: check for ingress qdisc in flower offload + + Check for ingress-only qdisc for flower offload, as other qdiscs + are not supported for flower offload. + + Suggested-by: Jiri Pirko + Signed-off-by: Sathya Perla + Reviewed-by: Jiri Pirko + Signed-off-by: David S. Miller + +commit f143647a02825038d8d6251422e1d0ebdcb6d9ea +Author: Sathya Perla +Date: Tue Aug 29 11:45:03 2017 +0530 + + bnxt_en: add a dummy definition for bnxt_vf_rep_get_fid() + + When bnxt VF-reps are not compiled in (CONFIG_BNXT_SRIOV is off) + bnxt_tc.c needs a dummy definition of the routine bnxt_vf_rep_get_fid(). + + Reported-by: kbuild test robot + Fixes: 2ae7408fedfe ("bnxt_en: bnxt: add TC flower filter offload support") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d7bc73053024eecb239a5b8644a05c7745fd87a1 +Author: Sathya Perla +Date: Mon Aug 28 13:40:35 2017 -0400 + + bnxt_en: add code to query TC flower offload stats + + This patch adds code to implement TC_CLSFLOWER_STATS TC-cmd and the + required FW code to query the stats from the HW. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit db1d36a27324d51e62944f702a4d2d50548896ee +Author: Sathya Perla +Date: Mon Aug 28 13:40:34 2017 -0400 + + bnxt_en: add TC flower offload flow_alloc/free FW cmds + + This patch adds the hwrm_cfa_flow_alloc/free() routines + that are needed to issue the FW cmds needed for TC flower offload. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2ae7408fedfee979e01ed3801223c632bb124c46 +Author: Sathya Perla +Date: Mon Aug 28 13:40:33 2017 -0400 + + bnxt_en: bnxt: add TC flower filter offload support + + This patch adds support for offloading TC based flow + rules and actions for the 'flower' classifier in the bnxt_en driver. 
+ It includes logic to parse flow rules and actions received from the + TC subsystem, store them and issue the corresponding + hwrm_cfa_flow_alloc/free FW cmds. L2/IPv4/IPv6 flows and drop, + redir, vlan push/pop actions are supported in this patch. + + In this patch the hwrm_cfa_flow_xxx routines are just stubs. + The code for these routines is introduced in the next patch for easier + review. Also, the code to query the TC/flower action stats will + be introduced in a subsequent patch. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 70855603e0a129d1c810947a4e4dd7ecd6f4560d +Author: Sathya Perla +Date: Mon Aug 28 13:40:32 2017 -0400 + + bnxt_en: fix clearing devlink ptr from bnxt struct + + The routine bnxt_link_bp_to_dl() is used to set the devlink ptr + in bnxt struct (bp) and also to set the bnxt back ptr in + the devlink struct. If devlink_register() fails, bp->dl must + be cleared which is not happening currently. This patch fixes + bnxt_link_bp_to_dl() to clear bp->dl by passing a NULL dl ptr. + + Fixes: 4ab0c6a8ffd7 ("bnxt_en: add support to enable VF-representors") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d5430d31ca72ec37fd539fd1c5230859509be4ef +Author: Michael Chan +Date: Mon Aug 28 13:40:31 2017 -0400 + + bnxt_en: Reduce default rings on multi-port cards. + + Reduce default rings from 8 to 4 on multi-port cards to reduce memory + usage. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 903649e718f80da2ba4b65a0adf6930219b4b2e5 +Author: Michael Chan +Date: Mon Aug 28 13:40:30 2017 -0400 + + bnxt_en: Improve -ENOMEM logic in NAPI poll loop. + + If we cannot allocate RX buffers in the NAPI poll loop when processing + an RX event, the current code does not count that event towards the NAPI + budget. This can cause us to potentially loop forever in NAPI if we + consistently cannot allocate new buffers. 
Improve it by counting + -ENOMEM event as 1 towards the NAPI budget. + + Cc: Martin KaFai Lau + Signed-off-by: Michael Chan + Reported-by: Martin KaFai Lau + Acked-by: Martin KaFai Lau + Signed-off-by: David S. Miller + +commit 27573a7d905a49dc756fda9c0e148372136356e6 +Author: Scott Branden +Date: Mon Aug 28 13:40:29 2017 -0400 + + bnxt: initialize board_info values with proper enums + + initialize board_info values with proper enums for defensive programming + purposes. This will avoid any errors of the enums being declared not + lining up with the board_info array. + + Signed-off-by: Scott Branden + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4a58139b8493624c6c6223b58a9e70ebbdf56338 +Author: Ray Jui +Date: Mon Aug 28 13:40:28 2017 -0400 + + bnxt: Add PCIe device IDs for bcm58802/bcm58808 + + Add PCIe device ID for bcm58802 and bcm58808. Also add chip number + update to declare bcm588xx as chip class phase 4 and later + + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 56f0fd80d1886479a42ac07ed239538eb145a669 +Author: Vasundhara Volam +Date: Mon Aug 28 13:40:27 2017 -0400 + + bnxt_en: assign CPU affinity hints to bnxt_en IRQs + + This patch provides hints to irqbalance to map bnxt_en device IRQs + to specific CPU cores. cpumask_local_spread() is used, which first + maps IRQs to near NUMA cores; when those cores are exhausted, IRQs + are mapped to far NUMA cores. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 98fdbe73bfb809b1f8eec9f27a36e737caed3a44 +Author: Michael Chan +Date: Mon Aug 28 13:40:26 2017 -0400 + + bnxt_en: Improve tx ring reservation logic. + + When the number of TX rings is changed (e.g. ethtool -L, enabling XDP TX + rings, etc), the current code tries to reserve the new number of TX rings + before closing and re-opening the NIC. 
If we are unable to reserve the + new TX rings, we abort the operation and keep the current TX rings. + + The problem is that the firmware will disable the current TX rings even + when it cannot reserve the new set of TX rings. We fix it as follows: + + 1. Instead of reserving the new set of TX rings, just ask the firmware + to check if the new set of TX rings is available. There is a flag in + the firmware message to do that. If not available, abort and the + current TX rings will not be disabled. + + 2. Do the actual TX ring reservation in the path that opens the NIC. + We keep the number of TX rings currently successfully reserved. If the + number of TX rings is different than the reserved TX rings, we call + firmware and reserve again. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6a17eb27bf7ece364627fcf16ad50c24b793300b +Author: Michael Chan +Date: Mon Aug 28 13:40:25 2017 -0400 + + bnxt_en: Update firmware interface spec. to 1.8.1.4. + + Flow APIs are added in this firmware interface. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a22a6ac2ff8080c87e446e20592725c064229c71 +Author: Michael Chan +Date: Wed Aug 23 19:34:05 2017 -0400 + + bnxt_en: Do not setup MAC address in bnxt_hwrm_func_qcaps(). + + bnxt_hwrm_func_qcaps() is called during probe to get all device + resources and it also sets up the factory MAC address. The same function + is called when SRIOV is disabled to reclaim all resources. If + the MAC address has been overridden by a user administered MAC + address, calling this function will overwrite it. + + Separate the logic that sets up the default MAC address into a new + function bnxt_init_mac_addr() that is only called during probe time. + + Fixes: 4a21b49b34c0 ("bnxt_en: Improve VF resource accounting.") + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 146ed3c5b87d8c65ec31bc56df26f027fe624b8f +Author: Michael Chan +Date: Wed Aug 23 19:34:04 2017 -0400 + + bnxt_en: Free MSIX vectors when unregistering the device from bnxt_re. + + Take back ownership of the MSIX vectors when unregistering the device + from bnxt_re. + + Fixes: a588e4580a7e ("bnxt_en: Add interface to support RDMA driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87e9b3778c94694c9e098c91a0cc05725f0e017f +Author: Michael Chan +Date: Wed Aug 23 19:34:03 2017 -0400 + + bnxt_en: Fix .ndo_setup_tc() to include XDP rings. + + When the number of TX rings is changed in bnxt_setup_tc(), we need to + include the XDP rings in the total TX ring count. + + Fixes: 38413406277f ("bnxt_en: Add support for XDP_TX action.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bd76b87962833f6e55264030a227be0f090b1286 +Author: Colin Ian King +Date: Fri Aug 18 16:40:00 2017 +0100 + + bnxt_en: fix spelling mistake: "swtichdev" -> "switchdev" + + Trivial fix to spelling mistake in a netdev_info message + + Signed-off-by: Colin Ian King + Signed-off-by: David S. Miller + +commit de4784ca030fed17d527dbb2bb4e21328b12de94 +Author: Jiri Pirko +Date: Mon Aug 7 10:15:32 2017 +0200 + + net: sched: get rid of struct tc_to_netdev + + Get rid of struct tc_to_netdev which is now just unnecessary container + and rather pass per-type structures down to drivers directly. + Along with that, consolidate the naming of per-type structure variables + in cls_*. + + Signed-off-by: Jiri Pirko + Acked-by: Jamal Hadi Salim + Signed-off-by: David S. Miller + +commit 38cf0426e5178b1c3810bb88e65dd23882e40283 +Author: Jiri Pirko +Date: Mon Aug 7 10:15:31 2017 +0200 + + net: sched: change return value of ndo_setup_tc for driver supporting mqprio only + + Change the return value from -EINVAL to -EOPNOTSUPP. The rest of the + drivers have it like that, so be aligned. 
+ + Signed-off-by: Jiri Pirko + Acked-by: Jamal Hadi Salim + Signed-off-by: David S. Miller + +commit 5fd9fc4e207dba0c05cafe78417952b4c4ca02dc +Author: Jiri Pirko +Date: Mon Aug 7 10:15:29 2017 +0200 + + net: sched: push cls related args into cls_common structure + + As ndo_setup_tc is generic offload op for whole tc subsystem, does not + really make sense to have cls-specific args. So move them under + cls_common structurure which is embedded in all cls structs. + + Signed-off-by: Jiri Pirko + Acked-by: Jamal Hadi Salim + Signed-off-by: David S. Miller + +commit 2572ac53c46f58e500b9d8d0f99785666038c590 +Author: Jiri Pirko +Date: Mon Aug 7 10:15:17 2017 +0200 + + net: sched: make type an argument for ndo_setup_tc + + Since the type is always present, push it to be a separate argument to + ndo_setup_tc. On the way, name the type enum and use it for arg type. + + Signed-off-by: Jiri Pirko + Acked-by: Jamal Hadi Salim + Signed-off-by: David S. Miller + +commit 53f70b8b5aa06db53eb06f092342e6073891729a +Author: Sathya Perla +Date: Tue Jul 25 13:28:41 2017 -0400 + + bnxt_en: fix switchdev port naming for external-port-rep and vf-reps + + Fix the phys_port_name for the external physical port to be in + "pA" format and that of VF-rep to be in "pCvfD" format as + suggested by Jakub Kicinski. + + Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and get_phys_port_name") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e408ebdc41aa53f0aa552132384daaa5f5c6301d +Author: Sathya Perla +Date: Tue Jul 25 13:28:40 2017 -0400 + + bnxt_en: use SWITCHDEV_SET_OPS() for setting vf_rep_switchdev_ops + + This fixes the build error: + ‘struct net_device’ has no member named ‘switchdev_ops’ + + Reported-by: kbuild test robot + Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and and get_phys_port_name") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit d3e3becedc43adc8b8fb12e7507dd4e5aae4d17d +Author: Sathya Perla +Date: Tue Jul 25 13:28:39 2017 -0400 + + bnxt_en: include bnxt_vfr.c code under CONFIG_BNXT_SRIOV switch + + And define empty functions in bnxt_vfr.h when CONFIG_BNXT_SRIOV is not + defined. + + This fixes build error when CONFIG_BNXT_SRIOV is switched off: + >> drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c:165:16: error: 'struct + >> bnxt' has no member named 'sriov_lock' + + Reported-by: kbuild test robot + Fixes: 4ab0c6a8ffd7 ("bnxt_en: add support to enable VF-representors") + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 351bac30613378c4684d4673aac0c7917980a652 +Author: stephen hemminger +Date: Mon Jul 24 10:25:19 2017 -0700 + + bnxt: fix unused variable warnings + + Fix a couple of warnings where variable ‘txq’ set but not used + + Signed-off-by: Stephen Hemminger + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit b721cfaf03bcaac0a3abf702c4240326eed9e4b1 +Author: stephen hemminger +Date: Mon Jul 24 10:25:18 2017 -0700 + + bnxt: fix unsigned comparison with 0 + + Fixes warning because location is u32 and can never be negative + warning: comparison of unsigned expression < 0 is always false [-Wtype-limits] + + Signed-off-by: Stephen Hemminger + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit bc88055ab72c0eaa080926c888628b77d2055513 +Author: David S. Miller +Date: Mon Jul 24 21:20:16 2017 -0700 + + bnxt_en: Use SWITCHDEV_SET_OPS(). + + Suggested by Jakub Kicinski. + + Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and and get_phys_port_name") + Reported-by: kbuild test robot + Signed-off-by: David S. 
Miller + +commit c124a62ff2dde9eaa9e8083de8206a142535c04e +Author: Sathya Perla +Date: Mon Jul 24 12:34:29 2017 -0400 + + bnxt_en: add support for port_attr_get and and get_phys_port_name + + This patch adds support for the switchdev_port_attr_get() and + ndo_get_phys_port_name() methods for the PF and the VF-reps. + Using this support a user application can deduce that the PF + (when in the ESWITCH_SWDEV mode) and it's VF-reps form a switch. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ee5c7fb3404724b9e25fe24c81fbcda60f3f2659 +Author: Sathya Perla +Date: Mon Jul 24 12:34:28 2017 -0400 + + bnxt_en: add vf-rep RX/TX and netdev implementation + + This patch introduces the RX/TX and a simple netdev implementation + for VF-reps. The VF-reps use the RX/TX rings of the PF. For each VF-rep + the PF driver issues a VFR_ALLOC FW cmd that returns "cfa_code" + and "cfa_action" values. The FW sets up the filter tables in such + a way that VF traffic by default (in absence of other rules) + gets punted to the parent PF. The cfa_code value in the RX-compl + informs the driver of the source VF. For traffic being transmitted + from the VF-rep, the TX BD is tagged with a cfa_action value that + informs the HW to punt it to the corresponding VF. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4ab0c6a8ffd7d25475dd9eb06614eec1ae53a443 +Author: Sathya Perla +Date: Mon Jul 24 12:34:27 2017 -0400 + + bnxt_en: add support to enable VF-representors + + This patch is a part of a patch-set that introduces support for + VF-reps in the bnxt_en driver. The driver registers eswitch mode + get/set methods with the devlink interface that allow a user to + enable SRIOV switchdev mode. When enabled, the driver registers + a VF-rep netdev object for each VF with the stack. 
This can + essentially bring the VFs unders the management perview of the + hypervisor and applications such as OVS. + + The next patch in the series, adds the RX/TX routines and a slim + netdev implementation for the VF-reps. + + Signed-off-by: Sathya Perla + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 70098a47bbf131b65c64ca935c2480e64c9c7c51 +Author: Michael Chan +Date: Mon Jul 24 12:34:26 2017 -0400 + + bnxt_en: Set ETS min_bw parameter for older firmware. + + In addition to the ETS weight, older firmware also requires the min_bw + parameter to be set for it to work properly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9315edca9b1d0daf41f81e1f5d4fb995d3cbc634 +Author: Michael Chan +Date: Mon Jul 24 12:34:25 2017 -0400 + + bnxt_en: Report firmware DCBX agent. + + Report DCB_CAP_DCBX_LLD_MANAGED only if the firmware DCBX agent is enabled + and running for PF or VF. Otherwise, if both LLDP and DCBX agents are + disabled in firmware, we report DCB_CAP_DCBX_LLD_HOST and allow host + IEEE DCB settings. This patch refines the current logic in the driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit adcc331e42e639ea44ac3c746db6c7207c3f69c0 +Author: Michael Chan +Date: Mon Jul 24 12:34:24 2017 -0400 + + bnxt_en: Allow the user to set ethtool stats-block-usecs to 0. + + For debugging purpose, it is sometimes useful to disable periodic + port statistics updates, so that the firmware logs will not be + filled with statistics update messages. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5c8227d0d3b1eb1ad8f98d0b6dc619d70f2cfa04 +Author: Michael Chan +Date: Mon Jul 24 12:34:23 2017 -0400 + + bnxt_en: Add bnxt_get_num_stats() to centrally get the number of ethtool stats. + + Instead of duplicating the logic multiple times. Also, it is unnecessary + to zero the buffer in .get_ethtool_stats() because it is already zeroed + by the caller. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 39d8ba2e71fbdde686d7e31ad141a01994dc0793 +Author: Michael Chan +Date: Mon Jul 24 12:34:22 2017 -0400 + + bnxt_en: Implement ndo_bridge_{get|set}link methods. + + To allow users to set the hardware bridging mode to VEB or VEPA. Only + single function PF can change the bridging mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 32e8239c9138a050bc1feeea7cf41f27d79e6664 +Author: Michael Chan +Date: Mon Jul 24 12:34:21 2017 -0400 + + bnxt_en: Retrieve the hardware bridge mode from the firmware. + + Retrieve and store the hardware bridge mode, so that we can implement + ndo_bridge_{get|set)link methods in the next patch. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit acb2005463612930b07723e852b2483d669ff856 +Author: Michael Chan +Date: Mon Jul 24 12:34:20 2017 -0400 + + bnxt_en: Update firmware interface spec to 1.8.0. + + VF representors and PTP are added features in the new firmware spec. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9b0436c3f29483ca91d890b0072c0c02e2e535ed +Author: Michael Chan +Date: Tue Jul 11 13:05:36 2017 -0400 + + bnxt_en: Fix SRIOV on big-endian architecture. + + The PF driver sets up a list of firmware commands from the VF driver that + needs to be forwarded to the PF for approval. This list is a 256-bit + bitmap. The code that sets up the bitmap falls apart on big-endian + architecture. __set_bit() does not work because it operates on long types + whereas the firmware interface is defined in u32 types, causing bits in + the wrong 32-bit word to be set. + + Fix it by setting the proper bits on an array of u32. + + Fixes: de68f5de5651 ("bnxt_en: Fix bitmap declaration to work on 32-bit arches.") + Reported-by: Shannon Nelson + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 3b6b34df342553a7522561e34288f5bb803aa9aa +Author: Michael Chan +Date: Tue Jul 11 13:05:35 2017 -0400 + + bnxt_en: Fix bug in ethtool -L. + + When changing channels from combined to rx/tx or vice versa, the code + uses the wrong "sh" parameter to determine if we are reserving rings + for shared or non-shared mode. It should be using the ethtool requested + "sh" parameter instead of the current "sh" parameter. + + Fix it by passing the "sh" parameter to bnxt_reserve_rings(). For + ethtool, we will pass in the requested "sh" parameter. + + Fixes: 391be5c27364 ("bnxt_en: Implement new scheme to reserve tx rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f9b76ebd49f97458857568918c305a17fa7c6567 +Author: Michael Chan +Date: Tue Jul 11 13:05:34 2017 -0400 + + bnxt_en: Fix race conditions in .ndo_get_stats64(). + + .ndo_get_stats64() may not be protected by RTNL and can race with + .ndo_stop() or other ethtool operations that can free the statistics + memory. Fix it by setting a new flag BNXT_STATE_READ_STATS and then + proceeding to read statistics memory only if the state is OPEN. The + close path that frees the memory clears the OPEN state and then waits + for the BNXT_STATE_READ_STATS to clear before proceeding to free the + statistics memory. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2270bc5da34979454e6f2eb133d800b635156174 +Author: Michael Chan +Date: Fri Jun 23 14:01:01 2017 -0400 + + bnxt_en: Fix netpoll handling. + + To handle netpoll properly, the driver must only handle TX packets + during NAPI. Handling RX events cause warnings and errors in + netpoll mode. The ndo_poll_controller() method should call + napi_schedule() directly so that a NAPI weight of zero will be used + during netpoll mode. + + The bnxt_en driver supports 2 ring modes: combined, and separate rx/tx. 
+ In separate rx/tx mode, the ndo_poll_controller() method will only + process the tx rings. In combined mode, the rx and tx completion + entries are mixed in the completion ring and we need to drop the rx + entries and recycle the rx buffers. + + Add a function bnxt_force_rx_discard() to handle this in netpoll mode + when we see rx entries in combined ring mode. + + Reported-by: Calvin Owens + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 69c149e2e39e8d66437c9034bb4926ef2c1f7c23 +Author: Michael Chan +Date: Fri Jun 23 14:01:00 2017 -0400 + + bnxt_en: Add missing logic to handle TPA end error conditions. + + When we get a TPA_END completion to handle a completed LRO packet, it + is possible that hardware would indicate errors. The current code is + not checking for the error condition. Define the proper error bits and + the macro to check for this error and abort properly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8902965f8cb23bba8aa7f3be293ec2f3067b82c6 +Author: Martin KaFai Lau +Date: Thu Jun 15 17:29:13 2017 -0700 + + bpf: bnxt: Report bpf_prog ID during XDP_QUERY_PROG + + Add support to bnxt to report bpf_prog ID during XDP_QUERY_PROG. + + Signed-off-by: Martin KaFai Lau + Cc: Michael Chan + Acked-by: Alexei Starovoitov + Acked-by: Daniel Borkmann + Signed-off-by: David S. Miller + +commit a5fcf8a6c968ed8e312ff0b2a55d4c62d821eabb +Author: Jiri Pirko +Date: Tue Jun 6 17:00:16 2017 +0200 + + net: propagate tc filter chain index down the ndo_setup_tc call + + We need to push the chain index down to the drivers, so they have the + information to which chain the rule belongs. For now, no driver supports + multichain offload, so only chain 0 is supported. This is needed to + prevent chain squashes during offload for now. Later this will be used + to implement multichain offload. + + Signed-off-by: Jiri Pirko + Signed-off-by: David S. 
Miller + +commit ffe406457753a7ca2061ecc8c4d3971623066911 +Author: Michael Chan +Date: Tue May 30 20:03:00 2017 -0400 + + bnxt_en: Fix xmit_more with BQL. + + We need to write the doorbell if BQL has stopped the queue and + skb->xmit_more is set. Otherwise it is possible for the tx queue to + rot and cause tx timeout. + + Fixes: 4d172f21cefe ("bnxt_en: Implement xmit_more.") + Suggested-by: Yuval Mintz + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 702c221ca64060b81af4461553be19cba275da8b +Author: Michael Chan +Date: Mon May 29 19:06:10 2017 -0400 + + bnxt_en: Pass in sh parameter to bnxt_set_dflt_rings(). + + In the existing code, the local variable sh is hardcoded to true to + calculate default rings for shared ring configuration. It is better + to have the caller determine the value of sh. + + Reported-by: Gustavo A. R. Silva + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4d172f21cefe896df8477940269b8d52129f8c87 +Author: Michael Chan +Date: Mon May 29 19:06:09 2017 -0400 + + bnxt_en: Implement xmit_more. + + Do not write the TX doorbell if skb->xmit_more is set unless the TX + queue is full. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 434c975a8fe2f70b70ac09ea5ddd008e0528adfa +Author: Michael Chan +Date: Mon May 29 19:06:08 2017 -0400 + + bnxt_en: Optimize doorbell write operations for newer chips. + + Older chips require the doorbells to be written twice, but newer chips + do not. Add a new common function bnxt_db_write() to write all + doorbells appropriately depending on the chip. Eliminating the extra + doorbell on newer chips has a significant performance improvement + on pktgen. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3284f9e1ab505b41fa604c81e4b3271c6b88cdcb +Author: Michael Chan +Date: Mon May 29 19:06:07 2017 -0400 + + bnxt_en: Add additional chip ID definitions. + + Add additional chip definitions and macros for all supported chips. 
+ Add a new macro BNXT_CHIP_P4_PLUS for the newer generation of chips and + use the macro to properly determine the features supported by these + newer chips. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0efd2fc65c922dff207ff10a776a7a33e0e3c7c5 +Author: Michael Chan +Date: Mon May 29 19:06:06 2017 -0400 + + bnxt_en: Add a callback to inform RDMA driver during PCI shutdown. + + When bnxt_en gets a PCI shutdown call, we need to have a new callback + to inform the RDMA driver to do proper shutdown and removal. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c7ef35eb0c8d0b58d2d5ae5be599e6aa730361b2 +Author: Deepak Khungar +Date: Mon May 29 19:06:05 2017 -0400 + + bnxt_en: Add PCI IDs for BCM57454 VF devices. + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e605db801bdeb9d94cccbd4a2f641030067ef008 +Author: Deepak Khungar +Date: Mon May 29 19:06:04 2017 -0400 + + bnxt_en: Support for Short Firmware Message + + The new short message format is used on the new BCM57454 VFs. Each + firmware message is a fixed 16-byte message sent using the standard + firmware communication channel. The short message has a DMA address + pointing to the legacy long firmware message. + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f667724b99ad1afc91f16064d8fb293d2805bd57 +Author: Michael Chan +Date: Tue May 16 16:39:44 2017 -0400 + + bnxt_en: Check status of firmware DCBX agent before setting DCB_CAP_DCBX_HOST. + + Otherwise, all the host based DCBX settings from lldpad will fail if the + firmware DCBX agent is running. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87fe603274aa9889c05cca3c3e45675e1997cb13 +Author: Michael Chan +Date: Tue May 16 16:39:43 2017 -0400 + + bnxt_en: Call bnxt_dcb_init() after getting firmware DCBX configuration. 
+ + In the current code, bnxt_dcb_init() is called too early before we + determine if the firmware DCBX agent is running or not. As a result, + we are not setting the DCB_CAP_DCBX_HOST and DCB_CAP_DCBX_LLD_MANAGED + flags properly to report to DCBNL. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c519fe9a4f0d1a1c559529c404589b8e346143f3 +Author: Shannon Nelson +Date: Tue May 9 18:30:12 2017 -0700 + + bnxt: add dma mapping attributes + + On the SPARC platform we need to use the DMA_ATTR_WEAK_ORDERING attribute + in our Rx path dma mapping in order to get the expected performance out + of the receive path. Adding it to the Tx path has little effect, so + that's not a part of this patch. + + Signed-off-by: Shannon Nelson + Reviewed-by: Tushar Dave + Reviewed-by: Tom Saeger + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit ac45bd93a5035c2f39c9862b8b6ed692db0fdc87 +Author: Dan Carpenter +Date: Sat May 6 03:49:01 2017 +0300 + + bnxt_en: allocate enough space for ->ntp_fltr_bmap + + We have the number of longs, but we need to calculate the number of + bytes required. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Dan Carpenter + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9e54e322ded40f424dcb5a13508e2556919ce12a +Author: Deepak Khungar +Date: Fri Apr 21 20:11:26 2017 -0400 + + bnxt_en: Restrict a PF in Multi-Host mode from changing port PHY configuration + + This change restricts the PF in multi-host mode from setting any port + level PHY configuration. The settings are controlled by firmware in + Multi-Host mode. + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7d63818a35851cf00867248d5ab50a8fe8df5943 +Author: Michael Chan +Date: Fri Apr 21 20:11:25 2017 -0400 + + bnxt_en: Check the FW_LLDP_AGENT flag before allowing DCBX host agent. 
+ + Check the additional flag in bnxt_hwrm_func_qcfg() before allowing + DCBX to be done in host mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 38a21b34aacd4db7b7b74c61afae42ea6718448d +Author: Deepak Khungar +Date: Fri Apr 21 20:11:24 2017 -0400 + + bnxt_en: Add 100G link speed reporting for BCM57454 ASIC in ethtool + + Added support for 100G link speed reporting for Broadcom BCM57454 + ASIC in ethtool command. + + Signed-off-by: Deepak Khungar + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f0249056eaf2b9a17b2b76a6e099e9b7877e187d +Author: Michael Chan +Date: Fri Apr 21 20:11:23 2017 -0400 + + bnxt_en: Fix VF attributes reporting. + + The .ndo_get_vf_config() is returning the wrong qos attribute. Fix + the code that checks and reports the qos and spoofchk attributes. The + BNXT_VF_QOS and BNXT_VF_LINK_UP flags should not be set by default + during init. time. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a82fba8dbfb522bd19b1644bf599135680fd0122 +Author: Michael Chan +Date: Fri Apr 21 20:11:22 2017 -0400 + + bnxt_en: Pass DCB RoCE app priority to firmware. + + When the driver gets the RoCE app priority set/delete call through DCBNL, + the driver will send the information to the firmware to set up the + priority VLAN tag for RDMA traffic. + + [ New version using the common ETH_P_IBOE constant in if_ether.h ] + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 68a946bb81e07ed0e59a99e0c068d091ed42cc1b +Author: Michael Chan +Date: Tue Apr 4 18:14:17 2017 -0400 + + bnxt_en: Cap the msix vector with the max completion rings. + + The current code enables up to the maximum MSIX vectors in the PCIE + config space without considering the max completion rings available. + An MSIX vector is only useful when it has an associated completion + ring, so it is better to cap it. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 932dbf83ba18bdb871e0c03a4ffdd9785f7a9c07 +Author: Michael Chan +Date: Tue Apr 4 18:14:16 2017 -0400 + + bnxt_en: Use short TX BDs for the XDP TX ring. + + No offload is performed on the XDP_TX ring so we can use the short TX + BDs. This has the effect of doubling the size of the XDP TX ring so + that it now matches the size of the rx ring by default. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 67fea463fd873492ab641459a6d1af0e9ea3c9ce +Author: Michael Chan +Date: Tue Apr 4 18:14:15 2017 -0400 + + bnxt_en: Add interrupt test to ethtool -t selftest. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 91725d89b97acea168a94c577d999801c3b3bcfb +Author: Michael Chan +Date: Tue Apr 4 18:14:14 2017 -0400 + + bnxt_en: Add PHY loopback to ethtool self-test. + + It is necessary to disable autoneg before enabling PHY loopback, + otherwise link won't come up. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f7dc1ea6c4c1f31371b7098d6fae0d49dc6cdff1 +Author: Michael Chan +Date: Tue Apr 4 18:14:13 2017 -0400 + + bnxt_en: Add ethtool mac loopback self test. + + The mac loopback self test operates in polling mode. To support that, + we need to add functions to open and close the NIC half way. The half + open mode allows the rings to operate without IRQ and NAPI. We + use the XDP transmit function to send the loopback packet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit eb51365846bc418687af4c4f41b68b6e84cdd449 +Author: Michael Chan +Date: Tue Apr 4 18:14:12 2017 -0400 + + bnxt_en: Add basic ethtool -t selftest support. + + Add the basic infrastructure and only firmware tests initially. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f65a2044a8c988adf16788c51c04ac10dbbdb494 +Author: Michael Chan +Date: Tue Apr 4 18:14:11 2017 -0400 + + bnxt_en: Add suspend/resume callbacks. 
+ + Add suspend/resume callbacks using the newer dev_pm_ops method. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5282db6c794fed3ea8b399bc5305c4078e084f7b +Author: Michael Chan +Date: Tue Apr 4 18:14:10 2017 -0400 + + bnxt_en: Add ethtool set_wol method. + + And add functions to set and free magic packet filter. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8e202366dd752564d7f090ba280cc51cbf7bbbd9 +Author: Michael Chan +Date: Tue Apr 4 18:14:09 2017 -0400 + + bnxt_en: Add ethtool get_wol method. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d196ece740bf337aa25731cd8cb44660a2a227dd +Author: Michael Chan +Date: Tue Apr 4 18:14:08 2017 -0400 + + bnxt_en: Add pci shutdown method. + + Add pci shutdown method to put device in the proper WoL and power state. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c1ef146a5bd3b286d5c3eb2c9f631b38647c76d3 +Author: Michael Chan +Date: Tue Apr 4 18:14:07 2017 -0400 + + bnxt_en: Add basic WoL infrastructure. + + Add code to driver probe function to check if the device is WoL capable + and if Magic packet WoL filter is currently set. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8eb992e876a88de7539b1b9e132dd171d865cd2f +Author: Michael Chan +Date: Tue Apr 4 18:14:06 2017 -0400 + + bnxt_en: Update firmware interface spec to 1.7.6.2. + + Features added include WoL and selftest. + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 282ccf6efb7c5d75b0283b66ed487957163ce8fe +Author: Florian Westphal +Date: Wed Mar 29 17:17:31 2017 +0200 + + drivers: add explicit interrupt.h includes + + These files all use functions declared in interrupt.h, but currently rely + on implicit inclusion of this file (via netns/xfrm.h). + + That won't work anymore when the flow cache is removed so include that + header where needed. 
+ + Signed-off-by: Florian Westphal + Signed-off-by: David S. Miller + +commit 3ed3a83e3f3871c57b18cef09b148e96921236ed +Author: Michael Chan +Date: Tue Mar 28 19:47:31 2017 -0400 + + bnxt_en: Fix DMA unmapping of the RX buffers in XDP mode during shutdown. + + In bnxt_free_rx_skbs(), which is called to free up all RX buffers during + shutdown, we need to unmap the page if we are running in XDP mode. + + Fixes: c61fb99cae51 ("bnxt_en: Add RX page mode support.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 23e12c893489ed12ecfccbf866fc62af1bead4b0 +Author: Sankar Patchineelam +Date: Tue Mar 28 19:47:30 2017 -0400 + + bnxt_en: Correct the order of arguments to netdev_err() in bnxt_set_tpa() + + Signed-off-by: Sankar Patchineelam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2247925f0942dc4e7c09b1cde45ca18461d94c5f +Author: Sankar Patchineelam +Date: Tue Mar 28 19:47:29 2017 -0400 + + bnxt_en: Fix NULL pointer dereference in reopen failure path + + Net device reset can fail when the h/w or f/w is in a bad state. + Subsequent netdevice open fails in bnxt_hwrm_stat_ctx_alloc(). + The cleanup invokes bnxt_hwrm_resource_free() which inturn + calls bnxt_disable_int(). In this routine, the code segment + + if (ring->fw_ring_id != INVALID_HW_RING_ID) + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + + results in NULL pointer dereference as cpr->cp_doorbell is not yet + initialized, and fw_ring_id is zero. + + The fix is to initialize cpr fw_ring_id to INVALID_HW_RING_ID before + bnxt_init_chip() is invoked. + + Signed-off-by: Sankar Patchineelam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 56f36acd215cf7c28372b2fdb4f33f6900e97e05 +Author: Amritha Nambiar +Date: Wed Mar 15 10:39:25 2017 -0700 + + mqprio: Modify mqprio to pass user parameters via ndo_setup_tc. 
+ + The configurable priority to traffic class mapping and the user specified + queue ranges are used to configure the traffic class, overriding the + hardware defaults when the 'hw' option is set to 0. However, when the 'hw' + option is non-zero, the hardware QOS defaults are used. + + This patch makes it so that we can pass the data the user provided to + ndo_setup_tc. This allows us to pull in the queue configuration if the + user requested it as well as any additional hardware offload type + requested by using a value other than 1 for the hw value. + + Finally it also provides a means for the device driver to return the level + supported for the offload type via the qopt->hw value. Previously we were + just always assuming the value to be 1, in the future values beyond just 1 + may be supported. + + Signed-off-by: Amritha Nambiar + Signed-off-by: Alexander Duyck + Signed-off-by: David S. Miller + +commit 520ad89a54edea84496695d528f73ddcf4a52ea4 +Author: Michael Chan +Date: Wed Mar 8 18:44:35 2017 -0500 + + bnxt_en: Ignore 0 value in autoneg supported speed from firmware. + + In some situations, the firmware will return 0 for autoneg supported + speed. This may happen if the firmware detects no SFP module, for + example. The driver should ignore this so that we don't end up with + an invalid autoneg setting with nothing advertised. When SFP module + is inserted, we'll get the updated settings from firmware at that time. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bc39f885a9c3bdbff0a96ecaf07b162a78eff6e4 +Author: Michael Chan +Date: Wed Mar 8 18:44:34 2017 -0500 + + bnxt_en: Check if firmware LLDP agent is running. + + Set DCB_CAP_DCBX_HOST capability flag only if the firmware LLDP agent + is not running. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b386cd362ffea09d05c56bfa85d104562e860647 +Author: Michael Chan +Date: Wed Mar 8 18:44:33 2017 -0500 + + bnxt_en: Call bnxt_ulp_stop() during tx timeout. 
+ + If we call bnxt_reset_task() due to tx timeout, we should call + bnxt_ulp_stop() to inform the RDMA driver about the error and the + impending reset. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c2217a675bac22afb149166e0de71809189850d +Author: Michael Chan +Date: Wed Mar 8 18:44:32 2017 -0500 + + bnxt_en: Perform function reset earlier during probe. + + The firmware call to do function reset is done too late. It is causing + the rings that have been reserved to be freed. In NPAR mode, this bug + is causing us to run out of rings. + + Fixes: 391be5c27364 ("bnxt_en: Implement new scheme to reserve tx rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1faaa78f36cb2915ae89138ba5846f87ade85dcb +Author: Tobias Klauser +Date: Tue Feb 21 15:27:28 2017 +0100 + + bnxt_en: use eth_hw_addr_random() + + Use eth_hw_addr_random() to set a random MAC address in order to make + sure bp->dev->addr_assign_type will be properly set to NET_ADDR_RANDOM. + + Signed-off-by: Tobias Klauser + Signed-off-by: David S. Miller + +commit 17086399c113d933e1202697f85b8f0f82fcb8ce +Author: Sathya Perla +Date: Mon Feb 20 19:25:18 2017 -0500 + + bnxt_en: fix pci cleanup in bnxt_init_one() failure path + + In the bnxt_init_one() failure path, bar1 and bar2 are not + being unmapped. This commit fixes this issue. Reorganize the + code so that bnxt_init_one()'s failure path and bnxt_remove_one() + can call the same function to do the PCI cleanup. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit daf1f1e7841138cb0e48d52c8573a5f064d8f495 +Author: Michael Chan +Date: Mon Feb 20 19:25:17 2017 -0500 + + bnxt_en: Fix NULL pointer dereference in a failure path during open. + + If bnxt_hwrm_ring_free() is called during a failure path in bnxt_open(), + it is possible that the completion rings have not been allocated yet. 
+ In that case, the completion doorbell has not been initialized, and + calling bnxt_disable_int() will crash. Fix it by checking that the + completion ring has been initialized before writing to the completion + ring doorbell. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4e00338a61998de3502d0428c4f71ffc69772316 +Author: Ray Jui +Date: Mon Feb 20 19:25:16 2017 -0500 + + bnxt_en: Reject driver probe against all bridge devices + + There are additional SoC devices that use the same device ID for + bridge and NIC devices. The bnxt driver should reject probe against + all bridge devices since it's meant to be used with only endpoint + devices. + + Signed-off-by: Ray Jui + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 32b40798c1b40343641f04cdfd09652af70ea0e9 +Author: Deepak Khungar +Date: Sun Feb 12 19:18:18 2017 -0500 + + bnxt_en: Added PCI IDs for BCM57452 and BCM57454 ASICs + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b451c8b69e70de299aa6061e1fa6afbb4d7c1f9e +Author: Michael Chan +Date: Sun Feb 12 19:18:17 2017 -0500 + + bnxt_en: Fix bnxt_setup_tc() error message. + + Add proper puctuation to make the message more clear. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e70c752f88ed23e6a0f081fa408282c2450c8ce9 +Author: Michael Chan +Date: Sun Feb 12 19:18:16 2017 -0500 + + bnxt_en: Print FEC settings as part of the linkup dmesg. + + Print FEC (Forward Error Correction) autoneg and encoding settings during + link up. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 33dac24abbd5a77eefca18fb7ebbd01a3cf1b343 +Author: Michael Chan +Date: Sun Feb 12 19:18:15 2017 -0500 + + bnxt_en: Do not setup PHY unless driving a single PF. + + If it is a VF or an NPAR function, the firmware call to setup the PHY + will fail. 
Adding this check will prevent unnecessary firmware calls + to setup the PHY unless calling from the PF. This will also eliminate + many unnecessary warning messages when the call from a VF or NPAR fails. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 61aad724ec0a685bc83b02b059a3ca0ad3bde6b0 +Author: Michael Chan +Date: Sun Feb 12 19:18:14 2017 -0500 + + bnxt_en: Add hardware NTUPLE filter for encapsulated packets. + + If skb_flow_dissect_flow_keys() returns with the encapsulation flag + set, pass the information to the firmware to setup the NTUPLE filter + accordingly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 964fd4801d40ead69a447482c0dd0cd4be495e47 +Author: Michael Chan +Date: Sun Feb 12 19:18:13 2017 -0500 + + bnxt_en: Allow NETIF_F_NTUPLE to be enabled on VFs. + + Commit ae10ae740ad2 ("bnxt_en: Add new hardware RFS mode.") has added + code to allow NTUPLE to be enabled on VFs. So we now remove the + BNXT_VF() check in rfs_capable() to allow NTUPLE on VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a79a5276aa2f844bd368c1d3d5a625e1fbefd989 +Author: Michael Chan +Date: Sun Feb 12 19:18:12 2017 -0500 + + bnxt_en: Fix ethtool -l pre-set max combined channel. + + With commit d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings."), + ring allocation for combined rings has become stricter. A combined + ring must now have an rx-tx ring pair. The pre-set max. for combined + rings should now be min(rx, tx). + + Fixes: d1e7925e6d80 ("bnxt_en: Centralize logic to reserve rings.") + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cb4d1d6261453677feb54e7a09c23fc7648dd6bc +Author: Kshitij Soni +Date: Sun Feb 12 19:18:11 2017 -0500 + + bnxt_en: Retry failed NVM_INSTALL_UPDATE with defragmentation flag. 
+ + If the HWRM_NVM_INSTALL_UPDATE command fails with the error code + NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR, retry the command with + a new flag to allow defragmentation. Since we are checking the + response for error code, we also need to take the mutex until + we finish reading the response. + + Signed-off-by: Kshitij Soni + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bac9a7e0f5d6da82478d5e0a2a236158f42d5757 +Author: Michael Chan +Date: Sun Feb 12 19:18:10 2017 -0500 + + bnxt_en: Update to firmware interface spec 1.7.0. + + The new spec has NVRAM defragmentation support which will be used in + the next patch to improve ethtool flash operation. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 38413406277fd060f46855ad527f6f8d4cf2652d +Author: Michael Chan +Date: Mon Feb 6 16:55:43 2017 -0500 + + bnxt_en: Add support for XDP_TX action. + + Add dedicated transmit function and transmit completion handler for + XDP. The XDP transmit logic and completion logic are different than + regular TX ring. The TX buffer is recycled back to the RX ring when + it completes. + + v3: Improved the buffer recyling scheme for XDP_TX. + + v2: Add trace_xdp_exception(). + Add dma_sync. + + Signed-off-by: Michael Chan + Tested-by: Andy Gospodarek + Signed-off-by: David S. Miller + +commit c6d30e8391b85e00eb544e6cf047ee0160ee9938 +Author: Michael Chan +Date: Mon Feb 6 16:55:42 2017 -0500 + + bnxt_en: Add basic XDP support. + + Add basic ndo_xdp support to setup and query program, configure the NIC + to run in rx page mode, and support XDP_PASS, XDP_DROP, XDP_ABORTED + actions only. + + v3: Pass modified offset and length to stack for XDP_PASS. + Remove Kconfig option. + + v2: Added trace_xdp_exception() + Added dma_syncs. + Added XDP headroom support. + + Signed-off-by: Michael Chan + Tested-by: Andy Gospodarek + Signed-off-by: David S. 
Miller + +commit fa3e93e86cc3d1809fba67cb138883ed4bb74a5f +Author: Michael Chan +Date: Mon Feb 6 16:55:41 2017 -0500 + + bnxt_en: Refactor tx completion path. + + XDP_TX requires a different function to handle completion. Add a + function pointer to handle tx completion logic. Regular TX rings + will be assigned the current bnxt_tx_int() for the ->tx_int() + function pointer. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5f4492493e75dafc5cbb96eabe0f146c2ffb1e3d +Author: Michael Chan +Date: Mon Feb 6 16:55:40 2017 -0500 + + bnxt_en: Add a set of TX rings to support XDP. + + Add logic for an extra set of TX rings for XDP. If enabled, this + set of TX rings equals the number of RX rings and shares the same + IRQ as the RX ring set. A new field bp->tx_nr_rings_xdp is added + to keep track of these TX XDP rings. Adjust all other relevant functions + to handle bp->tx_nr_rings_xdp. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a960dec98861b009b4227d2ae3b94a142c83eb96 +Author: Michael Chan +Date: Mon Feb 6 16:55:39 2017 -0500 + + bnxt_en: Add tx ring mapping logic. + + To support XDP_TX, we need to add a set of dedicated TX rings, each + associated with the NAPI of an RX ring. To assign XDP rings and regular + rings in a flexible way, we add a bp->tx_ring_map[] array to do the + remapping. The netdev txq index is stored in the new field txq_index + so that we can retrieve the netdev txq when handling TX completions. + In this patch, before we introduce XDP_TX, the mapping is 1:1. + + v2: Fixed a bug in bnxt_tx_int(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d1e7925e6d80ce5f9ef6deb8f3cec7526f5c443c +Author: Michael Chan +Date: Mon Feb 6 16:55:38 2017 -0500 + + bnxt_en: Centralize logic to reserve rings. + + Currently, bnxt_setup_tc() and bnxt_set_channels() have similar and + duplicated code to check and reserve rx and tx rings. 
Add a new + function bnxt_reserve_rings() to centralize the logic. This will + make it easier to add XDP_TX support which requires allocating a + new set of TX rings. + + Also, the tx ring checking logic in bnxt_setup_msix() can be removed. + The rings have been reserved before hand. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4e5dbbda4c40a239e2ed4bbc98f2aa320e4dcca2 +Author: Michael Chan +Date: Mon Feb 6 16:55:37 2017 -0500 + + bnxt_en: Use event bit map in RX path. + + In the current code, we have separate rx_event and agg_event parameters + to keep track of rx and aggregation events. Combine these events into + an u8 event mask with different bits defined for different events. This + way, it is easier to expand the logic to include XDP tx events. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c61fb99cae51958a9096d8540c8c05e74cfa7e59 +Author: Michael Chan +Date: Mon Feb 6 16:55:36 2017 -0500 + + bnxt_en: Add RX page mode support. + + This mode is to support XDP. In this mode, each rx ring is configured + with page sized buffers for linear placement of each packet. MTU will be + restricted to what the page sized buffers can support. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b3dba77cf0acb6e44b368979026df975658332bc +Author: Michael Chan +Date: Mon Feb 6 16:55:35 2017 -0500 + + bnxt_en: Parameterize RX buffer offsets. + + Convert the global constants BNXT_RX_OFFSET and BNXT_RX_DMA_OFFSET to + device parameters. This will make it easier to support XDP with + headroom support which requires different RX buffer offsets. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 745fc05c9db1f17da076861c7f57507e13f28a3a +Author: Michael Chan +Date: Mon Feb 6 16:55:34 2017 -0500 + + bnxt_en: Add bp->rx_dir field for rx buffer DMA direction. + + When driver is running in XDP mode, rx buffers are DMA mapped as + DMA_BIDIRECTIONAL. 
Add a field so the code will map/unmap rx buffers + according to this field. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 11cd119d31a71b37c2362fc621f225e2aa12aea1 +Author: Michael Chan +Date: Mon Feb 6 16:55:33 2017 -0500 + + bnxt_en: Don't use DEFINE_DMA_UNMAP_ADDR to store DMA address in RX path. + + To support XDP_TX, we need the RX buffer's DMA address to transmit the + packet. Convert the DMA address field to a permanent field in + bnxt_sw_rx_bd. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6bb19474391d17954fee9a9997ecca25b35dfd46 +Author: Michael Chan +Date: Mon Feb 6 16:55:32 2017 -0500 + + bnxt_en: Refactor rx SKB function. + + Minor refactoring of bnxt_rx_skb() so that it can easily be replaced by + a new function that handles packets in a single page. Also, use a + function pointer bp->rx_skb_func() to switch to a new function when + we add the new mode in the next patch. + + Add a new field data_ptr that points to the packet data in the + bnxt_sw_rx_bd structure. The original data field is changed to void + pointer so that it can either hold the kmalloc'ed data or a page + pointer. + + The last parameter of bnxt_rx_skb() which was the length parameter is + changed to include the payload offset of the packet in the upper 16 bit. + The offset is needed to support the rx page mode and is not used in + this existing function. + + v3: Added a new data_ptr parameter to bp->rx_skb_func(). The caller + has the option to modify the starting address of the packet. This + will be needed when XDP with headroom support is added. + + v2: Changed the name of the last parameter to offset_and_len to make the + code more clear. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 6ad20165d376fa07919a70e4f43dfae564601829 +Author: Eric Dumazet +Date: Mon Jan 30 08:22:01 2017 -0800 + + drivers: net: generalize napi_complete_done() + + napi_complete_done() allows to opt-in for gro_flush_timeout, + added back in linux-3.19, commit 3b47d30396ba + ("net: gro: add a per device gro flush timer") + + This allows for more efficient GRO aggregation without + sacrifying latencies. + + Signed-off-by: Eric Dumazet + Signed-off-by: David S. Miller + +commit 90c694bb71819fb5bd3501ac397307d7e41ddeca +Author: Michael Chan +Date: Wed Jan 25 02:55:09 2017 -0500 + + bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status(). + + bnxt_get_port_module_status() calls bnxt_update_link() which expects + RTNL to be held. In bnxt_sp_task() that does not hold RTNL, we need to + call it with a prior call to bnxt_rtnl_lock_sp() and the call needs to + be moved to the end of bnxt_sp_task(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 0eaa24b971ae251ae9d3be23f77662a655532063 +Author: Michael Chan +Date: Wed Jan 25 02:55:08 2017 -0500 + + bnxt_en: Fix RTNL lock usage on bnxt_update_link(). + + bnxt_update_link() is called from multiple code paths. Most callers, + such as open, ethtool, already hold RTNL. Only the caller bnxt_sp_task() + does not. So it is a bug to take RTNL inside bnxt_update_link(). + + Fix it by removing the RTNL inside bnxt_update_link(). The function + now expects the caller to always hold RTNL. + + In bnxt_sp_task(), call bnxt_rtnl_lock_sp() before calling + bnxt_update_link(). We also need to move the call to the end of + bnxt_sp_task() since it will be clearing the BNXT_STATE_IN_SP_TASK bit. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a551ee94ea723b4af9b827c7460f108bc13425ee +Author: Michael Chan +Date: Wed Jan 25 02:55:07 2017 -0500 + + bnxt_en: Fix bnxt_reset() in the slow path task. 
+ + In bnxt_sp_task(), we set a bit BNXT_STATE_IN_SP_TASK so that bnxt_close() + will synchronize and wait for bnxt_sp_task() to finish. Some functions + in bnxt_sp_task() require us to clear BNXT_STATE_IN_SP_TASK and then + acquire rtnl_lock() to prevent race conditions. + + There are some bugs related to this logic. This patch refactors the code + to have common bnxt_rtnl_lock_sp() and bnxt_rtnl_unlock_sp() to handle + the RTNL and the clearing/setting of the bit. Multiple functions will + need the same logic. We also need to move bnxt_reset() to the end of + bnxt_sp_task(). Functions that clear BNXT_STATE_IN_SP_TASK must be the + last functions to be called in bnxt_sp_task(). The common scheme will + handle the condition properly. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 719ca8111402aa6157bd83a3c966d184db0d8956 +Author: Michael Chan +Date: Tue Jan 17 22:07:19 2017 -0500 + + bnxt_en: Fix "uninitialized variable" bug in TPA code path. + + In the TPA GRO code path, initialize the tcp_opt_len variable to 0 so + that it will be correct for packets without TCP timestamps. The bug + caused the SKB fields to be incorrectly set up for packets without + TCP timestamps, leading to these packets being rejected by the stack. + + Reported-by: Andy Gospodarek + Acked-by: Andy Gospodarek + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2f5938467bd7f34e59a1d6d3809f5970f62e194b +Author: Michael Chan +Date: Fri Jan 13 01:32:04 2017 -0500 + + bnxt_en: Add the ulp_sriov_cfg hooks for bnxt_re RDMA driver. + + Add the ulp_sriov_cfg callbacks when the number of VFs is changing. This + allows the RDMA driver to provision RDMA resources for the VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5ad2cbeed74bd1e89ac4ba14288158ec7eb167da +Author: Michael Chan +Date: Fri Jan 13 01:32:03 2017 -0500 + + bnxt_en: Add support for ethtool -p. + + Add LED blinking code to support ethtool -p on the PF. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f183886c0d798ca3cf0a51e8cab3c1902fbd1e8b +Author: Michael Chan +Date: Fri Jan 13 01:32:02 2017 -0500 + + bnxt_en: Update to firmware interface spec to 1.6.1. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 341138c3e6afa8e77f9f3e773d72b37022dbcee8 +Author: Michael Chan +Date: Fri Jan 13 01:32:01 2017 -0500 + + bnxt_en: Clear TPA flags when BNXT_FLAG_NO_AGG_RINGS is set. + + Commit bdbd1eb59c56 ("bnxt_en: Handle no aggregation ring gracefully.") + introduced the BNXT_FLAG_NO_AGG_RINGS flag. For consistency, + bnxt_set_tpa_flags() should also clear TPA flags when there are no + aggregation rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b742995445fbac874f5fe19ce2afc76c7a6ac2cf +Author: Michael Chan +Date: Fri Jan 13 01:32:00 2017 -0500 + + bnxt_en: Fix compiler warnings when CONFIG_RFS_ACCEL is not defined. + + CC [M] drivers/net/ethernet/broadcom/bnxt/bnxt.o + drivers/net/ethernet/broadcom/bnxt/bnxt.c:4947:21: warning: ‘bnxt_get_max_func_rss_ctxs’ defined but not used [-Wunused-function] + static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) + ^ + CC [M] drivers/net/ethernet/broadcom/bnxt/bnxt.o + drivers/net/ethernet/broadcom/bnxt/bnxt.c:4956:21: warning: ‘bnxt_get_max_func_vnics’ defined but not used [-Wunused-function] + static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) + ^ + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5944701df90d9577658e2354cc27c4ceaeca30fe +Author: stephen hemminger +Date: Fri Jan 6 19:12:53 2017 -0800 + + net: remove useless memset's in drivers get_stats64 + + In dev_get_stats() the statistic structure storage has already been + zeroed. Therefore network drivers do not need to call memset() again. + + Signed-off-by: Stephen Hemminger + Signed-off-by: David S. 
Miller + +commit bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221 +Author: stephen hemminger +Date: Fri Jan 6 19:12:52 2017 -0800 + + net: make ndo_get_stats64 a void function + + The network device operation for reading statistics is only called + in one place, and it ignores the return value. Having a structure + return value is potentially confusing because some future driver could + incorrectly assume that the return value was used. + + Fix all drivers with ndo_get_stats64 to have a void function. + + Signed-off-by: Stephen Hemminger + Signed-off-by: David S. Miller + +commit bdbd1eb59c565c56a74d21076e2ae8706de00ecd +Author: Michael Chan +Date: Thu Dec 29 12:13:43 2016 -0500 + + bnxt_en: Handle no aggregation ring gracefully. + + The current code assumes that we will always have at least 2 rx rings, 1 + will be used as an aggregation ring for TPA and jumbo page placements. + However, it is possible, especially on a VF, that there is only 1 rx + ring available. In this scenario, the current code will fail to initialize. + To handle it, we need to properly set up only 1 ring without aggregation. + Set a new flag BNXT_FLAG_NO_AGG_RINGS for this condition and add logic to + set up the chip to place RX data linearly into a single buffer per packet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 486b5c22ea1d35e00e90dd79a32a9ee530b18915 +Author: Michael Chan +Date: Thu Dec 29 12:13:42 2016 -0500 + + bnxt_en: Set default completion ring for async events. + + With the added support for the bnxt_re RDMA driver, both drivers can be + allocating completion rings in any order. The firmware does not know + which completion ring should be receiving async events. Add an + extra step to tell firmware the completion ring number for receiving + async events after bnxt_en allocates the completion rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 391be5c2736456f032fe0265031ecfe17aee84a0 +Author: Michael Chan +Date: Thu Dec 29 12:13:41 2016 -0500 + + bnxt_en: Implement new scheme to reserve tx rings. + + In order to properly support TX rate limiting in SRIOV VF functions or + NPAR functions, firmware needs better control over tx ring allocations. + The new scheme requires the driver to reserve the number of tx rings + and to query to see if the requested number of tx rings is reserved. + The driver will use the new scheme when the firmware interface spec is + 1.6.1 or newer. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dda0e7465f040ed814d4a5c98c6bf042e59cba69 +Author: Michael Chan +Date: Thu Dec 29 12:13:40 2016 -0500 + + bnxt_en: Add IPV6 hardware RFS support. + + Accept ipv6 flows in .ndo_rx_flow_steer() and support ETHTOOL_GRXCLSRULE + ipv6 flows. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8427af811a2fcbbf0c71a4b1f904f2442abdcf39 +Author: Michael Chan +Date: Thu Dec 29 12:13:39 2016 -0500 + + bnxt_en: Assign additional vnics to VFs. + + Assign additional vnics to VFs whenever possible so that NTUPLE can be + supported on the VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ae10ae740ad2befd92b6f5b2ab39220bce6e5da2 +Author: Michael Chan +Date: Thu Dec 29 12:13:38 2016 -0500 + + bnxt_en: Add new hardware RFS mode. + + The existing hardware RFS mode uses one hardware RSS context block + per ring just to calculate the RSS hash. This is very wasteful and + prevents VF functions from using it. The new hardware mode shares + the same hardware RSS context for RSS placement and RFS steering. + This allows VFs to enable RFS. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8079e8f107bf02e1e5ece89239dd2fb475a4735f +Author: Michael Chan +Date: Thu Dec 29 12:13:37 2016 -0500 + + bnxt_en: Refactor code that determines RFS capability. 
+ + Add function bnxt_rfs_supported() that determines if the chip supports + RFS. Refactor the existing function bnxt_rfs_capable() that determines + if run-time conditions support RFS. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8fdefd63c203d9b2955d679704f4ed92bf40752c +Author: Michael Chan +Date: Thu Dec 29 12:13:36 2016 -0500 + + bnxt_en: Add function to get vnic capability. + + The new vnic RSS capability will enhance NTUPLE support, to be added + in subsequent patches. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5910906ca9ee32943f67db24917f78a9ad1087db +Author: Michael Chan +Date: Thu Dec 29 12:13:35 2016 -0500 + + bnxt_en: Refactor TPA code path. + + Call tcp_gro_complete() in the common code path instead of the chip- + specific method. The newer 5731x method is missing the call. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 68515a186cf8a8f97956eaea5829277752399f58 +Author: Michael Chan +Date: Thu Dec 29 12:13:34 2016 -0500 + + bnxt_en: Fix and clarify link_info->advertising. + + The advertising field is closely related to the auto_link_speeds field. + The former is the user setting while the latter is the firmware setting. + Both should be u16. We should use the advertising field in + bnxt_get_link_ksettings because the auto_link_speeds field may not + be updated with the latest from the firmware yet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9d8bc09766f1a229b2d204c713a1cfc6c7fa1bb1 +Author: Michael Chan +Date: Thu Dec 29 12:13:33 2016 -0500 + + bnxt_en: Improve the IRQ disable sequence during shutdown. + + The IRQ is disabled by writing to the completion ring doorbell. This + should be done before the hardware completion ring is freed for correctness. + The current code disables IRQs after all the completion rings are freed. + + Fix it by calling bnxt_disable_int_sync() before freeing the completion + rings. 
Rearrange the code to avoid forward declaration. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e7b9569102995ebc26821789628eef45bd9840d8 +Author: Michael Chan +Date: Thu Dec 29 12:13:32 2016 -0500 + + bnxt_en: Use napi_complete_done() + + For better busy polling and GRO support. Do not re-arm IRQ if + napi_complete_done() returns false. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b356a2e729cec145a648d22ba5686357c009da25 +Author: Michael Chan +Date: Thu Dec 29 12:13:31 2016 -0500 + + bnxt_en: Remove busy poll logic in the driver. + + Use native NAPI polling instead. The next patch will complete the work + by switching to use napi_complete_done() + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a588e4580a7ecb715dab8bf09725b97aa0e0e3a0 +Author: Michael Chan +Date: Wed Dec 7 00:26:21 2016 -0500 + + bnxt_en: Add interface to support RDMA driver. + + Since the network driver and RDMA driver operate on the same PCI function, + we need to create an interface to allow the RDMA driver to share resources + with the network driver. + + 1. Create a new bnxt_en_dev struct which will be returned by + bnxt_ulp_probe() upon success. After that, all calls from the RDMA driver + to bnxt_en will pass a pointer to this struct. + + 2. This struct contains additional function pointers to register, request + msix, send fw messages, register for async events. + + 3. If the RDMA driver wants to enable RDMA on the function, it needs to + call the function pointer bnxt_register_device(). A ulp_ops structure + is passed for RCU protected upcalls from bnxt_en to the RDMA driver. + + 4. The RDMA driver can call firmware APIs using the bnxt_send_fw_msg() + function pointer. + + 5. 1 stats context is reserved when the RDMA driver registers. MSIX + and completion rings are reserved when the RDMA driver calls + bnxt_request_msix() function pointer. + + 6. 
When the RDMA driver calls bnxt_unregister_device(), all RDMA resources + will be cleaned up. + + v2: Fixed 2 uninitialized variable warnings. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a1653b13f14c714f9bfd5e10c603a37c3bcba7b6 +Author: Michael Chan +Date: Wed Dec 7 00:26:20 2016 -0500 + + bnxt_en: Refactor the driver registration function with firmware. + + The driver register function with firmware consists of passing version + information and registering for async events. To support the RDMA driver, + the async events that we need to register may change. Separate the + driver register function into 2 parts so that we can just update the + async events for the RDMA driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e4060d306b5196966d74e05dee48e6c3a52aaad4 +Author: Michael Chan +Date: Wed Dec 7 00:26:19 2016 -0500 + + bnxt_en: Reserve RDMA resources by default. + + If the device supports RDMA, we'll setup network default rings so that + there are enough minimum resources for RDMA, if possible. However, the + user can still increase network rings to the max if he wants. The actual + RDMA resources won't be reserved until the RDMA driver registers. + + v2: Fix compile warning when BNXT_CONFIG_SRIOV is not set. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7b08f661ab80e87dcdba7ab9a460fe2c9d08bf5b +Author: Michael Chan +Date: Wed Dec 7 00:26:18 2016 -0500 + + bnxt_en: Improve completion ring allocation for VFs. + + All available remaining completion rings not used by the PF should be + made available for the VFs so that there are enough rings in the VF to + support RDMA. The earlier workaround code of capping the rings by the + statistics context is removed. + + When SRIOV is disabled, call a new function bnxt_restore_pf_fw_resources() + to restore FW resources. 
Later on we need to add some logic to account + for RDMA resources. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit aa8ed021ab515a93f2a052e9cc80320882889698 +Author: Michael Chan +Date: Wed Dec 7 00:26:17 2016 -0500 + + bnxt_en: Move function reset to bnxt_init_one(). + + Now that MSIX is enabled in bnxt_init_one(), resources may be allocated by + the RDMA driver before the network device is opened. So we cannot do + function reset in bnxt_open() which will clear all the resources. + + The proper place to do function reset now is in bnxt_init_one(). + If we get AER, we'll do function reset as well. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7809592d3e2ec79cd1feab0cc96169d22f6ffee1 +Author: Michael Chan +Date: Wed Dec 7 00:26:16 2016 -0500 + + bnxt_en: Enable MSIX early in bnxt_init_one(). + + To better support the new RDMA driver, we need to move pci_enable_msix() + from bnxt_open() to bnxt_init_one(). This way, MSIX vectors are available + to the RDMA driver whether the network device is up or down. + + Part of the existing bnxt_setup_int_mode() function is now refactored into + a new bnxt_init_int_mode(). bnxt_init_int_mode() is called during + bnxt_init_one() to enable MSIX. The remaining logic in + bnxt_setup_int_mode() to map the IRQs to the completion rings is called + during bnxt_open(). + + v2: Fixed compile warning when CONFIG_BNXT_SRIOV is not set. + + Signed-off-by: Somnath Kotur + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 33c2657eb688a063ab9cbe11fd4d18c93c7945e1 +Author: Michael Chan +Date: Wed Dec 7 00:26:15 2016 -0500 + + bnxt_en: Add bnxt_set_max_func_irqs(). + + By refactoring existing code into this new function. The new function + will be used in subsequent patches. + + v2: Fixed compile warning when CONFIG_BNXT_SRIOV is not set. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 89aa8445cd4e8c2556c40d42dd0ceb2cbb96ba78 +Author: Pan Bian +Date: Sat Dec 3 17:56:17 2016 +0800 + + netdev: broadcom: propagate error code + + Function bnxt_hwrm_stat_ctx_alloc() always returns 0, even if the call + to _hwrm_send_message() fails. It may be better to propagate the errors + to the caller of bnxt_hwrm_stat_ctx_alloc(). + + Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=188661 + + Signed-off-by: Pan Bian + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit c77192f2042537b1e0e5f520db91e4d28778195f +Author: Michael Chan +Date: Fri Dec 2 21:17:18 2016 -0500 + + bnxt_en: Add PFC statistics. + + Report PFC statistics to ethtool -S and DCBNL. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7df4ae9fe85567a1710048da8229bd85e0da9df7 +Author: Michael Chan +Date: Fri Dec 2 21:17:17 2016 -0500 + + bnxt_en: Implement DCBNL to support host-based DCBX. + + Support only IEEE DCBX initially. Add IEEE DCBNL ops and functions to + get and set the hardware DCBX parameters. The DCB code is conditional on + Kconfig CONFIG_BNXT_DCB. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87c374ded0b2cfe50bb1e7648a4ca06df13fa399 +Author: Michael Chan +Date: Fri Dec 2 21:17:16 2016 -0500 + + bnxt_en: Update firmware header file to latest 1.6.0. + + Latest interface has the latest DCB command structs. Get and store the + max number of lossless TCs the hardware can support. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c5e3deb8a38453037b89e0b0485d3b031896e8eb +Author: Michael Chan +Date: Fri Dec 2 21:17:15 2016 -0500 + + bnxt_en: Re-factor bnxt_setup_tc(). + + Add a new function bnxt_setup_mq_tc() to handle MQPRIO. This new function + will be called during ETS setup when we add DCBNL in the next patch. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 57aac71b3e9ed890cf2219dd980c36f859b43d6a +Author: Christophe Jaillet +Date: Tue Nov 22 06:14:40 2016 +0100 + + bnxt_en: Fix a VXLAN vs GENEVE issue + + Knowing that: + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN (0x1UL << 0) + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE (0x5UL << 0) + and that 'bnxt_hwrm_tunnel_dst_port_alloc()' is only called with one of + these 2 constants, the TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE can not + trigger. + + Replace the bit test that overlap by an equality test, just as in + 'bnxt_hwrm_tunnel_dst_port_free()' above. + + Signed-off-by: Christophe JAILLET + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit e5f6f564fd191d365fcd775c06a732a488205588 +Author: Eric Dumazet +Date: Wed Nov 16 06:31:52 2016 -0800 + + bnxt: add a missing rcu synchronization + + Add a missing synchronize_net() call to avoid potential use after free, + since we explicitly call napi_hash_del() to factorize the RCU grace + period. + + Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Eric Dumazet + Cc: Michael Chan + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit a011952a1a465258ab006a8613a41aa5367d2274 +Author: Michael Chan +Date: Wed Nov 16 21:13:10 2016 -0500 + + bnxt_en: Add ethtool -n|-N rx-flow-hash support. + + To display and modify the RSS hash. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87da7f796d5e44311ea69afb6f4220d43a89382e +Author: Michael Chan +Date: Wed Nov 16 21:13:09 2016 -0500 + + bnxt_en: Add UDP RSS support for 57X1X chips. + + The newer chips have proper support for 4-tuple UDP RSS. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 286ef9d64ea7435a1e323d12b44a309e15cbff0e +Author: Michael Chan +Date: Wed Nov 16 21:13:08 2016 -0500 + + bnxt_en: Enhance autoneg support. + + On some dual port NICs, the speed setting on one port can affect the + available speed on the other port. 
Add logic to detect these changes + and adjust the advertised speed settings when necessary. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 16d663a69f4a1f3534e780e35d50142b98cf1279 +Author: Michael Chan +Date: Wed Nov 16 21:13:07 2016 -0500 + + bnxt_en: Update firmware interface spec to 1.5.4. + + Use the new FORCE_LINK_DWN bit to shutdown link during close. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 73b9bad63ae3c902ce64221d10a0d371d059748d +Author: Michael Chan +Date: Fri Nov 11 00:11:43 2016 -0500 + + bnxt_en: Fix VF virtual link state. + + If the physical link is down and the VF virtual link is set to "enable", + the current code does not always work. If the link is down but the + cable is attached, the firmware returns LINK_SIGNAL instead of + NO_LINK. The current code is treating LINK_SIGNAL as link up. + The fix is to treat link as down when the link_status != LINK. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3ffb6a39b751b635a0c50b650064c38b8d371ef2 +Author: Michael Chan +Date: Fri Nov 11 00:11:42 2016 -0500 + + bnxt_en: Fix ring arithmetic in bnxt_setup_tc(). + + The logic is missing the check on whether the tx and rx rings are sharing + completion rings or not. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ef8d759b5251ee9d6784fe53d90220bd91ee477f +Author: Eric Dumazet +Date: Tue Nov 8 11:06:53 2016 -0800 + + bnxt_en: do not call napi_hash_add() + + This is automatically done from netif_napi_add(), and we want to not + export napi_hash_add() anymore in the following patch. + + Signed-off-by: Eric Dumazet + Cc: Michael Chan + Acked-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit e1c6dccaf3af291488fbad155d7ee6bc29db262a +Author: Jarod Wilson +Date: Mon Oct 17 15:54:04 2016 -0400 + + ethernet/broadcom: use core min/max MTU checking + + tg3: min_mtu 60, max_mtu 9000/1500 + + bnxt: min_mtu 60, max_mtu 9000 + + bnx2x: min_mtu 46, max_mtu 9600 + - Fix up ETH_OVREHEAD -> ETH_OVERHEAD while we're in here, remove + duplicated defines from bnx2x_link.c. + + bnx2: min_mtu 46, max_mtu 9000 + - Use more standard ETH_* defines while we're at it. + + bcm63xx_enet: min_mtu 46, max_mtu 2028 + - compute_hw_mtu was made largely pointless, and thus merged back into + bcm_enet_change_mtu. + + b44: min_mtu 60, max_mtu 1500 + + CC: netdev@vger.kernel.org + CC: Michael Chan + CC: Sony Chacko + CC: Ariel Elior + CC: Dept-HSGLinuxNICDev@qlogic.com + CC: Siva Reddy Kallam + CC: Prashant Sreedharan + Signed-off-by: Jarod Wilson + Signed-off-by: David S. Miller + +commit 79aab093a0b5370d7fc4e99df75996f4744dc03f +Author: Moshe Shemesh +Date: Thu Sep 22 12:11:15 2016 +0300 + + net: Update API for VF vlan protocol 802.1ad support + + Introduce new rtnl UAPI that exposes a list of vlans per VF, giving + the ability for user-space application to specify it for the VF, as an + option to support 802.1ad. + We adjusted IP Link tool to support this option. + + For future use cases, the new UAPI supports multiple vlans. For now we + limit the list size to a single vlan in kernel. + Add IFLA_VF_VLAN_LIST in addition to IFLA_VF_VLAN to keep backward + compatibility with older versions of IP Link tool. + + Add a vlan protocol parameter to the ndo_set_vf_vlan callback. + We kept 802.1Q as the drivers' default vlan protocol. 
+ Suitable ip link tool command examples: + Set vf vlan protocol 802.1ad: + ip link set eth0 vf 1 vlan 100 proto 802.1ad + Set vf to VST (802.1Q) mode: + ip link set eth0 vf 1 vlan 100 proto 802.1Q + Or by omitting the new parameter + ip link set eth0 vf 1 vlan 100 + + Signed-off-by: Moshe Shemesh + Signed-off-by: Tariq Toukan + Signed-off-by: David S. Miller + +commit 878786d95e07ce2f5fb6e3cd8a6c2ed320339196 +Author: Rob Swindell +Date: Tue Sep 20 03:36:33 2016 -0400 + + bnxt_en: Fix build error for kernesl without RTC-LIB + + bnxt_hwrm_fw_set_time() now returns -EOPNOTSUPP when built for kernel + without RTC_LIB. Setting the firmware time is not critical to the + successful completion of the firmware update process. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 350a714960eb8a980c913c9be5a96bb18b2fe9da +Author: Eddie Wai +Date: Mon Sep 19 03:58:09 2016 -0400 + + bnxt_en: Fixed the VF link status after a link state change + + The VF link state can be changed via the 'ip link set' cmd. + Currently, the new link state does not take effect immediately. + + The fix is for the PF to send a link change async event to the + designated VF after a VF link state change. This async event will + trigger the VF to update the link status. + + Signed-off-by: Eddie Wai + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ae8e98a6fa7a73917196c507e43414ea96b6a0fc +Author: Deepak Khungar +Date: Mon Sep 19 03:58:08 2016 -0400 + + bnxt_en: Support for "ethtool -r" command + + Restart autoneg if autoneg is enabled. + + Signed-off-by: Deepak Khungar + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4ffcd582301bd020b1f9d00c55473af305ec19b5 +Author: Michael Chan +Date: Mon Sep 19 03:58:07 2016 -0400 + + bnxt_en: Pad TX packets below 52 bytes. + + The hardware has a limitation that it won't pass host to BMC loopback + packets below 52-bytes. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 001154eb242b5a6667b74e5cf20873fb75f1b9d3 +Author: Michael Chan +Date: Mon Sep 19 03:58:06 2016 -0400 + + bnxt_en: Call firmware to approve the random VF MAC address. + + After generating the random MAC address for VF, call the firmware to + approve it. This step serves 2 purposes. Some hypervisor (e.g. ESX) + wants to approve the MAC address. 2nd, the call will setup the + proper forwarding database in the internal switch. + + We need to unlock the hwrm_cmd_lock mutex before calling bnxt_approve_mac(). + We can do that because we are at the end of the function and all the + previous firmware response data has been copied. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7cc5a20e38fcaf395ac59e7ed6c3decb575a0dc7 +Author: Michael Chan +Date: Mon Sep 19 03:58:05 2016 -0400 + + bnxt_en: Re-arrange bnxt_hwrm_func_qcaps(). + + Re-arrange the code so that the generation of the random MAC address for + the VF is at the end of the function. The next patch will add one more step + to call bnxt_approve_mac() to get the firmware to approve the random MAC + address. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 47f8e8b9bbbbe00740786bd1da0d5097d45ba46b +Author: Michael Chan +Date: Mon Sep 19 03:58:04 2016 -0400 + + bnxt_en: Fix ethtool -l|-L inconsistent channel counts. + + The existing code is inconsistent in reporting and accepting the combined + channel count. bnxt_get_channels() reports maximum combined as the + maximum rx count. bnxt_set_channels() accepts combined count that + cannot be bigger than max rx or max tx. + + For example, if max rx = 2 and max tx = 1, we report max supported + combined to be 2. But if the user tries to set combined to 2, it will + fail because 2 is bigger than max tx which is 1. + + Fix the code to be consistent. Max allowed combined = max(max_rx, max_tx). 
+ We will accept a combined channel count <= max(max_rx, max_tx). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5ac67d8bc753b122175e682274599338b3ee7d42 +Author: Rob Swindell +Date: Mon Sep 19 03:58:03 2016 -0400 + + bnxt_en: Added support for Secure Firmware Update + + Using Ethtool flashdev command, entire NVM package (*.pkg) files + may now be staged into the "update" area of the NVM and subsequently + verified and installed by the firmware using the newly introduced + command: NVM_INSTALL_UPDATE. + + We also introduce use of the new firmware command FW_SET_TIME so that the + NVM-resident package installation log contains valid time-stamps. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 441cabbbf1bd0b99e283c9116fe430e53ee67a4a +Author: Michael Chan +Date: Mon Sep 19 03:58:02 2016 -0400 + + bnxt_en: Update to firmware interface spec 1.5.1. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit adbc830545003c4b7494c903654bea22e5a66bb4 +Author: Michael Chan +Date: Mon Sep 19 03:58:01 2016 -0400 + + bnxt_en: Simplify PCI device names and add additinal PCI IDs. + + Remove "Single-port/Dual-port" from the device names. Dual-port devices + will appear as 2 separate devices, so no need to call each a dual-port + device. Use a more generic name for VF devices belonging to the same + chip fanmily. Add some remaining NPAR device IDs. + + Signed-off-by: David Christensen + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8d6be8b627389c6dc7e0ea2455a7542c8a2a16a7 +Author: Michael Chan +Date: Mon Sep 19 03:58:00 2016 -0400 + + bnxt_en: Use RSS flags defined in the bnxt_hsi.h file. + + And remove redundant definitions of the same flags. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 9d13744bb75078175ab49408f2abb980e4dbccc9 +Author: Michael Chan +Date: Mon Sep 5 01:57:35 2016 -0400 + + bnxt_en: Fix TX push operation on ARM64. + + There is a code path where we are calling __iowrite64_copy() on + an address that is not 64-bit aligned. This causes an exception on + some architectures such as arm64. Fix that code path by using + __iowrite32_copy(). + + Reported-by: JD Zheng + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1f681688aaf1126df981615064a68a0dced458ef +Author: Michael Chan +Date: Mon Jul 25 12:33:37 2016 -0400 + + bnxt_en: Add new NPAR and dual media device IDs. + + Add 5741X/5731X NPAR device IDs and dual media SFP/10GBase-T device IDs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a23049091d57f4bdc47f16fce01c371647d15dd7 +Author: Vasundhara Volam +Date: Mon Jul 25 12:33:36 2016 -0400 + + bnxt_en: Log a message, if enabling NTUPLE filtering fails. + + If there are not enough resources to enable ntuple filtering, + log a warning message. + + v2: Use single message and add missing newline. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a54c4d74989b769014b359e5b66f3e571d903d25 +Author: Michael Chan +Date: Mon Jul 25 12:33:35 2016 -0400 + + bnxt_en: Improve ntuple filters by checking destination MAC address. + + Include the destination MAC address in the ntuple filter structure. The + current code assumes that the destination MAC address is always the MAC + address of the NIC. This may not be true if there are macvlans, for + example. Add destination MAC address checking and configure the filter + correctly using the correct index for the destination MAC address. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit cbce91cad4ee39070bf3c7873767194e4be88e16 +Author: Florian Fainelli +Date: Mon Jul 18 13:02:47 2016 -0700 + + bnxt_en: Remove locking around txr->dev_state + + txr->dev_state was not consistently manipulated with the acquisition of + the per-queue lock, after further inspection the lock does not seem + necessary, either the value is read as BNXT_DEV_STATE_CLOSING or 0. + + Reported-by: coverity (CID 1339583) + Fixes: c0c050c58d840 ("bnxt_en: New Broadcom ethernet driver.") + Signed-off-by: Florian Fainelli + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit fa853dda19a1878d2a586de19f02bc9fed052425 +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:25 2016 -0400 + + bnxt_en: Add BCM58700 PCI device ID for NS2 Nitro. + + A bridge device in NS2 has the same device ID as the ethernet controller. + Add check to avoid probing the bridge device. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dc52c6c70e0066e9cef886907f820411bebe8e07 +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:24 2016 -0400 + + bnxt_en: Workaround Nitro A0 RX hardware bug (part 4). + + Allocate special vnic for dropping packets not matching the RX filters. + First vnic is for normal RX packets and the driver will drop all + packets on the 2nd vnic. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 10bbdaf5e4879fd7fc51f25c84d7b10de16cbe0e +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:23 2016 -0400 + + bnxt_en: Workaround Nitro A0 hardware RX bug (part 3). + + Allocate napi for special vnic, packets arriving on this + napi will simply be dropped and the buffers will be replenished back + to the HW. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 765951938e2fe2e30571ef4a7de6a46659ce4c68 +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:22 2016 -0400 + + bnxt_en: Workaround Nitro A0 hardware RX bug (part 2). + + The hardware is unable to drop rx packets not matching the RX filters. To + workaround it, we create a special VNIC and configure the hardware to + direct all packets not matching the filters to it. We then setup the + driver to drop packets received on this VNIC. + + This patch creates the infrastructure for this VNIC, reserves a + completion ring, and rx rings. Only shared completion ring mode is + supported. The next 2 patches add a NAPI to handle packets from this + VNIC and the setup of the VNIC. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 94ce9caa0f75b0d56e69550e84d7a1653f0ef3b0 +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:21 2016 -0400 + + bnxt_en: Workaround Nitro A0 hardware RX bug (part 1). + + Nitro A0 has a hardware bug in the rx path. The workaround is to create + a special COS context as a path for non-RSS (non-IP) packets. Without this + workaround, the chip may stall when receiving RSS and non-RSS packets. + + Add infrastructure to allow 2 contexts (RSS and CoS) per VNIC. Allocate + and configure the CoS context for Nitro A0. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3e8060fa837630f6fb4acbf59ba588c6df5b2f50 +Author: Prashant Sreedharan +Date: Mon Jul 18 07:15:20 2016 -0400 + + bnxt_en: Add basic support for Nitro in North Star 2. + + Nitro is the embedded version of the ethernet controller in the North + Star 2 SoC. Add basic code to recognize the chip ID and disable + the features (ntuple, TPA, ring and port statistics) not supported on + Nitro A0. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit f3ea3119ad75dde0ba3e8da4653dbd5a189688e5 +Author: Colin Ian King +Date: Fri Jul 8 16:42:48 2016 +0100 + + bnxt_en: initialize rc to zero to avoid returning garbage + + rc is not initialized so it can contain garbage if it is not + set by the call to bnxt_read_sfp_module_eeprom_info. Ensure + garbage is not returned by initializing rc to 0. + + Signed-off-by: Colin Ian King + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 09a7636a5b151670072de60767ddf096dc7bd12e +Author: Dan Carpenter +Date: Thu Jul 7 11:23:09 2016 +0300 + + bnxt: fix a condition + + This code generates as static checker warning because htons(ETH_P_IPV6) + is always true. From the context it looks like the && was intended to + be !=. + + Fixes: 94758f8de037 ('bnxt_en: Add GRO logic for BCM5731X chips.') + Signed-off-by: Dan Carpenter + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 51f307856b60e6b10975654e15bc236aa87b53d7 +Author: Michael Chan +Date: Fri Jul 1 18:46:29 2016 -0400 + + bnxt_en: Allow statistics DMA to be configurable using ethtool -C. + + The allowable range is 0.25 seconds to 1 second interval. Default is + 1 second. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 87027db19c30aafb8ff8d98e1c8802bc920f7b32 +Author: Michael Chan +Date: Fri Jul 1 18:46:28 2016 -0400 + + bnxt_en: Assign netdev->dev_port with port ID. + + This is useful for multi-function devices. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 17c71ac38134c3369479e34911b2035a85566caf +Author: Michael Chan +Date: Fri Jul 1 18:46:27 2016 -0400 + + bnxt_en: Allow promiscuous mode for VF if default VLAN is enabled. + + With a default VLAN, the VF has its own VLAN domain and it can receive + all traffic within that domain. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit dc7aadb5133846f738c59da7af3261335af35ad3 +Author: Vasundhara Volam +Date: Fri Jul 1 18:46:26 2016 -0400 + + bnxt_en: Increase maximum supported MTU to 9500. + + Signed-off-by: Vasundhara Volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 550feebf5cb075f7576b3cfe9bcf05abc1ffb8cd +Author: Michael Chan +Date: Fri Jul 1 18:46:25 2016 -0400 + + bnxt_en: Enable MRU enables bit when configuring VNIC MRU. + + For correctness, the MRU enables bit must be set when passing the + MRU to firmware during vnic configuration. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 93e0b4feb90cc651f7fbdfe07c257a969c51d1bb +Author: Rob Swindell +Date: Fri Jul 1 18:46:24 2016 -0400 + + bnxt_en: Add support for firmware updates for additional processors. + + Add support to the Ethtool FLASHDEV command handler for additional + firmware types to cover all the on-chip processors. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 08141e0bf4f6cb82d51930e34e6a8e4af46c776f +Author: Rob Swindell +Date: Fri Jul 1 18:46:23 2016 -0400 + + bnxt_en: Request firmware reset after successful firwmare update + + Upon successful mgmt processor firmware update, request a self + reset upon next PCIe reset (e.g. system reboot). + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a4c363471f2fa2b0f0abbd9f0563b034340585c3 +Author: Rob Swindell +Date: Fri Jul 1 18:46:22 2016 -0400 + + bnxt_en: Add support for updating flash more securely + + To support Secure Firmware Update, we must be able to allocate + a staging area in the Flash. This patch adds support for the + "update" type to tell firmware to do that. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2a5bedfa674cf81d60a20a76f456778834bd2123 +Author: Michael Chan +Date: Fri Jul 1 18:46:21 2016 -0400 + + bnxt_en: Do function reset on the 1st PF open only. + + Calling the firmware to do function reset on the PF will kill all the VFs. + To prevent that, we call function reset on the 1st PF open before any VF + can be activated. On subsequent PF opens (with possibly some active VFs), + a bit has been set and we'll skip the function reset. VF driver will + always do function reset on every open. If there is an AER event, we will + always do function reset. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a58a3e68037647de78e3461194239a1104f76003 +Author: Michael Chan +Date: Fri Jul 1 18:46:20 2016 -0400 + + bnxt_en: Update firmware spec. to 1.3.0. + + And update driver version to 1.3.0. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 75362a3fd4e37ff8af1ef5e3d9f2d9d5ccf2f3ab +Author: Michael Chan +Date: Fri Jul 1 18:46:19 2016 -0400 + + bnxt_en: VF/NPAR should return -EOPNOTSUPP for unsupported ethtool ops. + + Returning 0 for doing nothing is confusing to the user. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7cdd5fc376a51cdf191895c23badd699eddbc901 +Author: Alexander Duyck +Date: Thu Jun 16 12:21:36 2016 -0700 + + bnxt: Move GENEVE support from hard-coded port to using port notifier + + The port number for GENEVE is hard coded into the bnxt driver. This is the + kind of thing we want to avoid going forward. For now I will integrate + this back into the port notifier so that we can change the GENEVE port + number if we need to in the future. + + Signed-off-by: Alexander Duyck + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit ad51b8e9f9f4f8172eb7a6219d3005861bfb9a57 +Author: Alexander Duyck +Date: Thu Jun 16 12:21:19 2016 -0700 + + bnxt: Update drivers to support unified UDP encapsulation offload functions + + This patch ends up doing several things. 
First it updates the driver to + make use of the new unified UDP tunnel offload notifier functions. In + addition I updated the code so that we can work around the bits that were + checking for if VXLAN was enabled since we are now using a notifier based + setup. + + Signed-off-by: Alexander Duyck + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit 00c04a928572991d30b2473a7e992c1be8e646f3 +Author: Michael Chan +Date: Mon Jun 13 02:25:38 2016 -0400 + + bnxt_en: Support new ETHTOOL_{G|S}LINKSETTINGS API. + + To fully support 25G and 50G link settings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 93ed8117336485af2cedb069d28f3d4270fb90a1 +Author: Michael Chan +Date: Mon Jun 13 02:25:37 2016 -0400 + + bnxt_en: Don't allow autoneg on cards that don't support it. + + Some cards do not support autoneg. The current code does not prevent the + user from enabling autoneg with ethtool on such cards, causing confusion. + Firmware provides the autoneg capability information and we just need to + store it in the support_auto_speeds field in bnxt_link_info struct. + The ethtool set_settings() call will check this field before proceeding + with autoneg. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b24eb6ae7058ca1a42b0532489e5f5796c107d65 +Author: Michael Chan +Date: Mon Jun 13 02:25:36 2016 -0400 + + bnxt_en: Add BCM5731X and BCM5741X device IDs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 94758f8de037cf5c62eb56287f5d5e937cda8c9b +Author: Michael Chan +Date: Mon Jun 13 02:25:35 2016 -0400 + + bnxt_en: Add GRO logic for BCM5731X chips. + + Add bnxt_gro_func_5731x() to handle GRO packets for this chip. The + completion structures used in the new chip have new data to help determine + the header offsets. The offsets can be off by 4 if the packet is an + internal loopback packet (e.g. from one VF to another VF). 
Some additional + logic is added to adjust the offsets if it is a loopback packet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 309369c9b3f6a8665e581d9014f222b602f6845a +Author: Michael Chan +Date: Mon Jun 13 02:25:34 2016 -0400 + + bnxt_en: Refactor bnxt_gro_skb(). + + Newer chips require different logic to handle GRO packets. So refactor + the code so that we can call different functions depending on the chip. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 659c805cc01b3c5a6d972db0408164371a2bab4b +Author: Michael Chan +Date: Mon Jun 13 02:25:33 2016 -0400 + + bnxt_en: Define the supported chip numbers. + + Define all the supported chip numbers and chip categories. Store the + chip_num returned by firmware. If the call to get the version and chip + number fails, we should abort. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ebcd4eeb2a0b4859d7aaa3308b222a30d51a643f +Author: Michael Chan +Date: Mon Jun 13 02:25:32 2016 -0400 + + bnxt_en: Add PCI device ID for 57404 NPAR devices. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 567b2abe68551781b725b3b739672da41cb92ef0 +Author: Satish Baddipadige +Date: Mon Jun 13 02:25:31 2016 -0400 + + bnxt_en: Enable NPAR (NIC Partitioning) Support. + + NPAR type is read from bnxt_hwrm_func_qcfg. Do not allow changing link + parameters if in NPAR mode sinc ethe port is shared among multiple + partitions. The link parameters are set up by firmware. + + Signed-off-by: Satish Baddipadige + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fc0f19294d1ffaf9366b10d966f86e6cf13335a4 +Author: Michael Chan +Date: Mon Jun 13 02:25:30 2016 -0400 + + bnxt_en: Handle VF_CFG_CHANGE event from firmware. + + When the VF driver gets this event, the VF configuration has changed (such + as default VLAN). The VF driver will initiate a silent reset to pick up + the new configuration. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6988bd920c6ea53497ed15db947408b7488c9e36 +Author: Michael Chan +Date: Mon Jun 13 02:25:29 2016 -0400 + + bnxt_en: Add new function bnxt_reset(). + + When a default VLAN is added to the VF, the VF driver needs to reset to + pick up the default VLAN ID. We can use the same tx timeout reset logic + to do that, without the debug output. This new function, with the + silent parameter to suppress debug output will now serve both purposes. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit cf6645f8ebc69775a857b7c51928f3ad9e37aa66 +Author: Michael Chan +Date: Mon Jun 13 02:25:28 2016 -0400 + + bnxt_en: Add function for VF driver to query default VLAN. + + The PF can setup a default VLAN for a VF. The default VLAN tag is + automatically inserted and stripped without the knowledge of the + stack running on the VF. The VF driver needs to know that default + VLAN is enabled as VLAN acceleration on the RX side is no longer + supported. Call netdev_update_features() to fix up the VLAN features + as necessary. Also, VLAN strip mode must be enabled to strip out + the default VLAN tag. + + Only allow VF default VLAN to be set if the firmware spec is >= 1.2.1. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8852ddb4dcdfe6f877a02f79bf2bca9ae63c039a +Author: Michael Chan +Date: Mon Jun 6 02:37:16 2016 -0400 + + bnxt_en: Simplify VLAN receive logic. + + Since both CTAG and STAG rx acceleration must be enabled together, we + only need to check one feature flag (NETIF_F_HW_VLAN_CTAG_RX) before + calling __vlan_hwaccel_put_tag(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5a9f6b238e59bc05afb4cdeaf3672990bf2a5309 +Author: Michael Chan +Date: Mon Jun 6 02:37:15 2016 -0400 + + bnxt_en: Enable and disable RX CTAG and RX STAG VLAN acceleration together. + + The hardware can only be set to strip or not strip both the VLAN CTAG and + STAG. 
It cannot strip one and not strip the other. Add logic to + bnxt_fix_features() to toggle both feature flags when the user is toggling + one of them. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b9a8460a08a1e0150073cda3e7a0dd23cb888052 +Author: Michael Chan +Date: Mon Jun 6 02:37:14 2016 -0400 + + bnxt_en: Fix tx push race condition. + + Set the is_push flag in the software BD before the tx data is pushed to + the chip. It is possible to get the tx interrupt as soon as the tx data + is pushed. The tx handler will not handle the event properly if the + is_push flag is not set and it will crash. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7e13318daa4a67bff2f800923a993ef3818b3c53 +Author: Tom Herbert +Date: Wed May 18 09:06:10 2016 -0700 + + net: define gso types for IPx over IPv4 and IPv6 + + This patch defines two new GSO definitions SKB_GSO_IPXIP4 and + SKB_GSO_IPXIP6 along with corresponding NETIF_F_GSO_IPXIP4 and + NETIF_F_GSO_IPXIP6. These are used to described IP in IP + tunnel and what the outer protocol is. The inner protocol + can be deduced from other GSO types (e.g. SKB_GSO_TCPV4 and + SKB_GSO_TCPV6). The GSO types of SKB_GSO_IPIP and SKB_GSO_SIT + are removed (these are both instances of SKB_GSO_IPXIP4). + SKB_GSO_IPXIP6 will be used when support for GSO with IP + encapsulation over IPv6 is added. + + Signed-off-by: Tom Herbert + Acked-by: Jeff Kirsher + Signed-off-by: David S. Miller + +commit b67daab033293b3882ba4dc926ffb084d70044e0 +Author: Michael Chan +Date: Sun May 15 03:04:51 2016 -0400 + + bnxt_en: Use dma_rmb() instead of rmb(). + + Use the weaker but more appropriate dma_rmb() to order the reading of + the completion ring. + + Suggested-by: Ajit Khaparde + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 5049e33b559a44e9f216d86c58c7c7fce6f5df2f +Author: Michael Chan +Date: Sun May 15 03:04:50 2016 -0400 + + bnxt_en: Add BCM57314 device ID. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 10289bec0072b13f629a654d94faf1dadd44f335 +Author: Michael Chan +Date: Sun May 15 03:04:49 2016 -0400 + + bnxt_en: Simplify and improve unsupported SFP+ module reporting. + + The current code is more complicated than necessary and can only report + unsupported SFP+ module if it is plugged in after the device is up. + + Rename bnxt_port_module_event() to bnxt_get_port_module_status(). We + already have the current module_status in the link_info structure, so + just check that and report any unsupported SFP+ module status. Delete + the unnecessary last_port_module_event. Call this function at the + end of bnxt_open to report unsupported module already plugged in. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8578d6c19a308dea3daf3d03acdf18724ec05590 +Author: Michael Chan +Date: Sun May 15 03:04:48 2016 -0400 + + bnxt_en: Fix length value in dmesg log firmware error message. + + The len value in the hwrm error message is wrong. Use the properly adjusted + value in the variable len. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a11fa2be6d1564375dc57530680268ad569c2632 +Author: Michael Chan +Date: Sun May 15 03:04:47 2016 -0400 + + bnxt_en: Improve the delay logic for firmware response. + + The current code has 2 problems: + + 1. The maximum wait time is not long enough. It is about 60% of the + duration specified by the firmware. It is calling usleep_range(600, 800) + for every 1 msec we are supposed to wait. + + 2. The granularity of the delay is too coarse. Many simple firmware + commands finish in 25 usec or less. + + We fix these 2 issues by multiplying the original 1 msec loop counter by + 40 and calling usleep_range(25, 40) for each iteration. + + There is also a second delay loop to wait for the last DMA word to + complete. This delay loop should be a very short 5 usec wait. 
+ + This change results in much faster bring-up/down time: + + Before the patch: + + time ip link set p4p1 up + + real 0m0.120s + user 0m0.001s + sys 0m0.009s + + After the patch: + + time ip link set p4p1 up + + real 0m0.030s + user 0m0.000s + sys 0m0.010s + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d0a42d6fc8eaf1b64f62b0bbc3b829b756eacf57 +Author: Michael Chan +Date: Sun May 15 03:04:46 2016 -0400 + + bnxt_en: Reduce maximum ring pages if page size is 64K. + + The chip supports 4K/8K/64K page sizes for the rings and we try to + match it to the CPU PAGE_SIZE. The current page size limits for the rings + are based on 4K/8K page size. If the page size is 64K, these limits are + too large. Reduce them appropriately. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 90c4f788f6c08aaa52edbb47a817403376523375 +Author: Ajit Khaparde +Date: Sun May 15 03:04:45 2016 -0400 + + bnxt_en: Report PCIe link speed and width during driver load + + Add code to log a message during driver load indicating PCIe link + speed and width. + + The log message will look like this: + bnxt_en 0000:86:00.0 eth0: PCIe: Speed 8.0GT/s Width x8 + + Signed-off-by: Ajit Khaparde + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 42ee18fe4ca2a12b8370bb1c53fa6b9f9300c70c +Author: Ajit Khaparde +Date: Sun May 15 03:04:44 2016 -0400 + + bnxt_en: Add Support for ETHTOOL_GMODULEINFO and ETHTOOL_GMODULEEEPRO + + Add support to fetch the SFP EEPROM settings from the firmware + and display it via the ethtool -m command. We support SFP+ and QSFP + modules. + + v2: Fixed a bug in bnxt_get_module_eeprom() found by Ben Hutchings. + + Signed-off-by: Ajit Khaparde + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 18d6e4e2d800cbd44a7d7d215a49f99c6508e4a5 +Author: Satish Baddipadige +Date: Sun May 15 03:04:43 2016 -0400 + + bnxt_en: Fix invalid max channel parameter in ethtool -l. 
+ + When there is only 1 MSI-X vector or in INTA mode, tx and rx pre-set + max channel parameters are shown incorrectly in ethtool -l. With only 1 + vector, bnxt_get_max_rings() will return -ENOMEM. bnxt_get_channels + should check this return value, and set max_rx/max_tx to 0 if it is + non-zero. + + Signed-off-by: Satish Baddipadige + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fa7e28127a5ad9fd55ac9c7707d8c8b835113a7c +Author: Michael Chan +Date: Tue May 10 19:18:00 2016 -0400 + + bnxt_en: Add workaround to detect bad opaque in rx completion (part 2) + + Add detection and recovery code when the hardware returned opaque value + does not match the expected consumer index. Once the issue is detected, + we skip the processing of all RX and LRO/GRO packets. These completion + entries are discarded without sending the SKB to the stack and without + producing new buffers. The function will be reset from a workqueue. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 376a5b8647d6c56cb8f104d7ad0390b4f4057e70 +Author: Michael Chan +Date: Tue May 10 19:17:59 2016 -0400 + + bnxt_en: Add workaround to detect bad opaque in rx completion (part 1) + + There is a rare hardware bug that can cause a bad opaque value in the RX + or TPA completion. When this happens, the hardware may have used the + same buffer twice for 2 rx packets. In addition, the driver will also + crash later using the bad opaque as the index into the ring. + + The rx opaque value is predictable and is always monotonically increasing. + The workaround is to keep track of the expected next opaque value and + compare it with the one returned by hardware during RX and TPA start + completions. If they miscompare, we will not process any more RX and + TPA completions and exit NAPI. We will then schedule a workqueue to + reset the function. + + This patch adds the logic to keep track of the next rx consumer index. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 7d2837dd7a3239e8201d9bef75c1a708e451e123 +Author: Michael Chan +Date: Wed May 4 16:56:44 2016 -0400 + + bnxt_en: Setup multicast properly after resetting device. + + The multicast/all-multicast internal flags are not properly restored + after device reset. This could lead to unreliable multicast operations + after an ethtool configuration change for example. + + Call bnxt_mc_list_updated() and setup the vnic->mask in bnxt_init_chip() + to fix the issue. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 67a95e2022c7f0405408fb1f910283785ece354a +Author: Michael Chan +Date: Wed May 4 16:56:43 2016 -0400 + + bnxt_en: Need memory barrier when processing the completion ring. + + The code determines if the next ring entry is valid before proceeding + further to read the rest of the entry. The CPU can re-order and read + the rest of the entry first, possibly reading a stale entry, if DMA + of a new entry happens right after reading it. This issue can be + readily seen on a ppc64 system, causing it to crash. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 152971ee75fddbc43fb6cf7e3ada96c1324df2af +Author: Alexander Duyck +Date: Mon May 2 09:38:55 2016 -0700 + + bnxt: Add support for segmentation of tunnels with outer checksums + + This patch assumes that the bnxt hardware will ignore existing IPv4/v6 + header fields for length and checksum as well as the length and checksum + fields for outer UDP and GRE headers. + + I have been told by Michael Chan that this is working. Though this might + be somewhat redundant for IPv6 as they are forcing the checksum to be + computed for all IPv6 frames that are offloaded. A follow-up patch may be + necessary in order to fix this as it is essentially mangling the outer IPv6 + headers to add a checksum where none was requested. + + Signed-off-by: Alexander Duyck + Signed-off-by: David S. 
Miller + +commit 89d0a06c516339c0a2b3d02677f5d6310b3319fb +Author: Michael Chan +Date: Mon Apr 25 02:30:51 2016 -0400 + + bnxt_en: Divide a page into 32K buffers for the aggregation ring if necessary. + + If PAGE_SIZE is bigger than BNXT_RX_PAGE_SIZE, that means the native CPU + page is bigger than the maximum length of the RX BD. Divide the page + into multiple 32K buffers for the aggregation ring. + + Add an offset field in the bnxt_sw_rx_agg_bd struct to keep track of the + page offset of each buffer. Since each page can be referenced by multiple + buffer entries, call get_page() as needed to get the proper reference + count. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2839f28bd5bf8fd2ab4a1ea3a5589c8f94364cbb +Author: Michael Chan +Date: Mon Apr 25 02:30:50 2016 -0400 + + bnxt_en: Limit RX BD pages to be no bigger than 32K. + + The RX BD length field of this device is 16-bit, so the largest buffer + size is 65535. For LRO and GRO, we allocate native CPU pages for the + aggregation ring buffers. It won't work if the native CPU page size is + 64K or bigger. + + We fix this by defining BNXT_RX_PAGE_SIZE to be native CPU page size + up to 32K. Replace PAGE_SIZE with BNXT_RX_PAGE_SIZE in all appropriate + places related to the rx aggregation ring logic. + + The next patch will add additional logic to divide the page into 32K + chunks for aggrgation ring buffers if PAGE_SIZE is bigger than + BNXT_RX_PAGE_SIZE. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1fa72e29e14d97fbda15437c648d7cc4eb00bff8 +Author: Michael Chan +Date: Mon Apr 25 02:30:49 2016 -0400 + + bnxt_en: Don't fallback to INTA on VF. + + Only MSI-X can be used on a VF. The driver should fail initialization + if it cannot successfully enable MSI-X. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 8cbde1175e3c8565edbb777cd09cbfdb93c78397 +Author: Michael Chan +Date: Mon Apr 11 04:11:14 2016 -0400 + + bnxt_en: Add async event handling for speed config changes. + + On some dual port cards, link speeds on both ports have to be compatible. + Firmware will inform the driver when a certain speed is no longer + supported if the other port has linked up at a certain speed. Add + logic to handle this event by logging a message and getting the + updated list of supported speeds. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 84c33dd342ad596a271a61da0119bf34e80bb1c5 +Author: Michael Chan +Date: Mon Apr 11 04:11:13 2016 -0400 + + bnxt_en: Call firmware to approve VF MAC address change. + + Some hypervisors (e.g. ESX) require the VF MAC address to be forwarded to + the PF for approval. In Linux PF, the call is not forwarded and the + firmware will simply check and approve the MAC address if the PF has not + previously administered a valid MAC address for this VF. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 33f7d55f07ab964055d73d38774346f8d4821f00 +Author: Michael Chan +Date: Mon Apr 11 04:11:12 2016 -0400 + + bnxt_en: Shutdown link when device is closed. + + Let firmware know that the driver is giving up control of the link so that + it can be shutdown if no management firmware is running. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 03efbec03198a0f505c2a6c93268c3c5df321c90 +Author: Michael Chan +Date: Mon Apr 11 04:11:11 2016 -0400 + + bnxt_en: Disallow forced speed for 10GBaseT devices. + + 10GBaseT devices must autonegotiate to determine master/slave clocking. + Disallow forced speed in ethtool .set_settings() for these devices. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 29c262fed4067c52977ba279cf71520f9991a050 +Author: Michael Chan +Date: Tue Apr 5 14:09:03 2016 -0400 + + bnxt_en: Improve ethtool .get_settings(). 
+ + If autoneg is off, we should always report the speed and duplex settings + even if it is link down so the user knows the current settings. The + unknown speed and duplex should only be used for autoneg when link is + down. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9d9cee08fc9f5c4df84ef314158fd19c013bcec6 +Author: Michael Chan +Date: Tue Apr 5 14:09:02 2016 -0400 + + bnxt_en: Check for valid forced speed during ethtool -s. + + Check that the forced speed is a valid speed supported by firmware. + If not supported, return -EINVAL. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4bb13abf208cb484a9b9d1af9233b0ef850c2fe7 +Author: Michael Chan +Date: Tue Apr 5 14:09:01 2016 -0400 + + bnxt_en: Add unsupported SFP+ module warnings. + + Add the PORT_CONN_NOT_ALLOWED async event handling logic. The driver + will print an appropriate warning to reflect the SFP+ module enforcement + policy done in the firmware. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 25be862370031056989ee76e3c48c3ac8ff67fd4 +Author: Michael Chan +Date: Tue Apr 5 14:09:00 2016 -0400 + + bnxt_en: Set async event bits when registering with the firmware. + + Currently, the driver only sets bit 0 of the async_event_fwd fields. + To be compatible with the latest spec, we need to set the + appropriate event bits handled by the driver. We should be handling + link change and PF driver unload events, so these 2 bits should be + set. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 72b34f04e0b00956dd679ae18bf2163669df8b56 +Author: Michael Chan +Date: Tue Apr 5 14:08:59 2016 -0400 + + bnxt_en: Add get_eee() and set_eee() ethtool support. + + Allow users to get|set EEE parameters. + + v2: Added comment for preserving the tx_lpi_timer value in get_eee. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 939f7f0ca442187db2a4ec7a40979c711b0c939e +Author: Michael Chan +Date: Tue Apr 5 14:08:58 2016 -0400 + + bnxt_en: Add EEE setup code. + + 1. Add bnxt_hwrm_set_eee() function to setup EEE firmware parameters based + on the bp->eee settings. + 2. The new function bnxt_eee_config_ok() will check if EEE parameters need + to be modified due to autoneg changes. + 3. bnxt_hwrm_set_link() has added a new parameter to update EEE. If the + parameter is set, it will call bnxt_hwrm_set_eee(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 170ce01301a2a1a87808765531d938fa0b023641 +Author: Michael Chan +Date: Tue Apr 5 14:08:57 2016 -0400 + + bnxt_en: Add basic EEE support. + + Get EEE capability and the initial EEE settings from firmware. + Add "EEE is active | not active" to link up dmesg. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c9ee9516c161da2d072e035907aa35a35dfa68a8 +Author: Michael Chan +Date: Tue Apr 5 14:08:56 2016 -0400 + + bnxt_en: Improve flow control autoneg with Firmware 1.2.1 interface. + + Make use of the new AUTONEG_PAUSE bit in the new interface to better + control autoneg flow control settings, independent of RX and TX + advertisement settings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 11f15ed394782dd018d60a0bb550616a8571b43c +Author: Michael Chan +Date: Tue Apr 5 14:08:55 2016 -0400 + + bnxt_en: Update to Firmware 1.2.2 spec. + + Use new field names in API structs and stop using deprecated fields + auto_link_speed and auto_duplex in phy_cfg/phy_qcfg structs. + + Update copyright year to 2016. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3c02d1bb32347d0674714ee170772d771d513469 +Author: Michael Chan +Date: Mon Mar 28 19:46:07 2016 -0400 + + bnxt_en: Fix ethtool -a reporting. + + To report flow control tx/rx settings accurately regardless of autoneg + setting, we should use link_info->req_flow_ctrl. 
Before this patch, + the reported settings were only correct when autoneg was on. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 49b5c7a125201bb42c25831fda3a50305c29ef50 +Author: Michael Chan +Date: Mon Mar 28 19:46:06 2016 -0400 + + bnxt_en: Fix typo in bnxt_hwrm_set_pause_common(). + + The typo caused the wrong flow control bit to be set. + + Reported by: Ajit Khaparde + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit e6ef26991a46e20879bebb8298080eb7ceed4ae8 +Author: Michael Chan +Date: Mon Mar 28 19:46:05 2016 -0400 + + bnxt_en: Implement proper firmware message padding. + + The size of every padded firmware message is specified in the first + HWRM_VER_GET response message. Use this value to pad every message + after that. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 33e52d888d0c84a0c66f13357a53113fd9710bd6 +Author: Prashant Sreedharan +Date: Mon Mar 28 19:46:04 2016 -0400 + + bnxt_en: Initialize CP doorbell value before ring allocation + + The existing code does the following: + allocate completion ring + initialize completion ring doorbell + disable interrupts on this completion ring by writing to the doorbell + + We can have a race where firmware sends an asynchronous event to the host + after completion ring allocation and before doorbell is initialized. + When this happens driver can crash while ringing the doorbell using + uninitialized value as part of handling the IRQ/napi request. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 6316ea6db93d875df333e7ab205bf1aa3b3616d7 +Author: Satish Baddipadige +Date: Mon Mar 7 15:38:48 2016 -0500 + + bnxt_en: Enable AER support. + + Add pci_error_handler callbacks to support for pcie advanced error + recovery. + + Signed-off-by: Satish Baddipadige + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 8ddc9aaa725a9337fc7bbe95fe1d1499769fb9b2 +Author: Michael Chan +Date: Mon Mar 7 15:38:47 2016 -0500 + + bnxt_en: Include hardware port statistics in ethtool -S. + + Include the more useful port statistics in ethtool -S for the PF device. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9947f83fb79ca501f5ab24c370211bfb78b6b364 +Author: Michael Chan +Date: Mon Mar 7 15:38:46 2016 -0500 + + bnxt_en: Include some hardware port statistics in ndo_get_stats64(). + + Include some of the port error counters (e.g. crc) in ->ndo_get_stats64() + for the PF device. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3bdf56c47dfcd819ab1e73644c2eb9c72c08f29e +Author: Michael Chan +Date: Mon Mar 7 15:38:45 2016 -0500 + + bnxt_en: Add port statistics support. + + Gather periodic port statistics if the device is PF and link is up. This + is triggered in bnxt_timer() every one second to request firmware to DMA + the counters. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit f1a082a6f79fd5f06b27ef05a5ba7ec8d6e83b4c +Author: Michael Chan +Date: Mon Mar 7 15:38:44 2016 -0500 + + bnxt_en: Extend autoneg to all speeds. + + Allow all autoneg speeds aupported by firmware to be advertised. If + the advertising parameter is 0, then all supported speeds will be + advertised. + + Remove BNXT_ALL_COPPER_ETHTOOL_SPEED which is no longer used as all + supported speeds can be advertised. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4b32cacca28fe8b29bf266feff19b6fc2180402e +Author: Michael Chan +Date: Mon Mar 7 15:38:43 2016 -0500 + + bnxt_en: Use common function to get ethtool supported flags. + + The supported bits and advertising bits in ethtool have the same + definitions. The same is true for the firmware bits. So use the + common function to handle the conversion for both supported and + advertising bits. + + v2: Don't use parentheses on function return. 
+ + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3277360eb29c6e482391975717d983060ecbd28d +Author: Michael Chan +Date: Mon Mar 7 15:38:42 2016 -0500 + + bnxt_en: Add reporting of link partner advertisement. + + And report actual pause settings to ETHTOOL_GPAUSEPARAM to let ethtool + resolve the actual pause settings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 27c4d578600c401c119c012a90920805fab05cc9 +Author: Michael Chan +Date: Mon Mar 7 15:38:41 2016 -0500 + + bnxt_en: Refactor bnxt_fw_to_ethtool_advertised_spds(). + + Include the conversion of pause bits and add one extra call layer so + that the same refactored function can be reused to get the link partner + advertisement bits. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 90e209213096110bce06ef580e1c73702fe4a288 +Author: Michael Chan +Date: Fri Feb 26 04:00:08 2016 -0500 + + bnxt_en: Add hwrm_send_message_silent(). + + This is used to send NVM_FIND_DIR_ENTRY messages which can return error + if the entry is not found. This is normal and the error message will + cause unnecessary alarm, so silence it. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbfbc4851dd709cf1327afc283f9cca00235dcb3 +Author: Michael Chan +Date: Fri Feb 26 04:00:07 2016 -0500 + + bnxt_en: Refactor _hwrm_send_message(). + + Add a new function bnxt_do_send_msg() to do essentially the same thing + with an additional paramter to silence error response messages. All + current callers will set silent to false. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3ebf6f0a09a284adef62111c7cfca29f56d6cce7 +Author: Rob Swindell +Date: Fri Feb 26 04:00:06 2016 -0500 + + bnxt_en: Add installed-package firmware version reporting via Ethtool GDRVINFO + + For everything to fit, we remove the PHY microcode version and replace it + with the firmware package version in the fw_version string. 
+ + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit a8643e1604c1f39a675c6b10a7f84260fa13590c +Author: Michael Chan +Date: Fri Feb 26 04:00:05 2016 -0500 + + bnxt_en: Fix dmesg log firmware error messages. + + Use appropriate firmware request header structure to prepare the + firmware messages. This avoids the unnecessary conversion of the + fields to 32-bit fields. Add appropriate endian conversion when + printing out the message fields in dmesg so that they appear correct + in the log. + + Reported-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ff4fe81d2d49e3cad3bb45c8c5b9a49ca90ee10b +Author: Michael Chan +Date: Fri Feb 26 04:00:04 2016 -0500 + + bnxt_en: Use firmware provided message timeout value. + + Before this patch, we used a hardcoded value of 500 msec as the default + value for firmware message response timeout. For better portability with + future hardware or debug platforms, use the value provided by firmware in + the first response and store it for all susequent messages. Redefine the + macro HWRM_CMD_TIMEOUT to the stored value. Since we don't have the + value yet in the first message, use the 500 ms default if the stored value + is zero. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dfc9c94a83909f4be80e5d0c67e79793830aa312 +Author: Michael Chan +Date: Fri Feb 26 04:00:03 2016 -0500 + + bnxt_en: Add coalescing support for tx rings. + + When tx and rx rings don't share the same completion ring, tx coalescing + parameters can be set differently from the rx coalescing parameters. + Otherwise, use rx coalescing parameters on shared completion rings. + + Adjust rx coalescing default values to lower interrupt rate. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit bb053f52a54d66a6057c2220458349f7d39ce0d2 +Author: Michael Chan +Date: Fri Feb 26 04:00:02 2016 -0500 + + bnxt_en: Refactor bnxt_hwrm_set_coal(). 
+ + Add a function to set all the coalescing parameters. The function can + be used later to set both rx and tx coalescing parameters. + + v2: Fixed function parameters formatting requested by DaveM. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit dfb5b894f87cb78168e04283e8d15626dc3e6d5a +Author: Michael Chan +Date: Fri Feb 26 04:00:01 2016 -0500 + + bnxt_en: Store irq coalescing timer values in micro seconds. + + Don't convert these to internal hardware tick values before storing + them. This avoids the confusion of ethtool -c returning slightly + different values than the ones set using ethtool -C when we convert + hardware tick values back to micro seconds. Add better comments for + the hardware settings. + + Also, rename the current set of coalescing fields with rx_ prefix. + The next patch will add support of tx coalescing values. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 19241368443ff976b1924019d29eef8e972158e7 +Author: Jeffrey Huang +Date: Fri Feb 26 04:00:00 2016 -0500 + + bnxt_en: Send PF driver unload notification to all VFs. + + During remove_one() when SRIOV is enabled, the PF driver + should broadcast PF driver unload notification to all + VFs that are attached to VMs. Upon receiving the PF + driver unload notification, the VF driver should print + a warning message to message log. Certain operations on the + VF may not succeed after the PF has unloaded. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3874d6a8b61966a77aa743b4160ba96bf3081ce5 +Author: Jeffrey Huang +Date: Fri Feb 26 03:59:59 2016 -0500 + + bnxt_en: Improve bnxt_vf_update_mac(). + + Allow the VF to setup its own MAC address if the PF has not administratively + set it for the VF. To do that, we should always store the MAC address + from the firmware. There are 2 cases: + + 1. The MAC address is valid. 
This MAC address is assigned by the PF and + it needs to override the current VF MAC address. + + 2. The MAC address is zero. The VF will use a random MAC address by default. + By storing this 0 MAC address in the VF structure, it will allow the VF + user to change the MAC address later using ndo_set_mac_address() when + it sees that the stored MAC address is 0. + + v2: Expanded descriptions and added more comments. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbb0fa8b48892a3db8f5b89fb591c741fbd2fe7a +Author: Michael Chan +Date: Mon Feb 22 02:10:26 2016 -0500 + + bnxt_en: Fix zero padding of tx push data. + + The arithmetic to zero pad the last 64-bit word in the push buffer is not + correct. + + 1. It should be pdata + length to get to the end. + 2. 'pdata' is void pointer and passing it to PTR_ALIGN() will cast the + aligned pointer to void. Pass 'end' which is u64 pointer to PTR_ALIGN() + instead so that the aligned pointer - 1 is the last 64-bit pointer to data. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit ba41d46fe03223279054e58d570069fdc62fb768 +Author: Michael Chan +Date: Fri Feb 19 19:43:21 2016 -0500 + + bnxt_en: Failure to update PHY is not fatal condition. + + If we fail to update the PHY, we should print a warning and continue. + The current code to exit is buggy as it has not freed up the NIC + resources yet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit de73018fb5474b33dc4f6d6b8d889e40232e325b +Author: Michael Chan +Date: Fri Feb 19 19:43:20 2016 -0500 + + bnxt_en: Remove unnecessary call to update PHY settings. + + Fix bnxt_update_phy_setting() to check the correct parameters when + determining whether to update the PHY. Requested line speed/duplex should + only be checked for forced speed mode. This avoids unnecessary link + interruptions when loading the driver. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 035a1539ab63bfdb284bdf6e8459e35897c60564 +Author: Michael Chan +Date: Fri Feb 19 19:43:19 2016 -0500 + + bnxt_en: Poll link at the end of __bnxt_open_nic(). + + When shutting down the NIC, we shutdown async event processing before + freeing all the rings. If there is a link change event during reset, the + driver may miss it and the link state may be incorrect after the NIC is + re-opened. Poll the link at the end of __bnxt_open_nic() to get the + correct link status. + + Signed-off-by Michael Chan + + Signed-off-by: David S. Miller + +commit 51dd55b5688e81f9f13fb520a59900d4c3959a9a +Author: Michael Chan +Date: Wed Feb 10 17:33:50 2016 -0500 + + bnxt_en: Reduce default ring sizes. + + The current default tx ring size of 512 causes an extra page to be + allocated for the tx ring with only 1 entry in it. Reduce it to + 511. The default rx ring size is also reduced to 511 to use less + memory by default. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4419dbe6a0f031ddb2df4cd993805546a566d20e +Author: Michael Chan +Date: Wed Feb 10 17:33:49 2016 -0500 + + bnxt_en: Fix implementation of tx push operation. + + tx push is supported for small packets to reduce DMA latency. The + following bugs are fixed in this patch: + + 1. Fix the definition of the push BD which is different from the DMA BD. + 2. The push buffer has to be zero padded to the next 64-bit word boundary + or tx checksum won't be correct. + 3. Increase the tx push packet threshold to 164 bytes (192 bytes with the BD) + so that small tunneled packets are within the threshold. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1c49c421f3ec446f1e0eda6d965a6cb23214d7a1 +Author: Michael Chan +Date: Wed Feb 10 17:33:48 2016 -0500 + + bnxt_en: Remove 20G support and advertise only 40GbaseCR4. + + 20G is not supported by production hardware and only the 40GbaseCR4 standard + is supported. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 0d8abf020199b0cbc5fb3aa309d36f0ac1b91631 +Author: Michael Chan +Date: Wed Feb 10 17:33:47 2016 -0500 + + bnxt_en: Cleanup and Fix flow control setup logic + + Cleanup bnxt_probe_phy() to cleanly separate 2 code blocks for autoneg + on and off. Autoneg flow control is possible only if autoneg is enabled. + + In bnxt_get_settings(), Pause and Asym_Pause are always supported. + Only the advertisement bits change depending on the ethtool -A setting + in auto mode. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b763499ee16b74707af0fb26ab0a26bd9719870b +Author: Michael Chan +Date: Wed Feb 10 17:33:46 2016 -0500 + + bnxt_en: Fix ethtool autoneg logic. + + 1. Determine autoneg on|off setting from link_info->autoneg. Using the + firmware returned setting can be misleading if autoneg is changed and + there hasn't been a phy update from the firmware. + + 2. If autoneg is disabled, link_info->autoneg should be set to 0 to + indicate both speed and flow control autoneg are disabled. + + 3. To enable autoneg flow control, speed autoneg must be enabled. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d612a579771385e08f7b665063b36bfa52c03ea3 +Author: Michael Chan +Date: Thu Jan 28 03:11:22 2016 -0500 + + bnxt_en: Fix crash in bnxt_free_tx_skbs() during tx timeout. + + The ring index j is not wrapped properly at the end of the ring, causing + it to reference pointers past the end of the ring. For proper loop + termination and to access the ring properly, we need to increment j and + mask it before referencing the ring entry. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 8a4d4c8dde7a4119bce3fd8287dca193ff6356da +Author: Michael Chan +Date: Thu Jan 28 03:11:21 2016 -0500 + + bnxt_en: Exclude rx_drop_pkts hw counter from the stack's rx_dropped counter. 
+ + This hardware counter is misleading as it counts dropped packets that + don't match the hardware filters for unicast/broadcast/multicast. We + will still report this counter in ethtool -S for diagnostics purposes. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 74608fc98d2856fa7201a498b61c9dd9455b504a +Author: Prashant Sreedharan +Date: Thu Jan 28 03:11:20 2016 -0500 + + bnxt_en: Ring free response from close path should use completion ring + + Use completion ring for ring free response from firmware. The response + will be the last entry in the ring and we can free the ring after getting + the response. This will guarantee no spurious DMA to freed memory. + + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 415b6f19e87e350b13585591859d4fdf50772229 +Author: Arnd Bergmann +Date: Tue Jan 12 16:05:08 2016 +0100 + + net: bnxt: always return values from _bnxt_get_max_rings + + Newly added code in the bnxt driver uses a couple of variables that + are never initialized when CONFIG_BNXT_SRIOV is not set, and gcc + correctly warns about that: + + In file included from include/linux/list.h:8:0, + from include/linux/module.h:9, + from drivers/net/ethernet/broadcom/bnxt/bnxt.c:10: + drivers/net/ethernet/broadcom/bnxt/bnxt.c: In function 'bnxt_get_max_rings': + include/linux/kernel.h:794:26: warning: 'cp' may be used uninitialized in this function [-Wmaybe-uninitialized] + include/linux/kernel.h:794:26: warning: 'tx' may be used uninitialized in this function [-Wmaybe-uninitialized] + drivers/net/ethernet/broadcom/bnxt/bnxt.c:5730:11: warning: 'rx' may be used uninitialized in this function [-Wmaybe-uninitialized] + drivers/net/ethernet/broadcom/bnxt/bnxt.c:5736:6: note: 'rx' was declared here + + This changes the condition so that we fall back to using the PF + data if VF is not available, and always initialize the variables + to something useful. 
+ + Signed-off-by: Arnd Bergmann + Fixes: 6e6c5a57fbe1 ("bnxt_en: Modify bnxt_get_max_rings() to support shared or non shared rings.") + Acked-by: Michael Chan + Signed-off-by: David S. Miller + +commit d2d6318cb996f39112ba24ff23abe67578a611bc +Author: Rob Swindell +Date: Thu Jan 7 19:56:58 2016 -0500 + + bnxt_en: Reset embedded processor after applying firmware upgrade + + Use HWRM_FW_RESET command to request a self-reset of the embedded + processor(s) after successfully applying a firmware update. For boot + processor, the self-reset is currently deferred until the next PCIe reset. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d79979a103f7820d3107cdc04096e87b37f90008 +Author: Michael Chan +Date: Thu Jan 7 19:56:57 2016 -0500 + + bnxt_en: Zero pad firmware messages to 128 bytes. + + For future compatibility, zero pad all messages that the driver sends + to the firmware to 128 bytes. If these messages are extended in the + future with new byte enables, zero padding these messages now will + guarantee future compatibility. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 068c9ec62906b626a30526638fd36189b80b6464 +Author: Michael Chan +Date: Sat Jan 2 23:45:04 2016 -0500 + + bnxt_en: Modify ethtool -l|-L to support combined or rx/tx rings. + + The driver can support either all combined or all rx/tx rings. The + default is combined, but the user can now select rx/tx rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 01657bcd078b924e4599a83acd402ea6f85a1e45 +Author: Michael Chan +Date: Sat Jan 2 23:45:03 2016 -0500 + + bnxt_en: Modify init sequence to support shared or non shared rings. + + Modify ring memory allocation and MSIX setup to support shared or + non shared rings and do the proper mapping. Default is still to + use shared rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 6e6c5a57fbe1c77c2c55e266f87a83429adc3de7 +Author: Michael Chan +Date: Sat Jan 2 23:45:02 2016 -0500 + + bnxt_en: Modify bnxt_get_max_rings() to support shared or non shared rings. + + Add logic to calculate how many shared or non shared rings can be + supported. Default is to use shared rings. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b81a90d3028af92da61a61e2efd231a585180044 +Author: Michael Chan +Date: Sat Jan 2 23:45:01 2016 -0500 + + bnxt_en: Re-structure ring indexing and mapping. + + In order to support dedicated or shared completion rings, the ring + indexing and mapping are re-structured as below: + + 1. bp->grp_info[] array index is 1:1 with bp->bnapi[] array index and + completion ring index. + + 2. rx rings 0 to n will be mapped to completion rings 0 to n. + + 3. If tx and rx rings share completion rings, then tx rings 0 to m will + be mapped to completion rings 0 to m. + + 4. If tx and rx rings use dedicated completion rings, then tx rings 0 to + m will be mapped to completion rings n + 1 to n + m. + + 5. Each tx or rx ring will use the corresponding completion ring index + for doorbell mapping and MSIX mapping. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 3b2b7d9db74adb95aa0bd029298a741333eb847e +Author: Michael Chan +Date: Sat Jan 2 23:45:00 2016 -0500 + + bnxt_en: Check for NULL rx or tx ring. + + Each bnxt_napi structure may no longer be having both an rx ring and + a tx ring. Check for a valid ring before using it. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b6ab4b01f53b5f9e17dbd4f91c95fa5049fa2101 +Author: Michael Chan +Date: Sat Jan 2 23:44:59 2016 -0500 + + bnxt_en: Separate bnxt_{rx|tx}_ring_info structs from bnxt_napi struct. + + Currently, an rx and a tx ring are always paired with a completion ring. + We want to restructure it so that it is possible to have a dedicated + completion ring for tx or rx only. 
+ + The bnxt hardware uses a completion ring for rx and tx events. The driver + has to process the completion ring entries sequentially for the rx and tx + events. Using a dedicated completion ring for rx only or tx only has these + benefits: + + 1. A burst of rx packets can cause delay in processing tx events if the + completion ring is shared. If tx queue is stopped by BQL, this can cause + delay in re-starting the tx queue. + + 2. A completion ring is sized according to the rx and tx ring size rounded + up to the nearest power of 2. When the completion ring is shared, it is + sized by adding the rx and tx ring sizes and then rounded to the next power + of 2, often with a lot of wasted space. + + 3. Using dedicated completion ring, we can adjust the tx and rx coalescing + parameters independently for rx and tx. + + The first step is to separate the rx and tx ring structures from the + bnxt_napi struct. + + In this patch, an rx ring and a tx ring will point to the same bnxt_napi + struct to share the same completion ring. No change in ring assignment + and mapping yet. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 9f5545905fbcc069f6fa8030b866e967ec6a5c73 +Author: Michael Chan +Date: Sat Jan 2 23:44:58 2016 -0500 + + bnxt_en: Refactor bnxt_dbg_dump_states(). + + By adding 3 separate functions to dump the different ring states. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit fbc9a5237a767cada312cb07877d0992b1e34242 +Author: David Christensen +Date: Sun Dec 27 18:19:29 2015 -0500 + + bnxt_en: Add BCM57301 & BCM57402 devices. + + Added the PCI IDs for the BCM57301 and BCM57402 controllers. + + Signed-off-by: David Christensen + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c193554ecd050e63753aa0ec99c188800843bca2 +Author: Michael Chan +Date: Sun Dec 27 18:19:28 2015 -0500 + + bnxt_en: Update to Firmware interface spec 1.0.0. 
+ + This interface will be forward compatible with future changes. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b72d4a68c443e29cb59e15a1a9b2c2f4bf802831 +Author: Michael Chan +Date: Sun Dec 27 18:19:27 2015 -0500 + + bnxt_en: Keep track of the ring group resource. + + Newer firmware will return the ring group resource when we call + hwrm_func_qcaps(). To be compatible with older firmware, use the + number of tx rings as the number of ring groups if the older firmware + returns 0. When determining how many rx rings we can support, take + the ring group resource in account as well in _bnxt_get_max_rings(). + Divide and assign the ring groups to VFs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 4a21b49b34c01137a67bf0fe185c5d0fff747e4d +Author: Michael Chan +Date: Sun Dec 27 18:19:26 2015 -0500 + + bnxt_en: Improve VF resource accounting. + + We need to keep track of all resources, such as rx rings, tx rings, + cmpl rings, rss contexts, stats contexts, vnics, after we have + divided them for the VFs. Otherwise, subsequent ring changes on + the PF may not work correctly. + + We adjust all max resources in struct bnxt_pf_info after they have been + assigned to the VFs. There is no need to keep the separate + max_pf_tx_rings and max_pf_rx_rings. + + When SR-IOV is disabled, we call bnxt_hwrm_func_qcaps() to restore the + max resources for the PF. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 92268c328a8dae4635b3deaca52a8ed329642219 +Author: Michael Chan +Date: Sun Dec 27 18:19:25 2015 -0500 + + bnxt_en: Cleanup bnxt_hwrm_func_cfg(). + + 1. Use local variable pf for repeated access to this pointer. + + 2. The 2nd argument num_vfs was unnecessarily declared as pointer to int. + This function doesn't change num_vfs so change the argument to int. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 2bcfa6f6e7cf867e4aa623f84caea4bc413d38c9 +Author: Michael Chan +Date: Sun Dec 27 18:19:24 2015 -0500 + + bnxt_en: Check hardware resources before enabling NTUPLE. + + The hardware resources required to enable NTUPLE varies depending on + how many rx channels are configured. We need to make sure we have the + resources before we enable NTUPLE. Add bnxt_rfs_capable() to do the + checking. + + In addition, we need to do the same checking in ndo_fix_features(). As + the rx channels are changed using ethtool -L, we call + netdev_update_features() to make the necessary adjustment for NTUPLE. + + Calling netdev_update_features() in netif_running() state but before + calling bnxt_open_nic() would be a problem. To make this work, + bnxt_set_features() has to be modified to test for BNXT_STATE_OPEN for + the true hardware state instead of checking netif_running(). + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 27e241896f2e21c96200df711659117923dec8a2 +Author: Michael Chan +Date: Sun Dec 27 18:19:23 2015 -0500 + + bnxt_en: Don't treat single segment rx frames as GRO frames. + + If hardware completes single segment rx frames, don't bother setting + up all the GRO related fields. Pass the SKB up as a normal frame. + + Reviewed-by: vasundhara volam + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 45019a180358c3cf290c3f3dc953c44f978d5527 +Author: Michael Chan +Date: Sun Dec 27 18:19:22 2015 -0500 + + bnxt_en: Allocate rx_cpu_rmap only if Accelerated RFS is enabled. + + Also, no need to check for bp->rx_nr_rings as it is always >= 1. If the + allocation fails, it is not a fatal error and we can still proceed. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 665e350ddbfde88c5c18142dfd7b8c64556bc964 +Author: Satish Baddipadige +Date: Sun Dec 27 18:19:21 2015 -0500 + + bnxt_en: Increment checksum error counter only if NETIF_F_RXCSUM is set. 
+ + rx_l4_csum_error is now incremented only when offload is enabled + + Signed-off-by: Satish Baddipadige + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 2731d70fa9cbb62e45743171bf979784fb36778c +Author: Rob Swindell +Date: Sun Dec 27 18:19:20 2015 -0500 + + bnxt_en: Add support for upgrading APE/NC-SI firmware via Ethtool FLASHDEV + + NC-SI firmware of type apeFW (10) is now supported. + + Signed-off-by: Rob Swindell + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit edd0c2cc2b73ff21f356d6cbd3b5bf83e692ea9d +Author: Michael Chan +Date: Sun Dec 27 18:19:19 2015 -0500 + + bnxt_en: Optimize ring alloc and ring free functions. + + Remove the unnecessary "if" statement before the "for" statement: + + if (x) { + for (i = 0; i < x; i++) + ... + } + + Also, change the ring free function to return void as it only returns 0. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit be58a0da1672391b246880450e990fe36d7ba24d +Author: Jeffrey Huang +Date: Sun Dec 27 18:19:18 2015 -0500 + + bnxt_en: support hwrm_func_drv_unrgtr command + + During remove_one, the driver should issue hwrm_func_drv_unrgtr + command to inform firmware that this function has been unloaded. + This is to let firmware keep track of driver present/absent state + when driver is gracefully unloaded. A keep alive timer is needed + later to keep track of driver state during abnormal shutdown. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 028de140ffdf481d4948de663b33dae78e1e9cc8 +Author: Michael Chan +Date: Wed Dec 9 19:35:44 2015 -0500 + + bnxt_en: Implement missing tx timeout reset logic. + + The reset logic calls bnxt_close_nic() and bnxt_open_nic() under rtnl_lock + from bnxt_sp_task. BNXT_STATE_IN_SP_TASK must be cleared before calling + bnxt_close_nic() to avoid deadlock. + + v2: Fixed white space error. Thanks Dave. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit 4cebdcec0933bf39c0ab42e8ce8c9d72f803fbe9 +Author: Michael Chan +Date: Wed Dec 9 19:35:43 2015 -0500 + + bnxt_en: Don't cancel sp_task from bnxt_close_nic(). + + When implementing driver reset from tx_timeout in the next patch, + bnxt_close_nic() will be called from the sp_task workqueue. Calling + cancel_work() on sp_task will hang the workqueue. + + Instead, set a new bit BNXT_STATE_IN_SP_TASK when bnxt_sp_task() is running. + bnxt_close_nic() will wait for BNXT_STATE_IN_SP_TASK to clear before + proceeding. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit caefe526d7b5af11d9b5977b2862eb144fa45537 +Author: Michael Chan +Date: Wed Dec 9 19:35:42 2015 -0500 + + bnxt_en: Change bp->state to bitmap. + + This allows multiple independent bits to be set for various states. + Subsequent patches to implement tx timeout reset will require this. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit de68f5de56512a2ff5d5810ef4d54c53470c3c45 +Author: Michael Chan +Date: Wed Dec 9 19:35:41 2015 -0500 + + bnxt_en: Fix bitmap declaration to work on 32-bit arches. + + The declaration of the bitmap vf_req_snif_bmap using fixed array of + unsigned long will only work on 64-bit archs. Use DECLARE_BITMAP instead + which will work on all archs. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit b664f008b0d885db1d5617ed1c51d29a8c04da93 +Author: Michael Chan +Date: Wed Dec 2 01:54:08 2015 -0500 + + bnxt_en: Setup uc_list mac filters after resetting the chip. + + Call bnxt_cfg_rx_mode() in bnxt_init_chip() to setup uc_list and + mc_list mac address filters. Before the patch, uc_list is not + setup again after chip reset (such as ethtool ring size change) + and macvlans don't work any more after that. + + Modify bnxt_cfg_rx_mode() to return error codes appropriately so + that the init chip sequence can detect any failures. + + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit bdd4347b33f480187b44699cf1caac9400496d6d +Author: Jeffrey Huang +Date: Wed Dec 2 01:54:07 2015 -0500 + + bnxt_en: enforce proper storing of MAC address + + For PF, the bp->pf.mac_addr always holds the permanent MAC + addr assigned by the HW. For VF, the bp->vf.mac_addr always + holds the administrator assigned VF MAC addr. The random + generated VF MAC addr should never get stored to bp->vf.mac_addr. + This way, when the VF wants to change the MAC address, we can tell + if the adminstrator has already set it and disallow the VF from + changing it. + + v2: Fix compile error if CONFIG_BNXT_SRIOV is not set. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 1fc2cfd03bbf8f1f8b6b90f0858faba8bd6631c4 +Author: Jeffrey Huang +Date: Wed Dec 2 01:54:06 2015 -0500 + + bnxt_en: Fixed incorrect implementation of ndo_set_mac_address + + The existing ndo_set_mac_address only copies the new MAC addr + and didn't set the new MAC addr to the HW. The correct way is + to delete the existing default MAC filter from HW and add + the new one. Because of RFS filters are also dependent on the + default mac filter l2 context, the driver must go thru + close_nic() to delete the default MAC and RFS filters, then + open_nic() to set the default MAC address to HW. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 93d05d4a320cb16712bb3d57a9658f395d8cecb9 +Author: Eric Dumazet +Date: Wed Nov 18 06:31:03 2015 -0800 + + net: provide generic busy polling to all NAPI drivers + + NAPI drivers no longer need to observe a particular protocol + to benefit from busy polling (CONFIG_NET_RX_BUSY_POLL=y) + + napi_hash_add() and napi_hash_del() are automatically called + from core networking stack, respectively from + netif_napi_add() and netif_napi_del() + + This patch depends on free_netdev() and netif_napi_del() being + called from process context, which seems to be the norm. 
+ + Drivers might still prefer to call napi_hash_del() on their + own, since they might combine all the rcu grace periods into + a single one, knowing their NAPI structures lifetime, while + core networking stack has no idea of a possible combining. + + Once this patch proves to not bring serious regressions, + we will cleanup drivers to either remove napi_hash_del() + or provide appropriate rcu grace periods combining. + + Signed-off-by: Eric Dumazet + Signed-off-by: David S. Miller + +commit 4bb6cdce386d620d10d2588ea5bf4093a3b21ab9 +Author: Jeffrey Huang +Date: Thu Nov 5 16:25:51 2015 -0500 + + bnxt_en: More robust SRIOV cleanup sequence. + + Instead of always calling pci_sriov_disable() in remove_one(), + the driver should detect whether VFs are currently assigned + to the VMs. If the VFs are active in VMs, then it should not + disable SRIOV as it is catastrophic to the VMs. Instead, + it just leaves the VFs alone and continues to unload the PF. + The user can then cleanup the VMs even after the PF driver + has been unloaded. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 84e86b98f6515aaeaac053b234be158b25457184 +Author: Michael Chan +Date: Thu Nov 5 16:25:50 2015 -0500 + + bnxt_en: Fix comparison of u16 sw_id against negative value. + + Assign the return value from bitmap_find_free_region() to an integer + variable and check for negative error codes first, before assigning + the bit ID to the unsigned sw_id field. + + Reported-by: Dan Carpenter + Cc: Dan Carpenter + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 11809490ac17810cff90c12e9f2f3e0303a72121 +Author: Jeffrey Huang +Date: Thu Nov 5 16:25:49 2015 -0500 + + bnxt_en: map CAG_REG_LEGACY_INT_STATUS_MASK to GRC window #4 + + In order to use offset 0x4014 for reading CAG interrupt status, + the actual CAG register must be mapped to GRC bar0 window #4. + Otherwise, the driver is reading garbage. 
This patch corrects + this issue. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 614388ce39f3d61ad7f95db65f409d35d5943616 +Author: Michael Chan +Date: Thu Nov 5 16:25:48 2015 -0500 + + bnxt_en: Determine tcp/ipv6 RSS hash type correctly. + + The profile ID in the completion record needs to be ANDed with the + profile ID mask of 0x1f. This bug was causing the SKB hash type + and the gso_type to be wrong in some cases. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit c5d7774db350e77f2506e36e1797c958d1b118c8 +Author: Jeffrey Huang +Date: Thu Nov 5 16:25:47 2015 -0500 + + bnxt_en: Change sp events definitions to represent bit position. + + Fix the sp event bits to be bit positions instead of bit values since + the bit helper functions are expecting the former. + + Signed-off-by: Jeffrey Huang + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit d1611c3aba11ffa281bdd027aace52f5a370b8c5 +Author: Michael Chan +Date: Sun Oct 25 22:27:57 2015 -0400 + + bnxt_en: Fix compile warnings when CONFIG_INET is not set. + + bnxt_gro_skb() has unused variables when CONFIG_INET is not set. We + really cannot support hardware GRO if CONFIG_INET is not set, so + compile out bnxt_gro_skb() completely and define BNXT_FLAG_GRO to be 0 + if CONFIG_INET is not set. This will effectively always disable + hardware GRO if CONFIG_INET is not set. + + Signed-off-by: Michael Chan + Signed-off-by: David S. Miller + +commit 379a80a1d048dcacfc2011d5d32e16d5c804b9f4 +Author: Michael Chan +Date: Fri Oct 23 15:06:19 2015 -0400 + + bnxt_en: Fix compile errors when CONFIG_BNXT_SRIOV is not set. + + struct bnxt_pf_info needs to be always defined. Move bnxt_update_vf_mac() + to bnxt_sriov.c and add some missing #ifdef CONFIG_BNXT_SRIOV. + + Reported-by: Jim Hull + Tested-by: Jim Hull + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller + +commit c0c050c58d840994ba842ad1c338a98e7c12b764 +Author: Michael Chan +Date: Thu Oct 22 16:01:17 2015 -0400 + + bnxt_en: New Broadcom ethernet driver. + + Broadcom ethernet driver for the new family of NetXtreme-C/E + ethernet devices. + + v5: + - Removed empty blank lines at end of files (noted by David Miller). + - Moved busy poll helper functions to bnxt.h to at least make the + .c file look less cluttered with #ifdef (noted by Stephen Hemminger). + + v4: + - Broke up 2 long message strings with "\n" (suggested by John Linville) + - Constify an array of strings (suggested by Stephen Hemminger) + - Improve bnxt_vf_pciid() (suggested by Stephen Hemminger) + - Use PCI_VDEVICE() to populate pci_device_id table for more compact + source. + + v3: + - Fixed 2 more sparse warnings. + - Removed some unused structures in .h files. + + v2: + - Fixed all kbuild test robot reported warnings. + - Fixed many of the checkpatch.pl errors and warnings. + - Fixed the Kconfig description (noted by Dmitry Kravkov). + + Acked-by: Eddie Wai + Acked-by: Jeffrey Huang + Signed-off-by: Prashant Sreedharan + Signed-off-by: Michael Chan + Signed-off-by: David S. 
Miller diff --git a/drivers/thirdparty/release-drivers/bnxt/MANIFEST b/drivers/thirdparty/release-drivers/bnxt/MANIFEST new file mode 100644 index 000000000000..2503ca08d318 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/MANIFEST @@ -0,0 +1,262 @@ +d8062fa8ac8fda00de2d2d83a3a822919f50eec96fa384748029c26cdde5629a032da0dbfe2f6769563c9a1be58ab9bfef605c0051aac80a5823cd471037db16 bnxt.c +153c37da2563e1c2247dfdb1abfe2489a40e600714c1b8172582971c663c87b0b92123061af4e667e8abfb3875921c049acd2ccd9d6be309cfd9415f6c86dd3d bnxt_auxbus_compat.c +f28d2ad47cd8de7c4a7ad426cc0ec3e37dd5e3df3285d39876bf8784ee573bee58cf97d87e3ca611abe222fdf7524f12833d83cba8dd348ed0f470ad2775ee48 bnxt_auxbus_compat.h +5d5a81221983ea0c0563ee0989afc835d4a48dbccbadbe56df3e7ea8d6142886436c3006031b152df6f4c6a09829632542afd27d2ceb818fad2a7d49747d8bfe bnxt_compat.h +23741c4e97e4c0affda78daa1e57f6f83cb79667ba2974646f428889c9126771b52e8c9c7788999dda4149cf808e1042a35e9ed23f33803c2010875594f21121 bnxt_compat_link_modes.c +774008f5a76b5c3c689733742223c4490ceb33c82e56d29affeefa7ac78cde27627d881047776b3cee4bced34529788ece1ea32b8420ec259238dc120e0eba7b bnxt_compat_link_modes.h +a57a5a9b7a53ad1a673b91efc7d7407d39a1c16d63b2768a4293e1464b723cc750a981906c63c940fd69934bd2f321941f9f4622b4c081894ab3df7b94baa2a0 bnxt_coredump.c +11f7a08bf85c1cb7cc33f95afba9e12a452f480a7a9219bc99a22f29372c8e017eb87ee30fffa4d0c4789156f04dfe9920d374c603889afe17cc9617bfae3144 bnxt_coredump.h +4c9ab1cef6af1e6c339d9989365ecaa5e3333d6e1a81fdaa43cb8723f3971e1f6c9cfd11d7cd65a917c7533b282ba0a6f3f35291224f6149efb1e6f9c60b47ca bnxt_dbr.h +5711c989b1b271f75396a12254572031a49e9c7488dd38328b88de5828fd24e77484161ac5cb576d62e6a9f7f4ade49b1555e5453d5efa83fc8f45b5a19d1a82 bnxt_dcb.c +9cc26634f820e64f5f39ad169e671b9bb6c0d8f7216e5658b96e52dfa9940342abda4949af92932ac9c9943b20145fff08a3bd0890aeb341cc8dbf57d56fe585 bnxt_dcb.h +865a878d10ef7738d664bdb5dcba9cdbe8f529ffc4619a84278d5198f1f0485eddd7736ba7866d4db3ecd1d590e24744578e8b9b64afaa2b8099ccbd7b03de16 
bnxt_debugfs.c +39da82080f3a2580f54da7c574cc802cc3457c68dbc445f8364771e5694e6023cc4a617649d36c7a5b719f237ec05dafff61440326eda6e549a2b32e2ee33706 bnxt_debugfs_cpt.c +1ee1e56cdf043482839d6017769fb13df968a62e988861ea26590b0753b30720377984b80a0280fe4cfcb446ca9290e2e164554d7c12abc78d05044a7b50ca07 bnxt_debugfs.h +7718e925e8e66ff3c56fe8866746c4922d87cbce78fdb043122d642b2838e45363dda125b743a41948103b029b4e2f8a784d9c6b59cdde83b45ec218f83b024d bnxt_devlink.c +977a1016a5d95ad5b8d7da4384c13777b655c386067de7eca76a63cc16eea491fff70f0b5a3339ad32338f884f92a156b870f22a32cfa719aa5f370171f0efc9 bnxt_devlink.h +2e382a8511e9cd914ab05e38dd1e6cf4b6124749acc3c02d8a4d8b290b836fc35a6092e6294247804138900d27e9c052e6465cc658bd93bf9cccdabe7f14a5ea bnxt_dim.c +e0a94914c20021944f64f24fb4e46732a84e90b4923e588e412a8685075ace14745c7dba710432a334d693bac01fa36822c9debb11858dfc80464fcc29f722d7 bnxt_dim.h +2f661ffcae1ebc74a008ee21718d935ce3d35e8f47ea0f7d59d6a03b2d492b2285501f3228c9b80212c4d149e940954bd0b3b11f9720ed9872de61fbaff28b75 bnxt_ethtool.c +47465e9bdf574083906e993a0a91b9a12d689be58f75b2c7867aa3012234ec1cc4926098a6379fc9a76d90e64ed2c4cfe6e0e0feb1b3253dc34748efe9119527 bnxt_ethtool.h +4d2831d6b347c8317b3bf9fd28142c4abac44d18d666ae35518ddb7f9829f4c0b46d5be67b34800ec3be6a59456b562f76353402bb62e9b2e34b21567437785b bnxt_ethtool_compat.c +6da387c84a806102100ba0febe41f760a55874cd2055c936355775473c22791bc3588de1b632e8d92d819c1b0676690665f1f0678096b8b3e00617110d08eb33 bnxt_extra_ver.h +66cd6f263fdf9e54673b6b75ae2417538631e17e9e65c463b52bfb45ba57b906940ddece0b24fd0eb659c00208fd5fe6c777db430e167168abc700a2cf3786e9 bnxt_fw_hdr.h +b389f7c28cd5383caed0c266edfaeb732b740c3dcdc85b2862886e62b96e978ef6e5f950e43dc6fb926f534b31120d4cc9a00559911dc61d8fded4b3d59e5108 bnxt.h +54288743fcc81dc99667e62dfde8091017abe475fd36882edd4b45b37edc5d877f1b71dda3deb59e9195d2a438545ef4beedce0ad493e6fc778173740600d319 bnxt_hdbr.c 
+21fcddeb6fbccbaa8d2ef5e8ed967786d214b15e7d672d33e94c077c1dcb4c0bb7bd4f393edcf0305abc55767dea343b76cd806afb780cdecbb8d27e9d4ea8ac bnxt_hdbr.h +4e3c8849724cf3992299bd11e20c1ccadbc875571186686588625ecd5860547ae5ef4f2bbda7792168ea957e8ad93477fa432335f0bb633ab77ecbfcd7409f18 bnxt_hsi.h +673752951d2faa0a99b6c159e46b7cf97af4340af679a1f4b2b1f727947d028ef18e3ab812f3a61aa932c55bb4fb609f0bb8cd72ee72fd5cb61a7b73e6802101 bnxt_hwmon.c +1d1dbfd77dd882dd1634abf9e3bacec39dc72563346c3eef4f51eab3c46e7d11e1b156ef4ef207b116b50a7290cdc2e98a671dcaa29ba7b1abbdf477864f3fcd bnxt_hwmon.h +a03972f83fdc74dea86d7152bfe1ea6480005de19964336e4de385f8d453d38e4aaff9c4472c38644e976520be6e216e44ff7dddc773346929d98a7e332b40f5 bnxt_hwrm.c +69987119457af51b1a1203568579591e86be7aabd43832fff5a4b07fd335f2d56175436ce5c25843f61b0157b08f42c1cf3fb35f73113d5aa922d6ea177e9999 bnxt_hwrm.h +758af108fa5977fc6214287a4940ce98e906f27dc1b7273e5a806a3004bc0d5d177c9983c354aeefd3b091ad6286e9a32cc9a27a55d9aff6170ad13a3f47e418 bnxt_lfc.c +1f61dfbe6066e6dd03ac33116486c737994011e57865bb54be854784fd285ee118f7425c4526f5db0161b30f2cc6b1a718cb843d6f066cc9f2ec22077f557677 bnxt_lfc.h +23e4e252721b87518afc355a506959b9584b97d4828609eb6cd758b863e931a4db7a6895958a09f923fe13c4b5f56bcbc1b1c6c782caef6d80f96ada18610a59 bnxt_lfc_ioctl.h +b818313b24a48335a729d27cc48954ebc3a638918a82490ca54d9c93b6664aaa075ffed431b9b7bb1e7572b29dbaa3ca6bccbd7b2efe853ae9128151fc61fe1e bnxt_log.c +db7118ce77a5cced614cef364c60b7be03eea34cfa5a197f72d3deb00eb31e4658712d78e714fd2ae57f8183afbf04e18cd18d404be0e96a3af59015515e4095 bnxt_log.h +a59548bab06706c1593081c1daf2c335135f0fbe384a65da66357504cd2e9ac3b57a3da6a10479bd7646b3f7f3ce4dc8c7588f9c3c482f62c39de50531069088 bnxt_log_data.c +35348f7ea785a47175d05316d62afd99675bfbcc280ecd45b5fe9163959d0a1eccf5856e8ac23f6e4de549bd83a377b8e83efd1892ffce62c55b8ce2ba795432 bnxt_log_data.h +88064db8a603665eec9b6ebac9a29849a3b6fa4be3ef17dc8d47aedc66294f1a4d18ffad1ac6868e613c334d8755ddfbd5cba4a3dc8053c1a1f4f4ecb881064b bnxt_mpc.c 
+b131c6759654b98d3ef4e3afed9720954187dcaa01d1bfceb62b7c300b6b0637c98f7439d05ea83bdc8b2974f24a771177b8f8d784da2eabedb469ed8d489a16 bnxt_mpc.h +90a8f6020843fbebabb5b6b89d94818188b9072f9e0d3316864efaca9a21f0a59f4e75f27db8869efdc5d959241ab6d6d815029e325443fea46c203987ae3035 bnxt_tfc.c +3cd3f83d5b32bfd3d845399f8403b1e72ba63c4dc36b3a482ecf344f7a9ab55588067478da720c464603c26fe3d8587bc913bf45252abe628d2e0f4ae1ceb8da bnxt_tfc.h +0672b9d4224ab7e0ecd9b05c1bf4515b4ac58713b819a36650292968d4254957f3ba9f475dc195f9a9fe7b272fbf6c732d59ff37aacadac3f15033031e053eb4 bnxt_ktls.c +68bebdc9b4b5552d468d01546cac163ab765e71535499c69c8b0891d1f4b36d6040dcd05fc7bcf9bf9b949a4ea1d493907790cb73c781b0c9ac6003753f792fc bnxt_ktls.h +2e1f15e3444b3856f4d6c1c45e671a21b0f2deee5e4ece48d1c9a87955bd0d8b9bc0ce9cd5a04dd9d52967e2d1f060e222ac3eab13a15b78345273afb1d6a0e1 bnxt_nic_flow.c +cb761b989dd49a79345056dc9d17a17bb18faa70f073197e7399ae7c2c929b81ef745467c9962d988b8329f4b733b4c4c3d5e98406676d692e1cd7fdfbb7b12a bnxt_nic_flow.h +59043b84f804c967d4836d0f1d3ba2a1256597fecf79213a02e48d6c686b7310b5aa1c25c98aff3a78b402a7bf1c1ff0e1cc80c46206c368dd0524ede28ebf6b bnxt_nvm_defs.h +17f9eac2aee974b70e9b70b7eaac5bac9dd12c80247472148112d96984b9a952b1374764ba7cce715e90ad7018d878d9188732c9671bad0732aee420638282e5 bnxt_ptp.c +e06fd97184d4830516c2cb6342a46a1ee11d31299db6ee20151a357d1d83bd7faa94333efa35bb27cbd7bb04978ae7e9cc3615fb8d776d9ab9b428e42c9673a3 bnxt_ptp.h +0c7abe7d3c9879e5937e28f76e1f7d1aee95fd3334bb917a9263047925e33db0e16e17484c2a5e5d8c08d5c4652ee9ac3dd7d18ff54aad20960d71b3b9b39924 bnxt_sriov.c +097d189f8242517a0d78bde0f65d8e19cf0d80d84c74f12af4fb48ad894665e5a6afd6acd5f4ba4fb21637b5ec3e1b9b3b42817dd8a55cce6a7d9f0b9c35133d bnxt_sriov.h +e5060729dae5bc3fb795bb132828fbcc9012b4cdde93c37f1a9887e017380b34fab01a8b3efc826dabe4a7d8321e42cb3404a61d642e9c974702a206838b80a2 bnxt_tc.c +1af9e9768c3543bf23b79e75d7fe41340123fde4d01b74c58f826d8b61680514c558897af4904a6a50b9926d4b893d93c05955161cc769c3dd433174535aa738 bnxt_tc_compat.h 
+b20290dd8b4ad48229fa3d34c8b4adae98885403f49c77743e9eac0e97d0b9f1d514be9a243b50893238827b64308d0ed78f4aa32b87400b5f5ad2e0bc206bc2 bnxt_tc.h +90a8f6020843fbebabb5b6b89d94818188b9072f9e0d3316864efaca9a21f0a59f4e75f27db8869efdc5d959241ab6d6d815029e325443fea46c203987ae3035 bnxt_tfc.c +3cd3f83d5b32bfd3d845399f8403b1e72ba63c4dc36b3a482ecf344f7a9ab55588067478da720c464603c26fe3d8587bc913bf45252abe628d2e0f4ae1ceb8da bnxt_tfc.h +db0ee5c9700679a4f9e05f08d3acbbb94d62a68c25406bcbde859a1645f3f3228445d26f1872d96682d0d032a90fcbfbbbaa6997bdbdfe9d8b6b6b63adbaba9a bnxt_ulp.c +eaf72af9c36e406dea2ae1ecdfc123b04e5def8503d71d9886c5ed2a019a3014e94b0fd0537b2ae309fe3493c030ab4551f0cfcf119d524d4d61d7a09f0fc346 bnxt_ulp.h +2c7f7ef6da98627b4f46619b0a0b9f26c6a9fca5aef4b9b095fea213966b97e9696933846bb6c192a0960dd60887826e2eb25d9835b22ab7a86ae4ab09ec45a5 bnxt_vfr.c +3568df756f0a67ce434fa6fa2cc6065829b237619a6109da1e6052ad0530847d2e772e3c1f9926e794c8e2022b76edd2b10d53f9a27a779234a36b942733e792 bnxt_vfr.h +15cad5c54432609edc57e4776107c40f9d1e8a5e34182d1fb9e04ff41875aca295ef3deea1c5653ab2b52add8e9b45aacf88ebf01d61e89756cd494887141a0b bnxt_xdp.c +c9c8db6b7fc6b1c4c8f7159ee48b16fe96c4eabcd724318aab84e05e603a92947911d493d8d33c3a1d2645fd5586b0fe6b8d19343dbf8242d82d4eff1bdd2824 bnxt_xdp.h +7203f8e3698c9db9a5360d728049f088e3edd1ba01b410c374f72a9523f83522e64f5cc1a120be1877caf0bab67ebeb25291e363328c12afc5f09eb39047ed27 bnxt_netmap_linux.h +c9f902e02acf77aa81d83a257257c8b3be1169de94b4fa6510d6cfc115d885c0832708c8282132de43ca0bb771265cc0c14b7fbfbddd35dc33c383fd7fa07ca0 bnxt_sriov_sysfs.h +25b1e386fe4c6a5386e49cfcfce9ad8774dc043a93a9ebf70becedf222552459b52d74177fca25441649a63e295b6e8f0d63ae182a8666c7113f58ffcb689cc4 bnxt_sriov_sysfs.c +b31de9889490375c90a237312e7d4345d08390d6040d58f97227904878045c730c34087ffab936c208fd8b5458cd8954a8a526e671212e477be55a8526c8d67b bnxt_udcc.h +71eaee79072dbcdc2065c324e8b0acf14155477c59fa9be106be9697953e1b2ae86efc32c02fec66547a3807ec584d48f50b1d39f3f4128437e5fefa163fca31 bnxt_udcc.c 
+a9e0938ad553397ba1f10567cce48699ec538d39e9d8207c655b757f88d31c71b9fb4e384a07a3e3d154f582efbb3a4c97e2d5f6454ad63b7e176a7d0e0211ae bnxt_xsk.h +31fb9e824f7a4276e90ec06d5c73e5e6c8aef143e32228891e6c06ba83ecb345cbe5a4c2d40ba417998f5c1fc3368f0591f2ddf3f52a91d6e0928611db80dc56 bnxt_xsk.c +48adf36c3749afa04e7798eb8f4b882b52b44adb242e620afbf0da542ad68b85af10c233bbf6c556147ae4d7dad0867fc5cc954889bf682672307eedcd1351ae bnxt_devlink_compat.h +c5b96a4c6fea042245bbb644b944cd9dfc0d07c62ffc791589271c79216da8a659f6113398252f57ff7640078174bb7f0c71ab305b06d31fe620c01ce2c53c10 tf_core/tf_msg.c +52125a28596a6df30516f441996ee09d941f4510439b2c1c363f0527393d9875f36266cc7a097f24035fc0b19d71b2c37804ef1c15f3fe186f8f833be7745ce6 tf_core/tf_util.c +ca4849bfd14eb0f1033987b58aae9eaeae7f9438b7c07822ef2edce83ff25f7393aca7d531832bbd6ad2e6c03001242f7bb2e08feaa8a406fc7c2482f78bfacd tf_core/tf_util.h +69630d7ba7c1dd30d2c90403063b488b5b9e2cc29dbc5981805b1db2b6d90dcae4de7a1aeb1994f4a48674f0e7748f9c34e72862c233e5f927df45d7be4b352f tf_core/tf_msg.h +f3105ca4ca4e7a459a68b2e026cddb77edc3c59d0d6c0838b29d73fc9f1a5dad0c4be2d06d6f3ed26316982c5a0aba4ad5f61a0f181534d3f7d14c51d885a504 tf_core/tf_core.h +96e738e2ec12f7f0c1bffa2781a9af4189ce910a11c4f859496d003920223674d8af19e13a313ad9780a6a281c6a7a891cf669fa03f57ac51791077e791bfb1e tf_core/tf_device.h +068a124a6bc9c04127965d5b00d076c678fab07a97524817554f2d9b6d15f39772d051f7eed08b53d53d6c15136e4fa7b0344bbe333e72c8d2dfda21f18e9704 tf_core/tf_rm.h +8508a6ad224c731ec9795271c8499f795ff4b3294a84d9304fcafd4a0f5383c062f4035cd47797e296f788f4113c782532aa63491da50476b9966f2a3ca40373 tf_core/tf_session.h +0d84efea119895b29c547a74908686da58a274229c6fd845d972def34cc81500e5bc1cf75a1cb0025088b2866d4cf9daa128a08bd0ebb81fe26a932df3d9d970 tf_core/cfa_resource_types.h +f6bace710a276d4dff096b9e4c366a8f7b1aa1de2d61bfe063d6ed1b53fab4ef65c6020fa6cb2eafec95a5de5737d49aabf9ca5d5c3c7f097f072c09a78db161 tf_core/tf_tcam.h 
+2ded7987aa69b482118f7b0b8aef7b95a91d682954066506bfd588aa841a766581de999a5498f320f570fecfea867fc185b97b412c77978b3a22ea3a6f3cb86e tf_core/tf_em.h +22885b749e181af379a56db0518794208dace6f76a47b36efa96a152ba7a40087a09840d4c9fba1780d56a0a359d36d06c2ba64a16cd4fb0807351373e79b594 tf_core/tf_tbl.h +16d70d357415cf924454588b3a9cc4ba0acaaef1f02e92434031cb6561935844ab99966e15cc856e2feb935d611051d0d754ac33aa524e6b17953c9427aede39 tf_core/tf_if_tbl.h +21a1f3246b6df62f73fb637f8649ce83df741bce44b1b7bc6a98f5138fbac97d220058dd1bfa821f251139f4e4e98302f9da74c7ed99fc115c68e2ff6a06e34b tf_core/tf_identifier.h +69b3e3be441e90f593ab05919c0df7bc59db74cf8371697cd4117ab772a3613db5e2cd09cc156dd96fdc66fdd545fadf750e360d7355d99a2a293b78fa44ada5 tf_core/tf_global_cfg.h +66904e6c7ac5b371fe7a19fd64491de7a471031f2144dcf1dcb357a89186e61008447e43997495e7c3f5e79862bdeddcd0f3bc0ee20d97abf3c379d5b3bbee04 hcapi/bitalloc.c +98f2af824ac6b7b85584967cd3a8dd0afa18ee2371e97fd6d62f816188999830ef27dbf2771e145e29e308fc702aace0beedf7261b5f2a99b79955745f568101 hcapi/bitalloc.h +078e4339672968bc754eea9777b4f8955ca20ab10c9e0e2ded1aca622c49c64320cb9cc3213a14a5b12428de33c022e7578c1b5ad25a98bddf89ee639920d468 hcapi/cfa/hcapi_cfa_defs.h +1a97c9b741d04a9a7c75fb325c3345e6c4d24ae591d7af73a6d685b92d3eda24612b6fa4cee595d4994c7f4888105d7ccf4094c5adc1fbd92ebc52ad34534882 hcapi/cfa/cfa_p40_hw.h +a587a1a06fc2d8adf243b3de43e8c29a3e823c8af27c5826313c6038584b7145a9e217cfef2224b9d938d6b7b0f9e34eb5ed1b29c64b0d1edad199ca5b8912fe hcapi/cfa/cfa_p58_hw.h +565b94f4a32634e96f4a008ad1fc8f0f81d28a0bb59d507ba625fa070b31e71ecf5e22c681cb70f85acbd005ab72b77ee30735e690cb542e2b2b31bc92295bf6 hcapi/cfa/hcapi_cfa_p4.h +87b5af8e3c75f0edc2b0391764b2535d7ecc38639c6a592c5734113ac05dec0fb6ef6a21c7dabc5100ee0dd4bc29a102c1c56abe0635dff7fc2ab777b2d450ce hcapi/cfa/hcapi_cfa_p58.h +af136ed09afd922d8f264884d3547d272dc85b84b2ec10190f4d0f9f1cffabea80f17781aebdaf8dbb8eecaced4fb8fd68fdb504ebb2cd62152d1f48640a55bb tf_core/tf_session.c 
+21cffb2ff1bcf4f2ccbaf8a57e15d43975afb20044123be804e275128f6cd6f1c990b7f0574a9efb8a1c421a141f68efe4a8006ed69d8c57a26091d4ebd5ff52 tf_core/tf_rm.c +6020f4a7a183bbe3ea7d8625634353501759d036e1417968da8ffcc1b235d451fe58e5caf8dce50ffb428323dd82f484f66abe28f5b801e99ce3a64a548358a0 tf_core/tf_tcam.c +6b9197087ff8c474a215f8b809054e50be3afc887041e2716957c73d2d0fb734b63d353095522d74cf5d18c64ffb61245f85915b19c02f6a0b2823bdfdf0b2c8 tf_core/tf_tbl.c +7521c9d90cc3c0a50dd2fbd919a281eeaab98126f1638c6878b0d28d1dd3174bb16c563b07b68a53de77719c2c394c4ca82cd036de3c909cc76c15b343492580 tf_core/tf_identifier.c +365112277ca9fb6f81125b8efb2eeba2898f60f64e11df2ec6c1e0b24a9647555dece34dd90081d99939ef34021cd549eed0fa686f337067d0218c99aa28f1f8 tf_core/dpool.c +82109fdb227726eb4dd33b44e41cd66ddf17fddc338c186d29a53a3af4852ec84bc7ecc9f63f03878a81fca4b75528d657bb0f4cb49c5455b2780f3f30010dad tf_core/dpool.h +6d85c1e04bf91809ad60c174a2a46de5efa9a6bc94999ab861c190cf4495ed3dde9bef8b956b2d2ac0e3d2435964a1973fcee23689f93a2aa102e9b721224d33 tf_core/tf_em_hash_internal.c +20816255e84fda278486ee9a760fd511c4d9f4a303a1e7837cd51175e21cc702e2587aa98dae95c7345b5d6e41a0aa386871f9792481c91cfb0642e8d633cef8 tf_core/tf_em_internal.c +e66504b580ad1f3fd02edb31f50ff8a537a3edaafad3b67b344000da683613a0f6254fdf2f4fd1a624d7b41a1436f47ec2cf3a7e653d8c59aa1db2bdafcbe6a2 tf_core/tf_ext_flow_handle.h +129eb54701cfe36e6337c191082ef4b3413727501021fdbc7498e32faada2f60f231f819be8b5c6f17ee223f56cf7cf4bdf9f02c6b94b37080c5f650cddbdf2d tf_core/tf_global_cfg.c +7fdcd884f70bc3aa0d51c711d69426685e9cb0100eb9a45c97dfddc5d3df3d38dd649e2abeec3bdfcb2b9760e0737a0dc074ab2ed64be7496b42e96eb2aa1517 tf_core/tf_if_tbl.c +43b4bf89ff554173348e4af3a18f8542a3fc34e0d83b3ba47bcbbaced9189eebef9decc10751fbe3b3bef96837daee37b3a67b99a4e64fce8980c1f2accd880a tf_core/tf_sram_mgr.c +5f61c15c35d814341a9cf3dffbf7b8d2c5d5b8db0d4821310535bef987e1f06c6b80112b71e248f61d6c73db956675ec2dc7e2c4e724df8692634de71bf26734 tf_core/tf_sram_mgr.h 
+1ff971ce954f8d4b235eede9c15ab82f46bbfa16d37c035848b77c17c6135e47c5e60edc6e1b5ae9a10a56a4b08d6e240d7958a855f0c1558e8903c183605998 tf_core/tf_tbl_sram.c +f0767b92cdc2be23299805b7ad4dbf842863d4db55c4f4f0d7a48bec314213d4dd74f4cbbbeab87c0f50cb385423be4a1abe44faf79259a69bb5123030545966 tf_core/tf_tbl_sram.h +8a9bcfe6c681f99fd1e7c017826455c9a56357a8f6374e83a152562a125b97382f834ce5e34d9cd30acc022df6818890cb82549543a12cd1eb4c8b689f5b7b52 tf_core/rand.c +7021c8628b208a148251da6753987793ed9a2da0109709a677f9c33eab02264e67dcfb80e0b6bded6cfa2b7236a12e3d7f8449244c2dbfca171be68003b1d8c4 tf_core/rand.h +634bec7939abcf30b301aa0030deb6ab64c903f4ed2b8646c2ef40dc5e89dcc3bed5e52a98c50b769f4862b7a9e6a36a17cecc1c245acea98b5fb17bfb519fab tf_core/tf_device.c +b5189d7e29ee7224838e05fbbf106b5ef25c20381fb29f6dcd901636caf8f45a43b6796e5424c1c5940b5c440dbf32466b8cab74b56d072a158aeaa78ebb25ce tf_core/tf_device_p4.c +54ba80b3ac90d161a2dcbf1408ff9729b2d9616fba4ca5c9404a5324edeb4ebe0a76dfd449c9047a8a5e5d1cdba7d0258c0c1b20cd2ac13d264e7eaab28a96f3 tf_core/tf_device_p4.h +d1c61b4f2bb826ef4e032ca40a1d6341ff3106a370a6ddee83e0007d5f48bc592c4e3e67e4b718bd9d33ea13b1bb1b36d0162d68b69dde2b2397398220712c5b tf_core/tf_device_p58.c +8bad78052fe1749ffe3c6c0ebae89eabd57f9b2f04d41752a5c2a628da474a0544201f889fab20e47d6478abdf1bbff358efabe1332e50fb06d8fc5787f7725b tf_core/tf_device_p58.h +0861e2323d816dafe4b5427e08b23a2307f86a23f485cf8e844c393bd55e39cb85b03a98a04d7cedbcd2442aeb143822faaafe276337a418aceb16862e28a1e9 tf_core/tf_core.c +792b7c608176d4c86957650dea07f2adee05cd6f5868e28587749fa2eec53ce6b91e326d8e6ea326fe83dd1a4202b86376a3e1e52628c38ff01d7623b7f6bed7 tf_core/cfa_tcam_mgr.h +0a502f0ff27fd9069c686dbc1347c633b1e61bbb9d180af469bfa85c5b1861db3781c7df52518441fa43eefff44c5be00fda1b09fc11f8e2bade6549af6ea6f7 tf_core/cfa_tcam_mgr_device.h +01b6d40f3bd0d83347a95cfa5eeb61d0fe1f43c54168383eea7d81a3183c1b0342e75e25355c01bf6046729fd9bc8cecf016e3e1070e8c8351f1a409f30bad0a tf_core/cfa_tcam_mgr_hwop_msg.h 
+6adcce5d46e76707c732d8c265a5bc801923550deb04249339a805a9c6853c5870ae0d805dd92d0c57b972596d771dce341fc89dbf3414bf440e8879c3c41bf4 tf_core/cfa_tcam_mgr_p4.h +f15ef0cdcc0a37f45b06e8703e009315a903be01bfe538a099da9e4544721641a408833ba8051ab9c486ac3c1de457cafafc3387d14cd7ecaf4491329e663459 tf_core/cfa_tcam_mgr_p58.h +1282c362bca951a12fc303a8f35952803d5b6bc5cf52919f4a28f0eb9264ca3c4daff0c5d0fa622f69dd8d5475f78290b869f78bc01b80c39b92b3908275ed92 tf_core/tf_tcam_mgr_msg.h +65df8882c383d6b5e96b22b18eab774e6eec2cf98087e70aac183edda02c3ef3acb0fdcb6b817ba0c2db8daafd5cf4b99a720c9ce3b1a19eafed3f6f2b6e375a tf_core/cfa_tcam_mgr_hwop_msg.c +660d98c8964d53600d542da8e1e46478636e145c5a00f25fb598fd45a0006a188e2fd123f0a3cb124a40652d7bdc1af256a6e5d9af90247dda7be6a974335b7f tf_core/tf_tcam_mgr_msg.c +39fc8d9a87ab64b7a0db753dbff3190e1ba400bbf30fe1e7dfa61f433168926e1b1810d8e4ddc0972685e71f022bd4be512ddc8de5ec06337b327e704a491291 tf_core/cfa_tcam_mgr.c +00aba4bdf1c0691b54cd78f2bd3252a7e36fc88a3c4376ae878ab14a90a909a8b1afa42845d25cb1aec466073e5bd2c51ca0998fdc375ad9b990027aabd66508 tf_core/cfa_tcam_mgr_p4.c +6dec04c56ef053626ac7fffda3a9a3a2a7fd494feb3a4a6354976d7284feb5bc8a2643accdaa577853c4ff9b7b85916a99c344f4f6d387fa7e79355ca2e0fc85 tf_core/cfa_tcam_mgr_p58.c +a11af38789ae54514cdef2b7dc183836edfc334f3d1b26f28f9dfb4a96330cddb3c70142dba710e31e9efd4f489c5354bc1f59b19762e433e04695e8a046e11d hcapi/cfa_v3/mpc/cfa_bld_mpc.c +65cfdcc5771971780015209b775992989ef1cb4eded3c59dd4ac622f03fda0f9103348ab05f6b2db9f86050233d31eecdc24a7405527fab6268f9021a9402aca hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c +133eb0a1d7d6ea677f428fe0f9f798266edc7c4a5875063c14d89ee38aa01e7ecec21643e7404e74ee0b5b6fa9a45774a4f26817205f93c8fb85bba0c4cc3423 hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c +d6c2bdc2ea06794920be21d7e97b53e3b216246f60f00072f5e90ad8f7bee649f095334e6ceab0b84937928b48ce9b8eafc1825c1301bfa08567455b602892dc hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c 
+b553253a38e2df4d140d29a48d5fabbe5a57626f3729531fbedbbb9ad7cc8eb439a514478c18526f53beaba97b92c198f3e811c7cd167158101f1d01279d0dd9 hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h +2dca63304863f3822814e2e2530ae101a2bc147b01eaa0fa2351af0c5740c1d18559940e6c452cc292eda3e6456abaa969f20158902ac40e62bdcf25a6b4c70c hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h +4ed65784f722e4df9a0a84f4036401662d51f7ca46fa14ab5b6fb47c49ed61d53fac20427e9112bad94a5b7c4339dd427dbcbb532ff295d112f186a8e73f873f hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h +e9fd0f76ea50f208b9d87368b996f2441f8c15632afeb5ca65fd3e22c5fedeb6d4d1eb5f6f263671c677f9effaea5215fedde09ffdb0cbca5e84254b9e257c7a hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h +f5690a98675623fab1513b13e1829c987c373c0ed3e4b3db950367c0e1edb8a49a876c74da8b32f9b4bdf776ce37708121e304b869c60a5938087b05284cb9d0 hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h +900aa73d42dd87ee8c659973b283b15579971c3691436e1b48240cd5b49902a7196d38cb4ef0ad6db76d466d1167185123a1880d57c3f17846bbb355957fa8e6 hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h +bb95d88d69e1a737929cab65a98a8a151b414de066bcfdaa75e2edc4092d78dea6d6892414f53956f1a15142dd9a13a24bf009dd46651e3768c2bb83c1742605 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h +10c82813047d744d2d5d5e0d1c2bf9968d93df2a1e8cc48b90b58143bee909b6bf324add61fe9ad5536059035516802baa78215cd985a8d1dcc97667b646065e hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h +df2630ed7c4e0b5ada9faabf97b008471a556533e6f8215f28efad7224fb68364c9c7388b0af6791b9f8baf38a7a5dd8eb0040a7874f3e04c423f5618ef6f5f7 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h +27d7fa22d044736bac9fb07161d9be248e4e1c73cd1ea31e5ac45e61969609cc348a9fcba9788899961d80cababd894829b08fff53627ba2e6faca8bf06401f1 hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h +eda10745a9e126f52e2d2e420b0244d227879b8cfd301a9d31188ff61ffbdb18b305192b92f366e76a4fd12d8a24daf633fa76925fa2421ba873061aaff86f37 hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h 
+dc507f329427b8533842e2ab034d8a0e9832a37374d42395a72aa4d27bdff684ee5d9bae85a98d532d85f96a3acc5037f5560f80808aad32e0198d11c5f7aeaa hcapi/cfa_v3/mpc/include/cfa_bld_defs.h +e10bd36d106e8f2e5e26e48fef14e018ae49e0649d3662fceaabad0722d8eee6aa1f56ec842a57214dfd36aa2b68be25b95c3731802ad9d0a4076a946b544e57 hcapi/cfa_v3/include/sys_util.h +de4fee416c05c3e3925ff2ec2824a05e6ecd91d703abebc00cc2f4138f7987e28460961e55b951167a1244ccb91337b8dbe24c0361fb886d89e3c6ed921b5513 hcapi/cfa_v3/include/cfa_util.h +d5e0419cd3e5b7f15b3a42912a9f6fb823dae298971caae1f5763d57b3df1427af8c33415dbf3196cc3eb226cc880d605fb9ee922eaffa4edb530e41bd6857f3 hcapi/cfa_v3/include/cfa_types.h +0774a26e3cc3c599aed590ff542f5f9a84a8a8a507e3511e41bf9d817431a60548aead54baa682f04292f0846a7fb30f61302d8a3158351c6d40d68be83c6a1f hcapi/cfa_v3/include/cfa_resources.h +94843d16d767e987a448d3c396d3cfdfea8c798200a04f3ecba205c297a11df9d51ab6eb6959e36b605c9991a3c267fd27b541c86b7640aa0c8d8705f7b2e6cc hcapi/cfa_v3/mm/include/sys_util.h +c6f4c8bff661d4f6482a58887d8a5646005a2c0be733d0660c59dd9c745db7b88689a79dfc82149d543d1b8307a2f44849495ff6064970f523c0a37e7fcb5b3b hcapi/cfa_v3/mm/include/cfa_mm.h +c02e053a98352223b4867c6952aedd03802bb5ae2930c124d04501eccdbddc942dc3c884f73b6cda96d550c44c61a64f71a420f60c13e190c776c3177dc5a618 hcapi/cfa_v3/mm/cfa_mm.c +81addeda4e134a167fb4ea611ff7d6ace78950bd2ba2d1eb24e9f308a6955a518a9893b5d31f6b1b260e01698e3ee48a23639537e8e88562e3747a2b64e5705a hcapi/cfa_v3/tim/cfa_tim.c +5b7fcd8e861ade78f655ecbfca595161f5ebf557f50e7b85972424a39685be80f198a624712519391e10a1d06c9d7a6feebaf9fa30d5cbdff75d7be545d779ee hcapi/cfa_v3/tpm/cfa_tpm.c +c9316fbaeb589c0014d48ddb39d8a514aeca53ff2d1075480b485077e877686f7a18381b5c63cc13439a7b762b4345a750bac3dbb40fe4bd7a662d65583f6987 hcapi/cfa_v3/tim/include/cfa_tim.h +7a0acb7b25d54bd1d03c4532f9587c1548a48b56be9ad58cc499b2024b1b12d926f36a61dc4c763e049964f09bee4806809b3cd5501111de0a80b41b7ca9a336 hcapi/cfa_v3/tpm/include/cfa_tpm.h 
+565b94f4a32634e96f4a008ad1fc8f0f81d28a0bb59d507ba625fa070b31e71ecf5e22c681cb70f85acbd005ab72b77ee30735e690cb542e2b2b31bc92295bf6 hcapi/cfa/hcapi_cfa_p4.h +87b5af8e3c75f0edc2b0391764b2535d7ecc38639c6a592c5734113ac05dec0fb6ef6a21c7dabc5100ee0dd4bc29a102c1c56abe0635dff7fc2ab777b2d450ce hcapi/cfa/hcapi_cfa_p58.h +078e4339672968bc754eea9777b4f8955ca20ab10c9e0e2ded1aca622c49c64320cb9cc3213a14a5b12428de33c022e7578c1b5ad25a98bddf89ee639920d468 hcapi/cfa/hcapi_cfa_defs.h +6d3fac3d53cfaac858c0bab3c36a09433f22399919e8da863c4b4ff24451c95e0733eefc1f931163301888c8fc466b1ca4c1429111e1c3e7904f430245496338 hcapi/cfa/hcapi_cfa_p4.c +a587a1a06fc2d8adf243b3de43e8c29a3e823c8af27c5826313c6038584b7145a9e217cfef2224b9d938d6b7b0f9e34eb5ed1b29c64b0d1edad199ca5b8912fe hcapi/cfa/cfa_p58_hw.h +dc95276a56cc194bd2753a4eb13d7619a61808f5d14c980c394ed245610b78716563340618909b9b6f619cff167b9901c58e1b60acb2ef6afa04740102fc3117 hcapi/cfa/hcapi_cfa.h +bfaae809318f79c8a16335b2e61bf1a3494ffd7d1195be4ae1ece59ed83598e614d0f1bea2f1d4b2398b8545e273e03bdb79353f9f37c269fd48c0b5456e2386 hcapi/cfa/hcapi_cfa_p58.c +1a97c9b741d04a9a7c75fb325c3345e6c4d24ae591d7af73a6d685b92d3eda24612b6fa4cee595d4994c7f4888105d7ccf4094c5adc1fbd92ebc52ad34534882 hcapi/cfa/cfa_p40_hw.h +49b1752a4ae8602c2f22386bd309a5d725820d9dcdaef131a3c9d9ed8851df7f37935ca11cc0d333f65979177f71e9ce737d4207e544b25c94868935a19ea687 tfc_v3/tfc.h +4617ee278df24456f7e13ca9dbd63b3c7a0705dfd59cd58e2a41966267b32bd0c9caaddad98cdf886d18666c923de73535fac2334419f32184301c0cd4dc5d68 tfc_v3/tfc_act.c +cc9683dc0a067c78c52a29571027a8a9e4114ae73a831c464a728f46c0ea7db3d676f75cd94aa48ca3f4263a78293be93960648c5460b83b296e063701673edf tfc_v3/tfc_action_handle.h +aa53e4e8eecdfc3af56991d94d0fbdb4a58ff20591dc85f2a16186d8ad89e1836e663e8dade4d5f85375e31e5b46f43303ea239ccb2c1bdf652241bd538a8fe3 tfc_v3/tfc_cpm.c +432907089b3115fffb467bb56cfa96b49a1a8db7faf404dbce597d074579f207feaf4a79fd971fdadc9ade1e125ef469616baacd63a09aa1a8b6b68144a20f9b tfc_v3/tfc_cpm.h 
+17f322abdcf5325da6d18a620049c7a46103fc74d2456fbf9d30deb0c965b62dd0c861624380ec413f27e580053f26e3b6c8074af7fea58059822f360dea7cf4 tfc_v3/tfc_debug.h +db9d7542f0db5a200847b05fa27b52bafc2def66fee2d86f48489bbcba755cbbbe17b69b57d4b38e4d53c1f8de5d00c57b4825cb3ea6e930263b1211c37da051 tfc_v3/tfc_em.c +b90e57a6fc569a6d7470d98071b97a8ca2d344abb7fa23c7b52c38b1086fdba6bf2e09faa44b5196cfa615184ec8cb40e0ae5829ff79513623899b4d0c4066c7 tfc_v3/tfc_em.h +341d795440f475da349320e56aabe3c5f7208bac97df3a3085cda3fd6a772b58ba4777467b6eee8913336b18e60941c021b5c454800495a20a380a3f6845c6d0 tfc_v3/tfc_flow_handle.h +eb7b438ae3efe5c940d4e1a5152978ea0407977559e286a372133f7feee7183a91b2e16a03d6e3d23eb8bb2805e2e3c49fe86dc106afb2b0a0bf6488dd4282a1 tfc_v3/tfc_global_id.c +1fa7d906dc97fb9d7c801798f6952b5bbe6ce9fe4119f3f4406a30e7554dd7ab90967ef95358c4eb6860cf2f3ff67fe9904af3acb68233a8f3dc78cb378f230f tfc_v3/tfc_ident.c +72f0968af9dafe3b9ff481bcec7d56479af3f2319e3fa8f76d1d299f1f584e6489bb38ab11270bb7517a514f1083a39f4d9941c0e8812c9c0045c5c688c0a6b9 tfc_v3/tfc_idx_tbl.c +2d8d6e5e0a6d5f3441d015c1a2e7fe38dd6ae04199513009296dceb21afd0250aa237b394ffeb6a2dd68b4d81488fb3f3acf8ffeb516ddca1261fd949005a6a4 tfc_v3/tfc_if_tbl.c +7fa3ca7601c384cc80b9fe7f452d275c80361679a170c72631b5458469e91a06a05137491b4c4e57c3cee10ca4700b0c9ba04965e84e642227a6de9d96530892 tfc_v3/tfc_init.c +f36b0082ec482c49c4f4e365d5f4a513acd8b677e6fb16337084f8cc10d50424a1595647f0ea4b435ce2dddff744b79ca3ef41e5784de940672dae4c3258c95b tfc_v3/tfc_msg.c +1ff2dd461a766ec8f1593d2c2d9a7b9f54601a100e996469efd9774127e151d822d9373ecec1e721b0b435b2c03260daf23218e3a742cf97eb98b88ab9a07e95 tfc_v3/tfc_msg.h +19ed9c1a215030797cc0f7ae18ce1aac3b3e012c9b22af68dae0026a9152eef12a23f56263eaa22a90c64d4cf07b15a2f3ec44fe18345c7c8f8d3c6cea2abb5d tfc_v3/tfc_priv.c +5722ff116d924191bb69d9ae8d80cd20613d0e07eda9f636ceaaee6dfc61a0b9b2b3813839607150a3abeb4f9edeaca6a7de1e4be8493faf69b73b4e13eab2d5 tfc_v3/tfc_priv.h 
+ff756314f3325961be395238b85e9c2b9d55c8192c12fbfb6a9a9fd0a96e30e5b41aab28b060d851c750174a5e956bb0536c29f51da9e12140cb0e26656a1f53 tfc_v3/tfc_session.c +8d99e17246d9b965a918095066d59e3b341ac82e9a49c00e4b87ff867a51d1e760b0188dabd702e91e37e05cc5c5de89f9a97cb47202f21d1847cc92897baf1d tfc_v3/tfc_tbl_scope.c +4c71d2e170f0e43d402608d28157aa916e4920266b12f12469b4bf04027186ca40c5bce53f698df537bb23ae7ecf84ca0beef95fcb720d84e997ca192b5a8e8c tfc_v3/tfc_mpc_table.c +7a57e683dbd18c9608d0700157604e1631d4724e3eca2b52eebdd9490cc3f2f2ea0274978e4e6df2eaae771f9af1633fccb16693b71663efccfec4d4dd17cd61 tfc_v3/tfc_tcam.c +5f9c2d2be7a2c89dc94a727a7a352d205cc8e9fbb24ffafbef9378dc49ad229d95870ed44ac0baff804bcf0bc8e3c77d50ffc96a155d46e98572b501d2b25c36 tfc_v3/tfc_util.c +d5ee8fb67293d13c1b70e6e99cfbc780799a66c473daffec4a6cfd5584886442816824c4dd9b066dbcd0286e3c0ae281d10f7e92aad1f3fcb67302117a49aa85 tfc_v3/tfc_util.h +285c4495b7029363c1867cc78ad0249500b5c49d4ada209023188911bc977dbc83db1ffa1ba6fa70a3a36b450b44feecb41834eb6b435674dcc1df419ede1420 tfc_v3/tfc_vf2pf_msg.c +5c5c034546a8e70b09c22a97d593a1cc74277f8ad756f06c01095b4e5cb3a6f9e0e36329818aa162c2ae745edfe895217b15d86b40283df00b63b2ac6adbc4fc tfc_v3/tfc_vf2pf_msg.h +40dc97bb4522308f9b7b0a802e1ece6c414c334ee9db88f011a8adbd9eca90a2220211c22d73795e518ac3faf1e0d3e7e9cd89bb8555f383b541146300bc03a9 tfc_v3/tfo.c +16db101485b823c82d3a25611620ed113fb786ac953d93180c87d08cdef6b95217076e72deee6da51dd0bdd56f24bcf87862510770e91d96d40738719e94b32e tfc_v3/tfo.h +261c39416305693ec92caa1153f539cab39b322f607a5b97d3e503ef825cb55d8cd6684e8d1910aa21c20f0ce8967d5f3e7a57fcb1c48a937233c90d4351f85b tf_ulp/ulp_linux.h +4fa443db84e4485582dbf24f0df7759a8a40777fff36a7c35cc3029d04e0c49b630e74b30ed6375b5937b65bd04951ba4b07bac88f5bfceaa1af2304c34aae78 tf_ulp/ulp_tc_handler_tbl.c +3a46429478521784f32f6a35e322bf18225b64db578ebfd6858c15fab097f6105f09b96685875385ad1bb938f55dc168ba69ad7edbc47393873756ee5c20cf47 tf_ulp/ulp_tc_parser.c 
+960c7304a30310f01608f9e14ec6d3a229e86ad152c3d0aedf78293c530ef7932114dfa0ca2396386904afb85e23c2b0a570451226d699f976639fe267068eaf tf_ulp/ulp_tc_parser.h +e049b16cd36b891f330a75dd341b6cebd4005e14de56fc25bc619ea5e96cd0e0fbba806a44474d802928bf5d2d41018c9ea72e4ac4b825b2956a2e30ac25ff66 tf_ulp/bnxt_ulp_flow.h +7135c1ea19d1b3c12864c7656e70a1debe26de67ef827c3e8c5e77efb718e8fbee91d565c674d6539f32ae8fe020e238585916d8052353273576a3178a46dbc5 tf_ulp/bnxt_ulp_linux_flow.c +f58f853902be89312730aed1085a39825388fc3d4f38f6ef84b301db3794564d2d9c5a4baf1bfcbaa46eb46e06babee2e7edbace92bea8e3f55471dc52de4211 tf_ulp/ulp_def_rules.c +3d51438bbcb5cab474275fc52f41a7b3b8ef60b51aabceb28d09f1da60e96e07d34e466582a1b4b6f72d95cd56fac9c4c73a8c67f6a4e7879d358cdd2fcb7649 tf_ulp/ulp_matcher.c +8db6c1d0b4a16c91ce6c1cd8e3de0d3bd7724917ea7375a3779886dce48552a158c3d590081cda800d811c8af9b8416942a88362b274597783cf10a9dd3a11f2 tf_ulp/ulp_matcher.h +b5bdfca5e9361ae2ac4d4cc4c8c4f699cccb76449fba90967dbace6419feff01fb7e5dea685b8ba41ea80affec846297446f418f3034c399be0d7146b78105aa tf_ulp/bnxt_tf_ulp.c +54eadc397ce3db4d385f99bcd849c7b10e6936b676908f20037676a2e2f4782e72bf3bdfc867e30e22a919dd877c9f658ca06ea8a983532ec0f0d82c26c25b21 tf_ulp/bnxt_tf_ulp_p5.c +a14960179818137ac599745ab7b59f460ca484d1887d5daa989ae4e9498548bbc4bdc2176f034aa6884352f9707719fc5f67bb2be4e431c16f3273eccd833702 tf_ulp/bnxt_tf_ulp_p5.h +bb9f80c1f6eab79db42b6d3c95eca002ea72bf2afc0584ae0baa7ce2414e2c1579888712b623ddb8dbf34b1d7e3362b51f8b7bbad1086b6be176149148a66174 tf_ulp/bnxt_tf_ulp_p7.c +f45e3c9f40ff977cca0201091aadf0414407bd13c7df655467e8bea9bfbbf2a281a042c539343eb50fde98bfe3e2588effae761ca1346676026740b7044ba01d tf_ulp/bnxt_tf_ulp_p7.h +402a7cf713e3ca2c93d52831eabefaf60b94f9e257cd4cd09baa9eb7c7f9f10d1976c3a94952c75bdacc19f58e400023e2a61e7d021671e86cc51668b4fb3768 tf_ulp/ulp_alloc_tbl.c +363f26445ed32089348b30df9304eb8fb2c85f3bfe4a8c3ec91f862d6675b6707811a0d0d705627913bd193fb8ccc638284bfcecb8655f6d5bf114011e8da0fa tf_ulp/ulp_alloc_tbl.h 
+b7af6cb0517793c6331fe4322722653b7559b81117902cfd390c179fe53579d79c982310a5947eae4aeba4763b53a7e8aab4621b469a5e16fe5a781d413e4e04 tf_ulp/ulp_fc_mgr.c +4c6d834123892a072177af4ac2280da5df6444a955bdee3999ce0e7b7bb65c3be439e96f81b588b2db9ec1cf07226062b38dc89ab831a0840c8e27b00cff0810 tf_ulp/ulp_fc_mgr_p5.c +92370d7c50b3fa92d365655bb07704beba1ed48f495a7980dc74404bdbba7e796290c87eb96cf6aecaa64d1d28d9ae1395eb1a9bb22958a9f9a37f52ffd753c3 tf_ulp/ulp_fc_mgr_p7.c +73f566f39d1ee038259cd0e59b3f333a9cce8abe3bd22945c56a2097966b596a1b0f1949baeaa654437f0bfc1dcb4b9c52184703497bd8601d253cbfc3af40e5 tf_ulp/ulp_fc_mgr.h +bef799c2d3a9c84f120cbde2f968299702f966a2062df5445dd908187166dc5c9fcf8dc54037986422ebdc565a25d6294deea6af9f74b78e1d68830506704ad5 tf_ulp/ulp_flow_db.c +9a9bf4d86b87005dca44790d44a1ae67085313c104238ea185e6847ed1ee522de8bb8adce3232898679ee86964cbe0f55745f7909c9bea3256197a9780c7f2b5 tf_ulp/ulp_flow_db.h +4fdb97c5671fdd694b33ef9d56f2f44cd5a021c8bbcbc4e6a86e3f21950a5e921fb2cb1e55ecdacd5559408af77f6674162172f22553f629783e7cc086432f27 tf_ulp/ulp_gen_tbl.c +b28a588cac9f163d4b588560e54f5f262c8ddd2bffcbc79fe590a55466f75b126f3d1cc15f9ad8bc0471c8aef6161788384090b59a08ee5475e5fa8180ffd95f tf_ulp/ulp_gen_tbl.h +2fd57ffc5854e2907fca8d280fe8fa159bc4fe24dfe309a4b348bfb0d68be0a25c0da238e4c595a5e7b151854ae314e71aa8be96c48fbbf5def79b6bb9d123ab tf_ulp/ulp_mapper.c +678daa115f448a3befc22689d022cc2cf14848e502aeb980936af33d597df8636eb70a839a265cf416bcf5514f93f787ee361b8f9793a8f7a56ae624e412eb6e tf_ulp/ulp_mapper_p5.c +b4a3a3612c86a5a300b1832260722db53176749730ae9b05504be5a9091632587965d501bcf290d302c9f1c069f07a35b2412926270d28daf97ffe30730e0806 tf_ulp/ulp_mapper_p7.c +e8f71f7c0be9d62815930e826890752d5168dc9bc116d9805d5d2e5a9f82b6c4623ea23a6c4518e98fd985507dbd9030ef2a7dcab3927069e156e4f2a783daf2 tf_ulp/ulp_mapper.h +85375c3825afeb00a1c5e82b3f2cdce4fbecdfb63600912b75012a58e162420cf469d1a7ffdd66a573fde35817a9173644ccf5e65cc74efd529eaee38cb57354 tf_ulp/ulp_mark_mgr.c 
+305e8072270b4bc9b1987c4b2a3988c277931b70f73904b1258b5e693f2e28f1b6aea701a80f21996de7eceb3807e95ca60874c5c1d4e8a3b6847ecf5bebd6af tf_ulp/ulp_mark_mgr.h +a3248de2f1b3eea5ce72c78a20e707956aa65f9648ff548b0b375734dce8476cc9843a160e944c28a4d8d670c0bb957aa94e6c0a8ed726c5b86071b058b7d1c3 tf_ulp/ulp_nic_flow.c +a87a87af2af9ce9df0a6127c2496221a4105e269a5c1215914431cb78fb9297e5a046871ba5f06718311c35f49f6cd1c44fdb2ac684a1e69980fdf7dbcb2c1eb tf_ulp/ulp_nic_flow.h +08b5634a737a4851eb791712841f1a70b61e29662db5fc26a247021a2cbeafdd4b7515b02afbd92a4e18b0d31e988cf913501288c125e0a79a4d30ab4b7680d5 tf_ulp/ulp_port_db.c +c7445b5edbe81dc53fa8e15028cb8dc7ab2ac92b7ae9c4a958062bd29156a0483813601005ded624152a2c27a93198d535773fb25023d1c358223ea63ac28ead tf_ulp/ulp_port_db.h +965cb00f70a9dd16178c032bcef5aca624143208db5bcc215cd31edda9b8f513b52a363d10218165778d3e90d024d55d0c14b86e5462e3c87fb93dd873ac5611 tf_ulp/ulp_template_debug.c +0223df8e832b7d2c097697d467bbf68f0e79a6e9e77e2527fc27efb05fc7262e41054dae4bce188a66246e26269a1380af4fed60570996c2d8e5b2781b8ec8e8 tf_ulp/ulp_template_debug.h +c92706a421462eb3ef9d0bc68d486855ef1e829a3a43ca7d162dc6f34030a5cfc857349518c424a20b89af656118411247f90260767e47a4015bafbcd6aa950a tf_ulp/ulp_template_debug_proto.h +50da5b635d2dbb5b1612b8b9392e3fdbbf4678dac42d328a3c62e22727620baee1358b90665eb8185938a2e6cc69f060b979f52956e862a1c537684023ab6b35 tf_ulp/ulp_tf_debug.c +42a11db9acaca2b4eef996fe0f7e3d7cceb29cdcebe10288154a6b337a2f84eaf5d3674e9891112f282089cef2723d184cf0a2d4fe875b4ab3bef0a475115db7 tf_ulp/ulp_tf_debug.h +a1cbc1eea55e9eb5743e8c62a056dc60ea29752c4d825f5dc512cadc7fbae1e1b5e43ecce826cfaee5fa6104328c15cbee9425555c96b69effd8dacf89054cb6 tf_ulp/bnxt_tf_common.h +054c3ef48bc169f07efba4e7090e1a7ec8edcec1240639786eff4bc27ce7d80443e1620646370af2481c47b23480689a41b32a0e729745518035d2758ce355af tf_ulp/bnxt_tf_ulp.h +8053ebbb397792cd2445b3af322e23f6f5954c05e9f13d903b06a73d048f45af56adb00c7b78258f8392fa8a3d19de73b73f3686fb1c427fd2060a124c0e7d0a tf_ulp/ulp_utils.c 
+0fd4e7ee736fd4459922a10dff054cd821170ca97c6329fbb6a1b48345aef17bebbb8f0856066119cd0b7ba98f2cb98787d72d95d1ad680132cf1f5b005be512 tf_ulp/ulp_utils.h +ee94fe0fc5943227b68e5e445b77ab9270421e6d0d39c236c3ac1ee548aaa2b10f64e7ea4dba4a214155ec8493dfc3a675cf77fbe329b3f93eeae4535ca4ac5e tf_ulp/ulp_udcc.c +303350ebfae8d466ce0e2ac74ba7889ab15a52836d4dcf15c8a61297883e22114013d0a545672da2e61e879690d06d8f951ba4658e6599a7c527b0b8cd7315b2 tf_ulp/ulp_udcc.h +50109c653fe840981dd60da4534cd5a41d028f6aea46e733a01b6938e02e49c6801f8d2b7ecd6ec083580854bb3b91b6762b7aba5f7d43ad2594c1cf7f4e9437 tf_ulp/ulp_generic_flow_offload.c +5ef7ccfab8a460c33011c340c72d99d6c0362eaf89f6a936963f41e8c4b86e9d159d30c5ac010b2ddf1688886a3c038f47820ade889d768d2359cebdff7e918c tf_ulp/ulp_generic_flow_offload.h +1a792be0477d81bec9bf0180033538e3a631d27d8f6c670ec1e4ca7f6e6a6144f0a179bfb87a96558ceeec518f9b5c9723a3b900d0ba67652ac8750ffd8fcde1 tf_ulp/bnxt_tf_tc_shim.c +a0b6f461e42903cf5cfb5083fd48c786a491c97303a44ce5f94e4dfd5adec8511bc08fd3fde0a816cc0fe7350c428be5e009872062d6f9b5b67c7c531c66e04a tf_ulp/bnxt_tf_tc_shim.h +b3c38c0b4d95e64e7d6a3f00c183b45620063cbe52f39e50f5aec987f450505cf6747581932722d7f206f19f4a285c00b7f6acdcd39a2e87b359cec7a2a74ddd tf_ulp/bnxt_ulp_meter.c +9f03e34b4222d99beb970a3df85b0822b781d684e230f1098dee1c2087d93cc997a1b9dafaf53f03d6d6df4f65d16d438e842d0eb5c5885c4c0e233e0718e1c2 tf_ulp/ulp_template_struct.h +8532f7004f7fe11ff32d40bccde1be08065d132ee51251b206a6afed764e332a32511f2c8ee3e58a428a2a69065c4f1eae963401d6681a0ef29ba7056765f23e tf_ulp/ulp_tc_custom_offload.c +ea12dd0a939632ca15ee87bd38f0053bf305c46b9ee9731bd28c357851d41f706810e514b590fef7e64e4e1c1e507ad1f08e94f8d165c2b3665617f8d8415568 tf_ulp/ulp_tc_custom_offload.h +1959325afbc5b234d748b5f1974c526664e12fc7e40865218bd76970bd80050f50ad3a3f7e0264480007bed6e156729ad4823e14bc2c6525ea58d6ba907b6890 tf_ulp/ulp_tc_rte_flow_gen.c 
+44dc39c3b5b41c62edc69e7eaffac0ad92a426c33f0f9054a15d7492257582cbe00a0993428074702e8383beb8b9ff9366014cb096a7835614e50a3e2df3d90b tf_ulp/ulp_tc_rte_flow.h +e1bbd865cb81cafbe22cbe49f0f0429ffea48c8fdf3050474aa94fd798ab429634d2086e7a0df64a7742bf3b3f062cd58620c959ba893365228379608125cfef tf_ulp/generic_templates/ulp_template_db_enum.h +4c77f8fd840a08f9643989d10a4a57ad1a2567f12447bfd6f94559380e7a578eb07291a89fa5de0ea15ae5871de062c21fbd245be0942dbdced4b55570b6c201 tf_ulp/generic_templates/ulp_template_db_field.h +bbf08d3547968c803d62d275c880985fce7f8f4faaf0ea49bee660d17ae6687bbca3f0190dbcd1b1e618738148de2ccb96e51880c94200380aac0a2f73753152 tf_ulp/generic_templates/ulp_template_db_tbl.h +e852825d9707ca4126dcdecc4a3a7143b75fc01db057ecd63b81cbf57c7395a584076d770f755bf35ab3d20cdd45f1fce37a9c6450c11e5b4b9b679b31349b77 tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c +986b3905f14016bd79ed00498659ed906dba7e77dd303ec984c042f1f9a59ad7b634946d89380b8809a0087886c1ff125dabae3d63db932a1794447cb1f0fd85 tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c +02544f03c31c3e7421b314f1e43abb13d6c8be8dc303d507989987e1372401bf3fd0d52f000dbf6809dbd9e0ccc0e1ae1e9444c3900bdaea579ef685e9e7ae8c tf_ulp/generic_templates/ulp_template_db_thor_act.c +81980881e4ee724fb4224f6a5f15ca063a7309aacd361653f76fcdb82f14203e40cac2b011289b9337e73fff66c99720839795aa1b99a784c4008daa040b83fe tf_ulp/generic_templates/ulp_template_db_thor_class.c +0973f0adb013a80f754d87453d26fbe33d6c78bc00006c74fcd5f0f64fd1671b4914f1f702ae64b348e4dc5e5001b81faa03471e35078b226fe8deb207fb3200 tf_ulp/generic_templates/ulp_template_db_thor2_act.c +7c70088a7c6ee59fdd9762100189e8c1a7221aaa459e8b06587822c348c64fe6c862384313a296003c1f41f8afb31934a77ba4d56429f59933c3c7f106279d85 tf_ulp/generic_templates/ulp_template_db_thor2_class.c +2025b41d37752eed09e3c71af706b3588c8f2424eb65bb958e420f98a2e7f550b5f2e73ed5403b32dd407825964daa3615744548d5618a8ac9a4f793d2a32584 tf_ulp/generic_templates/ulp_template_db_tbl.c 
+150fcea5c75fd6d8131366f261c4b2627535cb1e2151c7fa36a5d7a20272f910fb5bfaca64d4088b31530b56e73a1a6bc97344a44634ed4d53974e51b7c5c4fa tf_ulp/generic_templates/ulp_template_db_act.c +e966c36b159a6749b5928223c442d371023e47796e148953a8743d6fe20fef17f9a97409770b0d3df4b3176a5310d5a9f4e2ac502e59f9ef8515877f76fa0665 tf_ulp/generic_templates/ulp_template_db_class.c +cfec8347dbd37db134e0a6ac0e3747f76f8ef0db6929bbe22503b78821f0ea1fb0ad3b9e1217c9e2d525f1f89ae0b5683aac75d3106994fe21eb02648f94b6a9 find_src.awk +c4ab703e93d6698d0ba17b4cc642eabea3533bd3f6163b523cda06dfa70fd3288386472f981058214dbdd94067c520da325e78d9118c6f93859cd7b1179b4c21 ChangeLog +aee80b1f9f7f4a8a00dcf6e6ce6c41988dcaedc4de19d9d04460cbfb05d99829ffe8f9d038468eabbfba4d65b38e8dbef5ecf5eb8a1b891d9839cda6c48ee957 COPYING +03ecb05c72c926c37386df29c31542751bab78cfa591be913655a78f60f327c1d6e04d8e87a4faf238b15dbd3358a8a5ae4752d82d9fce1bf9365a85e7dcbb26 Makefile +ce37c849b8fb51afea53d4fda67bd82f52a3ff7affd44024a1f0c6a914b3749dd6ad24ee11ac8d685dbd6ff4aec426bf698e208b7228be00938ae56965228c75 README.TXT diff --git a/drivers/thirdparty/release-drivers/bnxt/Makefile b/drivers/thirdparty/release-drivers/bnxt/Makefile new file mode 100644 index 000000000000..f9d78206b343 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/Makefile @@ -0,0 +1,1558 @@ +#!/usr/bin/make +# Makefile for building Linux Broadcom Gigabit ethernet driver as a module. +# $id$ +KVER= +ifeq ($(KVER),) + KVER=$(shell uname -r) +endif + +KVER_MAJ=$(shell echo $(KVER) | cut -d "." -f1) + +__ARCH=$(shell uname -m) + +# PREFIX may be set by the RPM build to set the effective root. 
+PREFIX= +ifeq ($(shell ls /lib/modules/$(KVER)/build > /dev/null 2>&1 && echo build),) +# SuSE source RPMs + _KVER=$(shell echo $(KVER) | cut -d "-" -f1,2) + _KFLA=$(shell echo $(KVER) | cut -d "-" -f3) + _ARCH=$(shell file -b /lib/modules/$(shell uname -r)/build | cut -d "/" -f5) + ifeq ($(_ARCH),) + _ARCH=$(__ARCH) + endif + ifeq ($(shell ls /usr/src/linux-$(_KVER)-obj > /dev/null 2>&1 && echo linux),) + ifeq ($(shell ls /usr/src/kernels/$(KVER)-$(__ARCH) > /dev/null 2>&1 && echo linux),) + LINUX= + else + LINUX=/usr/src/kernels/$(KVER)-$(__ARCH) + LINUXSRC=$(LINUX) + endif + else + LINUX=/usr/src/linux-$(_KVER)-obj/$(_ARCH)/$(_KFLA) + LINUXSRC=/usr/src/linux-$(_KVER) + endif +else + LINUX=/lib/modules/$(KVER)/build + ifeq ($(shell ls /lib/modules/$(KVER)/source > /dev/null 2>&1 && echo source),) + LINUXSRC=$(LINUX) + else + LINUXSRC=/lib/modules/$(KVER)/source + endif +endif + +KDIR ?= $(srctree) +ifneq ($(KDIR),) + LINUX=$(KDIR) + LINUXSRC=$(LINUX) +endif + +ifeq ($M,) + BNXT_SRC=$(CURDIR) +else + BNXT_SRC=$(M) +endif +BNXT_SRC=$(srctree)/drivers/net/ethernet/broadcom/bnxt + +ifeq ($(shell ls $(LINUXSRC)/include/uapi/linux > /dev/null 2>&1 && echo uapi),) + UAPI= +else + UAPI=uapi +endif + +ifeq ($(BCMMODDIR),) + ifeq ($(shell ls /lib/modules/$(KVER)/updates > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.conf > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell grep -q "search.*[[:space:]]updates" /etc/depmod.d/* > /dev/null 2>&1 && echo 1),1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + ifeq ($(shell expr $(KVER_MAJ) \>= 3), 1) + BCMMODDIR=/lib/modules/$(KVER)/updates + else + BCMMODDIR=/lib/modules/$(KVER)/kernel/drivers/net + endif + endif + endif + endif +endif + +ifneq ($(shell grep -o "pci_enable_msix_range" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG = -DHAVE_MSIX_RANGE +else + DISTRO_CFLAG = +endif + 
+ifneq ($(shell grep -so "GENL_ID_GENERATE" $(LINUXSRC)/include/uapi/linux/genetlink.h),) + DISTRO_CFLAG += -DHAVE_GENL_ID_GENERATE +endif + +ifneq ($(shell grep -w "inet_pernet_hashinfo_alloc" $(LINUXSRC)/include/net/inet_hashtables.h),) + DISTRO_CFLAG += -DHAVE_PERNET_HASH +endif + +ifneq ($(shell grep "genl_register_family_with_ops" $(LINUXSRC)/include/net/genetlink.h),) + DISTRO_CFLAG += -DHAVE_GENL_REG_FAMILY_WITH_OPS +endif + +ifneq ($(shell grep -A 8 "genl_family {" $(LINUXSRC)/include/net/genetlink.h | grep -o "struct nla_policy"),) + DISTRO_CFLAG += -DHAVE_GENL_POLICY +endif + +ifneq ($(shell grep "hlist_for_each_entry_safe" $(LINUXSRC)/include/linux/list.h | grep "tpos" > /dev/null 2>&1 && echo tpos),) + DISTRO_CFLAG += -DHAVE_OLD_HLIST +endif + +ifneq ($(shell grep -o "csum_level" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_CSUM_LEVEL +endif + +ifneq ($(shell grep -o "build_skb" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_BUILD_SKB + ifneq ($(shell grep "build_skb" $(LINUXSRC)/include/linux/skbuff.h | grep "int frag_size" > /dev/null 2>&1 && echo frag_size),) + DISTRO_CFLAG += -DHAVE_NEW_BUILD_SKB + endif + ifneq ($(shell grep -o "napi_alloc_frag" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_NAPI_ALLOC_FRAG + endif + ifneq ($(shell grep -o "skb_free_frag" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FREE_FRAG + endif + ifneq ($(shell grep -o "^struct sk_buff \*napi_build_skb" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_NAPI_BUILD_SKB + endif +endif + +ifneq ($(shell grep -o "inner_network_offset" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_INNER_NETWORK_OFFSET + ifneq ($(shell grep -o "inner_eth_hdr" $(LINUXSRC)/include/linux/if_ether.h),) + DISTRO_CFLAG += -DHAVE_INNER_ETH_HDR + endif +endif + +ifeq ($(shell grep -o "skb_frag_size" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DNO_SKB_FRAG_SIZE +endif + +ifneq ($(shell grep -so 
"flow_keys" $(LINUXSRC)/include/net/flow_keys.h),) + DISTRO_CFLAG += -DHAVE_FLOW_KEYS +endif + +ifneq ($(shell grep -o "PKT_HASH_TYPE" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_HASH_TYPE +endif + +ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL_CSUM" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL_CSUM +else +ifneq ($(shell grep -o "SKB_GSO_UDP_TUNNEL" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_GSO_UDP_TUNNEL +endif +endif + +ifneq ($(shell grep -so "define NAPI_GRO_CB" $(LINUXSRC)/include/net/gro.h),) + DISTRO_CFLAG += -DHAVE_GRO_H +endif + +ifneq ($(shell grep -so "define _LINUX_NET_QUEUES_H" $(LINUXSRC)/include/net/netdev_queues.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_QUEUES_H +endif + +ifneq ($(shell grep -o "skb_frag_page" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FRAG_PAGE +endif + +ifneq ($(shell grep -o "skb_frag_off_add" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FRAG_ACCESSORS +endif + +ifneq ($(shell grep -o "typedef struct bio_vec skb_frag_t" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DSKB_FRAG_USES_BIO + ifneq ($(shell grep -o "skb_frag_fill_page_desc" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_FRAG_FILL_PAGE_DESC + endif +endif + +ifneq ($(shell grep "skb_checksum_none_assert" $(LINUXSRC)/include/linux/skbuff.h > /dev/null 2>&1 && echo skb_cs_none_assert),) + DISTRO_CFLAG += -DHAVE_SKB_CHECKSUM_NONE_ASSERT +endif + +ifneq ($(shell grep -o "xmit_more" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_XMIT_MORE +endif + +ifneq ($(shell grep -so "skb_mark_for_recycle" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_SKB_MARK_RECYCLE + ifneq ($(shell grep "skb_mark_for_recycle" $(LINUXSRC)/include/linux/skbuff.h | grep "struct page"),) + DISTRO_CFLAG += -DHAVE_OLD_SKB_MARK_RECYCLE + endif +endif + +ifneq ($(shell grep -so "min_tx_rate" 
$(LINUXSRC)/include/$(UAPI)/linux/if_link.h),) + DISTRO_CFLAG += -DHAVE_IFLA_TX_RATE +endif + +ifneq ($(shell grep -so "IFLA_XDP_PROG_ID" $(LINUXSRC)/include/$(UAPI)/linux/if_link.h),) + DISTRO_CFLAG += -DHAVE_IFLA_XDP_PROG_ID +endif + +ifneq ($(shell grep -o "dma_set_mask_and_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_SET_MASK_AND_COHERENT +endif + +ifneq ($(shell grep -o "dma_set_coherent_mask" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_SET_COHERENT_MASK +endif + +ifneq ($(shell grep -w "vmap" $(LINUXSRC)/include/linux/dma-buf.h | grep "struct dma_buf_map"),) + DISTRO_CFLAG += -DHAVE_DMABUF_NEW_VMAP +endif + +ifneq ($(shell grep -w "vmap" $(LINUXSRC)/include/linux/dma-buf.h | grep "struct iosys_map"),) + DISTRO_CFLAG += -DHAVE_IOSYS_VMAP +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/dma-attrs.h > /dev/null 2>&1 && echo dma_attrs),) + DISTRO_CFLAG += -DHAVE_DMA_ATTRS_H +endif + +ifneq ($(shell grep -o "dma_map_page_attrs" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_DMA_MAP_PAGE_ATTRS +else + ifneq ($(shell grep -so "dma_map_page_attrs" $(LINUXSRC)/include/asm-generic/dma-mapping-common.h),) + DISTRO_CFLAG += -DHAVE_DMA_MAP_PAGE_ATTRS + endif +endif + +ifneq ($(shell grep -o "dma_zalloc_coherent" $(LINUXSRC)/include/linux/dma-mapping.h),) + DISTRO_CFLAG += -DHAVE_DMA_ZALLOC_COHERENT +endif + +ifneq ($(shell grep -o "ndo_udp_tunnel_add" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_UDP_TUNNEL + ifneq ($(shell grep -A 24 "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h | grep -o "ndo_udp_tunnel_add"),) + DISTRO_CFLAG += -DHAVE_NDO_UDP_TUNNEL_RH + endif +else + ifneq ($(shell grep -o "ndo_add_vxlan_port" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_ADD_VXLAN + ifneq ($(shell grep -so "vxlan_get_rx_port" $(LINUXSRC)/include/net/vxlan.h),) + DISTRO_CFLAG += -DHAVE_VXLAN_GET_RX_PORT + endif + endif +endif + +ifneq 
($(shell grep -o "struct dev_addr_list" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_DEV_ADDR_LIST +endif + +ifneq ($(shell grep -o "ipv6_hopopt_jumbo_remove" $(LINUXSRC)/include/net/ipv6.h),) + DISTRO_CFLAG += -DHAVE_IPV6_HOPOPT_JUMBO_REMOVE +endif + +ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h > /dev/null 2>&1 && echo real_tx),) + DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_TX +else + DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX +endif + +ifneq ($(shell grep "netif_set_real_num_tx" $(LINUXSRC)/include/linux/netdevice.h | grep void > /dev/null 2>&1 && echo netif_set_real),) + DISTRO_CFLAG += -DVOID_NETIF_SET_NUM_TX +endif + +ifneq ($(shell grep -o "netdev_tx_sent_queue" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_TX_QUEUE_CTRL +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_dissector.h > /dev/null 2>&1 && echo flow),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR + ifneq ($(shell grep -so "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h),) + ifneq ($(shell grep -A 2 "static inline bool skb_flow_dissect_flow_keys" $(LINUXSRC)/include/linux/skbuff.h | grep -o "unsigned int flags"),) + DISTRO_CFLAG += -DHAVE_SKB_FLOW_DISSECT_WITH_FLAGS + endif + ifneq ($(shell grep -o "FLOW_DIS_ENCAPSULATION" $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_FLOW_KEY_CONTROL_FLAGS + endif + endif +endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/flow_offload.h > /dev/null 2>&1 && echo flow_offload),) + DISTRO_CFLAG += -DHAVE_FLOW_OFFLOAD_H + ifneq ($(shell grep -so "struct flow_cls_offload" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_TC_FLOW_CLS_OFFLOAD + endif + ifneq ($(shell grep -o "flow_block_cb_setup_simple" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_SETUP_TC_BLOCK_HELPER + endif + ifneq ($(shell grep -o "__flow_indr_block_cb_register" $(LINUXSRC)/include/net/flow_offload.h || \ + grep -o 
"flow_indr_block_bind_cb_t" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CB + ifneq ($(shell grep -A 1 "void flow_indr_dev_unregister" $(LINUXSRC)/include/net/flow_offload.h | grep -o "flow_setup_cb_t \*setup_cb"),) + DISTRO_CFLAG += -DHAVE_OLD_FLOW_INDR_DEV_UNRGTR + endif + ifneq ($(shell grep -A 5 "flow_indr_block_cb_alloc" $(LINUXSRC)/include/net/flow_offload.h | grep -o "Qdisc \*sch"),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CB_QDISC + endif + ifneq ($(shell grep -A 6 "struct vxlan_rdst {" $(LINUXSRC)/include/net/vxlan.h | grep -o "\*remote_dev"),) + DISTRO_CFLAG += -DHAVE_VXLAN_RDST_RDEV + endif + endif + ifneq ($(shell grep -o "FLOW_ACTION_POLICE" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_POLICE + endif + ifneq ($(shell grep -o "flow_action_basic_hw_stats_check" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK + endif + ifneq ($(shell grep -o "flow_indr_dev_register" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_DEV_RGTR + endif + ifneq ($(shell grep -A 2 "flow_stats_update" $(LINUXSRC)/include/net/flow_offload.h | grep -o drops),) + DISTRO_CFLAG += -DHAVE_FLOW_STATS_DROPS + endif + ifneq ($(shell grep -A 3 "flow_indr_block_bind_cb_t" $(LINUXSRC)/include/net/flow_offload.h | grep -o cleanup),) + DISTRO_CFLAG += -DHAVE_FLOW_INDR_BLOCK_CLEANUP + endif + ifneq ($(shell grep -o "cb_list_head" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_INDIR_BLK_PROTECTION + endif + ifneq ($(shell grep -o "FLOW_ACTION_MIRRED_INGRESS" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_ACTION_MIRRED_INGRESS + endif + ifneq ($(shell grep -o "flow_stats_update" $(LINUXSRC)/include/net/flow_offload.h),) + DISTRO_CFLAG += -DHAVE_FLOW_STATS_UPDATE + endif +endif + +ifneq ($(shell grep -o "bitmap_zalloc" $(LINUXSRC)/include/linux/bitmap.h),) + DISTRO_CFLAG += -DHAVE_BITMAP_ZALLOC 
+endif + +ifneq ($(shell ls $(LINUXSRC)/include/net/udp_tunnel.h > /dev/null 2>&1 && echo udp_tunnel),) + DISTRO_CFLAG += -DHAVE_UDP_TUNNEL_H +endif + +ifneq ($(shell grep -o "ether_addr_equal" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETHER_ADDR_EQUAL +endif + +ifneq ($(shell grep -o "ether_addr_copy" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETHER_ADDR_COPY +endif + +ifneq ($(shell grep -o "eth_broadcast_addr" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_BROADCAST_ADDR +endif + +ifneq ($(shell grep -o "eth_get_headlen" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_GET_HEADLEN +endif + +ifneq ($(shell grep -o "eth_hw_addr_random" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_HW_ADDR_RANDOM +endif + +ifneq ($(shell grep -s "eth_get_headlen" $(LINUXSRC)/include/linux/etherdevice.h | grep -o "struct net_device"),) + DISTRO_CFLAG += -DHAVE_ETH_GET_HEADLEN_NEW +endif + +ifneq ($(shell grep -o "eth_hw_addr_set" $(LINUXSRC)/include/linux/etherdevice.h),) + DISTRO_CFLAG += -DHAVE_ETH_HW_ADDR_SET +endif + +ifneq ($(shell grep -A 2 "get_ringparam" $(LINUXSRC)/include/linux/ethtool.h | sed '/UEK_KABI_USE/,+2 d' | grep -o "struct kernel_ethtool_ringparam"),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_GET_RING_EXT + ifneq ($(shell grep -so "ETHTOOL_TCP_DATA_SPLIT_ENABLED" $(LINUXSRC)/include/uapi/linux/ethtool_netlink.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_TCP_DATA_SPLIT + endif +endif + +ifneq ($(shell grep -o "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_RXNFC + ifneq ($(shell grep -A 2 "get_rxnfc" $(LINUXSRC)/include/linux/ethtool.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_RXNFC_VOID + endif +endif + +ifneq ($(shell grep -o "get_rxfh_key_size" $(LINUXSRC)/include/linux/ethtool.h),) + ifneq ($(shell grep -o "ETH_RSS_HASH_TOP" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_GET_RXFH_KEY_SIZE + endif +endif + 
+ifneq ($(shell grep -o "(\*set_rxfh)" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_SET_RXFH +endif + +ifneq ($(shell grep -o "get_rxfh_indir_size" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_RXFH_INDIR_SIZE +endif + +ifneq ($(shell grep -o "set_phys_id" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_SET_PHYS_ID +endif + +ifneq ($(shell grep -A 2 "get_coalesce" $(LINUXSRC)/include/linux/ethtool.h | \ + grep -o "kernel_ethtool_coalesce"),) + DISTRO_CFLAG += -DHAVE_CQE_ETHTOOL_COALESCE +endif + +ifneq ($(shell grep -so "ethtool_tcpip6_spec" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_IP6_SPEC +endif + +ifneq ($(shell grep -so "linkmode_set_bit" $(LINUXSRC)/include/linux/linkmode.h),) + DISTRO_CFLAG += -DHAVE_LINKMODE +endif + +ifeq ($(shell grep -o "rx_cpu_rmap" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP +else + ifneq ($(shell grep -o "irq_run_affinity_notifiers" $(LINUXSRC)/include/linux/interrupt.h),) + DISTRO_CFLAG += -DNO_NETDEV_CPU_RMAP + endif +endif + +ifneq ($(shell grep -o "hw_features" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "get_netdev_hw_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_HW_FEATURES + endif +endif + +ifneq ($(shell grep -o "netdev_notifier_info_to_dev" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_NOTIFIER_INFO_TO_DEV +endif + +ifneq ($(shell grep "register_netdevice_notifier_rh" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_REGISTER_NETDEVICE_NOTIFIER_RH +endif + +ifneq ($(shell grep -o "hw_enc_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_HW_ENC_FEATURES +endif + +ifneq ($(shell grep -o "sriov_configure" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DPCIE_SRIOV_CONFIGURE + ifneq ($(shell grep -A 2 "pci_driver_rh" $(LINUXSRC)/include/linux/pci.h | \ + grep -o 
"sriov_configure"),) + DISTRO_CFLAG += -DSRIOV_CONF_DEF_IN_PCI_DRIVER_RH + endif +endif + +ifneq ($(shell grep -o "pci_vfs_assigned" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_VFS_ASSIGNED +endif + +ifneq ($(shell grep -o "pci_num_vf" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_NUM_VF +endif + +ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_FEATURE_CONTROL + endif +endif + +ifneq ($(shell grep -o "ndo_features_check" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_FEATURES_CHECK +endif + +ifneq ($(shell grep -o "ndo_rx_flow_steer" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "netdev_rfs_info" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_RX_FLOW_STEER + endif +endif + +ifneq ($(shell grep -o "ndo_busy_poll" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_BUSY_POLL + endif +endif + +ifneq ($(shell grep -o "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64 + endif + ifneq ($(shell grep "ndo_get_stats64" $(LINUXSRC)/include/linux/netdevice.h | grep -o "void"),) + DISTRO_CFLAG += -DNETDEV_GET_STATS64_VOID + endif +endif + +ifneq ($(shell grep -o "ndo_get_vf_config" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_GET_VF_CONFIG +endif + +ifneq ($(shell grep -A 2 "ndo_bridge_getlink" 
$(LINUXSRC)/include/linux/netdevice.h | grep -o "nlflags"),) + ifneq ($(shell grep -A 3 "ndo_dflt_bridge_getlink" $(LINUXSRC)/include/linux/rtnetlink.h | grep -o "filter_mask"),) + DISTRO_CFLAG += -DHAVE_NDO_BRIDGE_GETLINK + endif +endif + +ifneq ($(shell grep -A 4 "ndo_bridge_setlink" $(LINUXSRC)/include/linux/netdevice.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_NDO_BRIDGE_SETLINK_EXTACK +endif + +ifneq ($(shell grep -o "ndo_set_vf_link_state" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_LINK_STATE + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_spoofchk" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq ($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK + endif + ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VF_SPOOFCHK + endif +endif + +ifneq ($(shell grep -A 1 "ndo_set_vf_vlan" $(LINUXSRC)/include/linux/netdevice.h | grep -o "proto"),) + ifeq ($(shell grep -o "RH_KABI_EXTEND(struct net_device_ops_extended extended)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DNEW_NDO_SET_VF_VLAN + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_vlan_rh73" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_VLAN_RH73 +endif + +ifneq ($(shell grep -o "ndo_set_vf_trust" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_TRUST + ifneq ($(shell grep -A 3 "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h | grep -o "ndo_set_vf_trust"),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_TRUST_RH + endif +endif + +ifneq ($(shell grep -o "ndo_set_vf_queues" $(LINUXSRC)/include/linux/netdevice.h),) + ifeq 
($(shell grep -o "net_device_ops_ext" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SET_VF_QUEUES + endif +endif + +ifneq ($(shell grep -o "ndo_change_mtu_rh74" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_CHANGE_MTU_RH74 +endif + +ifneq ($(shell grep -o "RH_KABI_USE_P(16, struct net_device_extended \*extended)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NET_DEVICE_EXT +endif + +ifneq ($(shell grep -o "RH_KABI_EXTEND(struct net_device_ops_extended extended)" $(LINUXSRC)/include/linux/netdevice.h),) + ifneq ($(shell grep -o "ndo_get_phys_port_name" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_EXT_GET_PHYS_PORT_NAME + endif +endif + +ifneq ($(shell grep -o "ndo_setup_tc_rh72" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SETUP_TC_RH72 +endif + +ifneq ($(shell grep -o "(\*ndo_setup_tc_rh)" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_SETUP_TC_RH +endif + +ifneq ($(shell grep -o "tc_setup_cb_egdev_call" $(LINUXSRC)/include/net/act_api.h),) + DISTRO_CFLAG += -DHAVE_TC_CB_EGDEV +endif + +ifneq ($(shell grep -o "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_SETUP_TC + ifneq ($(shell grep -o "struct tc_etf_qopt_offload" $(LINUXSRC)/include/net/pkt_sched.h),) + ifneq ($(shell grep -o "skb_txtime_consumed" $(LINUXSRC)/include/net/pkt_sched.h),) + DISTRO_CFLAG += -DHAVE_ETF_QOPT_OFFLOAD + endif + endif + ifneq ($(shell grep -o "struct tc_to_netdev" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_TO_NETDEV + ifneq ($(shell grep -o "struct tc_mqprio_qopt" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_MQPRIO_QOPT + endif + ifneq ($(shell grep -A 1 "ndo_setup_tc" $(LINUXSRC)/include/linux/netdevice.h | grep -o "u32 chain_index"),) + DISTRO_CFLAG += -DHAVE_CHAIN_INDEX + endif + endif + ifneq ($(shell grep -o "enum tc_setup_type" 
$(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_TYPE + endif + ifneq ($(shell grep -so "tc_cls_flower_offload" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_CLS_FLOWER_OFFLOAD + endif + ifneq ($(shell grep -o "TC_SETUP_BLOCK" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_BLOCK + endif + ifneq ($(shell grep -o "TC_SETUP_QDISC_MQPRIO" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_TC_SETUP_QDISC_MQPRIO + endif + ifneq ($(shell grep -so "tcf_mirred_dev" $(LINUXSRC)/include/net/tc_act/tc_mirred.h),) + DISTRO_CFLAG += -DHAVE_TCF_MIRRED_DEV + endif + ifneq ($(shell grep -so "tc_cls_can_offload_and_chain0" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_CLS_CAN_OFFLOAD_AND_CHAIN0 + endif + ifneq ($(shell grep -s -A 2 "tcf_block_cb_register" $(LINUXSRC)/include/net/pkt_cls.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_TC_CB_REG_EXTACK + endif + ifneq ($(shell grep -so "tcf_exts_for_each_action" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TC_EXTS_FOR_ACTION + endif + ifneq ($(shell grep -s -A 3 "struct tc_cls_matchall_offload" $(LINUXSRC)/include/net/pkt_cls.h | grep -o "flow_rule"),) + DISTRO_CFLAG += -DHAVE_TC_MATCHALL_FLOW_RULE + endif +endif + +ifneq ($(shell grep -so "FLOW_DISSECTOR_KEY_ICMP" $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR_KEY_ICMP +endif + +ifneq ($(shell grep -so "FLOW_DISSECTOR_KEY_ENC_IP," $(LINUXSRC)/include/net/flow_dissector.h),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR_KEY_ENC_IP +endif + +ifneq ($(shell grep -s -A 9 "struct flow_dissector_key_vlan" $(LINUXSRC)/include/net/flow_dissector.h | grep -o "vlan_tpid"),) + DISTRO_CFLAG += -DHAVE_FLOW_DISSECTOR_KEY_VLAN_TPID +endif + +ifneq ($(shell grep -so "rhashtable" $(LINUXSRC)/include/linux/rhashtable.h),) + DISTRO_CFLAG += -DHAVE_RHASHTABLE +endif + +ifneq ($(shell grep -so "tcf_exts_to_list" 
$(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_EXTS_TO_LIST +endif + +ifneq ($(shell grep -so "tcf_exts_stats_update" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_STATS_UPDATE +endif + +ifneq ($(shell grep -so "tcf_exts_has_actions" $(LINUXSRC)/include/net/pkt_cls.h),) + DISTRO_CFLAG += -DHAVE_TCF_EXTS_HAS_ACTIONS +endif + +ifneq ($(shell grep -so "is_tcf_tunnel_set" $(LINUXSRC)/include/net/tc_act/tc_tunnel_key.h),) + DISTRO_CFLAG += -DHAVE_TCF_TUNNEL +endif + +ifneq ($(shell grep -o "netdev_get_num_tc" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_GET_NUM_TC +endif + +ifneq ($(shell grep -so "__netif_txq_completed_wake" $(LINUXSRC)/include/net/netdev_queues.h),) + DISTRO_CFLAG += -DHAVE_NEW_QUEUE_STOPWAKE +endif + +ifneq ($(shell grep -so "__netif_txq_maybe_wake" $(LINUXSRC)/include/net/netdev_queues.h),) + DISTRO_CFLAG += -DHAVE_TXQ_MAYBE_WAKE +endif + +ifneq ($(shell grep -so "netdev_features_t" $(LINUXSRC)/include/linux/netdev_features.h || \ + grep -o "netdev_features_t" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_FEATURES_T +endif + +ifneq ($(shell grep -o "ndo_fix_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_FIX_FEATURES +endif + +ifneq ($(shell grep -o "netif_set_real_num_rx" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_SET_REAL_NUM_RX +endif + +ifneq ($(shell grep -o "netif_get_num_default_rss_queues" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_GET_DEFAULT_RSS +endif + +ifneq ($(shell grep -o "ndo_vlan_rx_register" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_VLAN_RX_REGISTER +endif + +ifneq ($(shell grep -o "ndo_get_port_parent_id" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_GET_PORT_PARENT_ID +endif + +ifneq ($(shell grep -o "net_device_ops_extended" $(LINUXSRC)/include/linux/netdevice.h),) + ifneq ($(shell grep -o "ndo_xdp_xmit" 
$(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_EXT_NDO_XDP_XMIT + endif +else ifneq ($(shell grep -o "ndo_xdp" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_XDP + ifneq ($(shell grep -o "ndo_bpf" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_BPF + endif + ifneq ($(shell ls $(LINUXSRC)/include/linux/bpf_trace.h > /dev/null 2>&1 && echo bpf_trace),) + DISTRO_CFLAG += -DHAVE_BPF_TRACE + endif + ifneq ($(shell grep -o "skb_metadata_set" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_XDP_DATA_META + endif + ifneq ($(shell grep -o "void bpf_prog_add" $(LINUXSRC)/include/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_VOID_BPF_PROG_ADD + endif + ifneq ($(shell grep "void bpf_warn_invalid_xdp_action" $(LINUXSRC)/include/linux/filter.h | grep -o "struct net_device"),) + DISTRO_CFLAG += -DHAVE_BPF_WARN_INVALID_XDP_ACTION_EXT + endif +endif + +ifneq ($(shell grep -so "xdp_frags_size" $(LINUXSRC)/include/linux/skbuff.h),) + DISTRO_CFLAG += -DHAVE_XDP_MULTI_BUFF +endif + +ifneq ($(shell grep -so "enum xdp_action" $(LINUXSRC)/include/uapi/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_XDP_ACTION +endif +ifneq ($(shell grep -so "XDP_REDIRECT" $(LINUXSRC)/include/uapi/linux/bpf.h),) + DISTRO_CFLAG += -DHAVE_XDP_REDIRECT +endif +ifneq ($(shell grep -so "struct xdp_frame" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_FRAME +endif +ifneq ($(shell grep -so "xdp_features_set_redirect_target" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_SET_REDIR_TARGET +endif +ifneq ($(shell grep -so "enum xdp_mem_type" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_MEM_TYPE +endif +ifneq ($(shell grep -so "xdp_get_shared_info_from_buff" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_SHARED_INFO_FROM_BUFF +endif +ifneq ($(shell grep -so "struct xdp_rxq_info" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_RXQ_INFO + ifneq ($(shell grep -A 1 "xdp_rxq_info_reg" 
$(LINUXSRC)/include/net/xdp.h | grep -o napi_id),) + DISTRO_CFLAG += -DHAVE_NEW_XDP_RXQ_INFO_REG + endif + ifneq ($(shell grep -o "xdp_rxq_info_is_reg" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_RXQ_INFO_IS_REG + endif +endif +ifneq ($(shell grep -so "xdp_init_buff" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_INIT_BUFF +endif +ifneq ($(shell grep -so "xdp_do_flush(void)" $(LINUXSRC)/include/linux/filter.h),) + DISTRO_CFLAG += -DHAVE_XDP_DO_FLUSH +endif + +ifneq ($(shell grep -so "xdp_data_hard_end" $(LINUXSRC)/include/net/xdp.h),) + DISTRO_CFLAG += -DHAVE_XDP_FRAME_SZ +endif + +ifeq ($(shell ls $(LINUXSRC)/include/net/page_pool/helpers.h > /dev/null 2>&1 && echo 1),1) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_HELPERS_H + DISTRO_CFLAG += -DHAVE_PAGE_POOL_PAGE_FRAG + DISTRO_CFLAG += -DHAVE_PAGE_POOL_GET_DMA_ADDR + ifneq ($(shell grep -s "PP_FLAG_PAGE_FRAG" $(LINUXSRC)/include/net/page_pool/types.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_PP_FRAG_BIT + endif +endif + +ifeq ($(shell ls $(LINUXSRC)/include/net/page_pool.h > /dev/null 2>&1 && echo 1),1) + ifneq ($(shell grep -so "page_pool_release_page" $(LINUXSRC)/include/net/page_pool.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_RELEASE_PAGE + endif + ifneq ($(shell grep -so "page_pool_dev_alloc_frag" $(LINUXSRC)/include/net/page_pool.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_PAGE_FRAG + DISTRO_CFLAG += -DHAVE_PAGE_POOL_PP_FRAG_BIT + endif + ifneq ($(shell grep -so "page_pool_get_dma_addr" $(LINUXSRC)/include/net/page_pool.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_GET_DMA_ADDR + endif +endif + +ifneq ($(shell grep -o "netdev_name" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_NAME +endif + +ifneq ($(shell grep -o "netdev_update_features" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_UPDATE_FEATURES +endif + +ifneq ($(shell grep -o "napi_hash_add" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NAPI_HASH_ADD +endif + +ifneq 
($(shell grep -o "napi_hash_del" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NAPI_HASH_DEL +endif + +ifneq ($(shell grep "napi_complete_done" $(LINUXSRC)/include/linux/netdevice.h | grep -o "bool"),) + DISTRO_CFLAG += -DHAVE_NEW_NAPI_COMPLETE_DONE +endif + +ifneq ($(shell grep -o "min_mtu" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_MIN_MTU +endif + +ifneq ($(shell grep -o "prog_attached" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_PROG_ATTACHED +endif + +ifneq ($(shell grep -o "netdev_xmit_more" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETDEV_XMIT_MORE +endif + +ifneq ($(shell grep -o "netif_xmit_stopped" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_XMIT_STOPPED +endif + +ifneq ($(shell grep "tx_dropped" $(LINUXSRC)/include/linux/netdevice.h | grep -o atomic_long_t),) + DISTRO_CFLAG += -DHAVE_NETDEV_TX_DROPPED +endif + +ifneq ($(shell grep "rh_tx_dropped" $(LINUXSRC)/include/linux/netdevice.h | grep -o atomic_long_t),) + DISTRO_CFLAG += -DHAVE_NETDEV_RH_TX_DROPPED +endif + +ifneq ($(shell grep "tx_dropped" $(LINUXSRC)/include/linux/netdevice.h | grep -o local_t),) + DISTRO_CFLAG += -DHAVE_NETDEV_TX_DROPPED -DHAVE_NETDEV_TX_DROPPED_CORE_STATS +endif + +ifneq ($(shell grep -A 1 "ndo_tx_timeout" $(LINUXSRC)/include/linux/netdevice.h | grep -o txqueue),) + DISTRO_CFLAG += -DHAVE_NDO_TX_TIMEOUT_QUEUE +endif + +ifneq ($(shell grep -o "udp_tunnel_nic" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_UDP_TUNNEL_NIC +endif + +ifneq ($(shell grep -o "XDP_QUERY_PROG" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_XDP_QUERY_PROG +endif + +ifneq ($(shell grep -o "__netif_napi_del" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NETIF_NAPI_DEL_NEW +endif + +OPEN_PARAN := ( +ifneq ($(shell grep -A 2 "netif_napi_add${OPEN_PARAN}" $(LINUXSRC)/include/linux/netdevice.h | grep -o "int weight"),) + DISTRO_CFLAG += 
-DHAVE_NETIF_NAPI_ADD_WITH_WEIGHT_ARG +endif + +ifneq ($(shell grep -o "ndo_eth_ioctl" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_ETH_IOCTL +endif + +ifneq ($(shell grep -o "netpoll_poll_dev" $(LINUXSRC)/include/linux/netpoll.h),) + DISTRO_CFLAG += -DHAVE_NETPOLL_POLL_DEV +endif + +ifneq ($(shell grep -o "prandom_bytes" $(LINUXSRC)/include/linux/random.h),) + DISTRO_CFLAG += -DHAVE_PRANDOM_BYTES +endif + +ifneq ($(shell grep -o "tcp_v6_check" $(LINUXSRC)/include/net/ip6_checksum.h),) + DISTRO_CFLAG += -DHAVE_TCP_V6_CHECK +endif + +ifneq ($(shell grep -o "skb_tcp_all_headers" $(LINUXSRC)/include/linux/tcp.h),) + DISTRO_CFLAG += -DHAVE_SKB_TCP_ALL_HEADERS +endif + +ifneq ($(shell grep -o "usleep_range" $(LINUXSRC)/include/linux/delay.h),) + DISTRO_CFLAG += -DHAVE_USLEEP_RANGE +endif + +ifneq ($(shell grep -o "vzalloc" $(LINUXSRC)/include/linux/vmalloc.h),) + DISTRO_CFLAG += -DHAVE_VZALLOC +endif + +ifneq ($(shell grep -o "kmalloc_array" $(LINUXSRC)/include/linux/slab.h),) + DISTRO_CFLAG += -DHAVE_KMALLOC_ARRAY +endif + +ifneq ($(shell grep -o "pcie_capability_read_word" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_CAPABILITY_READ_WORD +endif + +ifneq ($(shell grep -o "pcie_link_width" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_LINK_WIDTH +endif + +ifneq ($(shell grep -o "PCIE_SPEED_2_5GT" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_BUS_SPEED +endif + +ifneq ($(shell grep -o "pci_is_bridge" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_IS_BRIDGE +endif + +ifneq ($(shell grep -o "pci_upstream_bridge" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_UPSTREAM_BRIDGE +endif + +ifneq ($(shell grep -o "pcie_print_link_status" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_PRINT_LINK_STATUS +endif + +ifneq ($(shell grep -o "_genl_register_family_with_ops_grps" $(LINUXSRC)/include/net/genetlink.h),) + DISTRO_CFLAG += 
-DHAVE_GENL_REG_OPS_GRPS +endif + +ifneq ($(shell grep -o "pci_physfn" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_PHYSFN +endif + +ifneq ($(shell grep -o "pcie_flr" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCIE_FLR +endif + +ifneq ($(shell grep -o "pci_get_dsn" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_GET_DSN +endif + +ifneq ($(shell grep "pci_vpd_find_tag" $(LINUXSRC)/include/linux/pci.h | grep "unsigned int off"),) + DISTRO_CFLAG += -DHAVE_OLD_VPD_FIND_TAG +endif + +ifneq ($(shell grep -o "pci_vpd_alloc" $(LINUXSRC)/include/linux/pci.h),) + DISTRO_CFLAG += -DHAVE_PCI_VPD_ALLOC +endif + +ifneq ($(shell grep -o "eth_type_vlan" $(LINUXSRC)/include/linux/if_vlan.h),) + DISTRO_CFLAG += -DHAVE_ETH_TYPE_VLAN +endif + +ifneq ($(shell ls $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h > /dev/null 2>&1 && echo net_tstamp),) + ifneq ($(shell ls $(LINUXSRC)/include/linux/timecounter.h > /dev/null 2>&1 && echo timecounter),) + ifneq ($(shell ls $(LINUXSRC)/include/linux/timekeeping.h > /dev/null 2>&1 && echo timekeeping),) + ifneq ($(shell grep -o "HWTSTAMP_FILTER_PTP_V2_EVENT" $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h),) + DISTRO_CFLAG += -DHAVE_IEEE1588_SUPPORT + ifneq ($(shell grep -o "HWTSTAMP_FLAG_BONDED_PHC_INDEX" $(LINUXSRC)/include/$(UAPI)/linux/net_tstamp.h),) + DISTRO_CFLAG += -DHAVE_HWTSTAMP_FLAG_BONDED_PHC_INDEX + endif + endif + endif + endif +endif + +ifneq ($(shell grep -so "PTP_CLASS_V2" $(LINUXSRC)/include/linux/ptp_classify.h),) + DISTRO_CFLAG += -DHAVE_PTP_CLASSES +endif + +ifneq ($(shell grep -so "ptp_header" $(LINUXSRC)/include/linux/ptp_classify.h),) + DISTRO_CFLAG += -DHAVE_PTP_HEADER +endif + +ifneq ($(shell grep -so "ptp_classify_raw" $(LINUXSRC)/include/linux/ptp_classify.h),) + DISTRO_CFLAG += -DHAVE_PTP_CLASSIFY_RAW +endif + +ifneq ($(shell grep -so "ptp_parse_header" $(LINUXSRC)/include/linux/ptp_classify.h),) + DISTRO_CFLAG += -DHAVE_PTP_PARSE_HEADER +endif + +ifneq ($(shell grep 
-so "ptp_system_timestamp" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_PTP_SYS_TIMESTAMP +endif + +ifneq ($(shell grep -so "adjphase" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_PTP_ADJPHASE +endif + +ifneq ($(shell grep -so "do_aux_work" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_PTP_DO_AUX_WORK +endif + +ifneq ($(shell grep -so "adjust_by_scaled_ppm" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_SCALED_PPM +endif + +ifneq ($(shell grep -so "*gettimex64" $(LINUXSRC)/include/linux/ptp_clock_kernel.h),) + DISTRO_CFLAG += -DHAVE_PTP_GETTIMEX64 +else + ifneq ($(shell grep -so "timespec64" $(LINUXSRC)/include/linux/time64.h),) + DISTRO_CFLAG += -DHAVE_TIMESPEC64 + else + ifneq ($(shell grep -so "timespec64" $(LINUXSRC)/include/linux/time.h),) + DISTRO_CFLAG += -DHAVE_TIMESPEC64 + endif + endif +endif + +ifneq ($(shell grep -so "convert_art_ns_to_tsc" $(LINUXSRC)/arch/x86/include/asm/tsc.h),) + DISTRO_CFLAG += -DHAVE_ARTNS_TO_TSC +endif + +ifneq ($(shell grep -o "time64_to_tm" $(LINUXSRC)/include/linux/time.h),) + DISTRO_CFLAG += -DHAVE_TIME64 +endif + +ifneq ($(shell grep -o "timer_setup" $(LINUXSRC)/include/linux/timer.h),) + DISTRO_CFLAG += -DHAVE_TIMER_SETUP +endif + +ifneq ($(shell grep -s "devlink_ops" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK + ifeq ($(shell grep -o "devlink_register(struct devlink \*devlink);" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_REGISTER_DEV + endif +endif + +ifneq ($(shell grep -s "devlink_param" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PARAM + ifneq ($(shell grep -s -A 2 "int (\*validate)" $(LINUXSRC)/include/net/devlink.h | grep "struct netlink_ext_ack \*extack"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_VALIDATE_NEW + endif +endif + +ifneq ($(shell grep -s -A 7 "devlink_port_attrs" $(LINUXSRC)/include/net/devlink.h | grep -o 
"netdev_phys_item_id"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_ATTRS +endif + +ifneq ($(shell grep -s -A 1 "devlink_port_attrs_set" $(LINUXSRC)/include/net/devlink.h | grep -o "struct devlink_port_attrs"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PORT_ATTRS_SET_NEW +endif + +ifneq ($(shell grep -w "ndo_get_devlink_port" $(LINUXSRC)/include/linux/netdevice.h),) + DISTRO_CFLAG += -DHAVE_NDO_DEVLINK_PORT +endif + +ifneq ($(shell grep -s "devlink_params_publish" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_PARAM_PUBLISH +endif + +ifneq ($(shell grep -s -A 1 "eswitch_mode_set" $(LINUXSRC)/include/net/devlink.h | grep -o "netlink_ext_ack"),) + DISTRO_CFLAG += -DHAVE_ESWITCH_MODE_SET_EXTACK +endif + +ifneq ($(shell grep -so "DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_IGNORE_ARI +endif + +ifneq ($(shell grep -so "info_get" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_INFO + ifneq ($(shell grep -so "devlink_info_board_serial_number_put" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_INFO_BSN_PUT + endif +endif + +ifneq ($(shell grep -so "flash_update" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_UPDATE +endif + +ifneq ($(shell grep -so "devlink_flash_update_status_notify" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_UPDATE_STATUS +endif + +ifneq ($(shell grep -so "devlink_flash_update_begin_notify" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_UPDATE_BEGIN +endif + +ifneq ($(shell grep -so "devlink_flash_update_params" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_PARAMS + ifneq ($(shell grep -s -A 1 "devlink_flash_update_params" $(LINUXSRC)/include/net/devlink.h | grep "struct firmware"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FLASH_PARAMS_NEW + endif +endif + +ifneq ($(shell grep -so "struct devlink_health_reporter" 
$(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORT +endif + +ifneq ($(shell grep -so "devlink_health_reporter_state_update" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORTER_STATE_UPDATE +endif + +ifneq ($(shell grep -s -A 1 "(*recover)" $(LINUXSRC)/include/net/devlink.h | grep netlink_ext_ack),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORT_EXTACK +endif + +ifneq ($(shell grep -so "devlink_health_reporter_recovery_done" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE +endif + +ifneq ($(shell grep -s -A 2 "devlink_health_reporter_create" $(LINUXSRC)/include/net/devlink.h | grep auto_recover),) + DISTRO_CFLAG += -DHAVE_DEVLINK_HEALTH_AUTO_RECOVER +endif + +ifneq ($(shell grep -so "reload_actions" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_RELOAD_ACTION +endif + +ifneq ($(shell grep -so "devlink_reload_disable" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_RELOAD_DISABLE +endif + +ifneq ($(shell grep -so "devlink_set_features" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_SET_FEATURES +endif + +ifneq ($(shell grep -so "*selftest_check" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_SELFTESTS_FEATURES +endif + +ifneq ($(shell grep -so "DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_REMOTE_DEV_RESET +endif + +ifneq ($(shell grep -so "devlink_info_driver_name_put" $(LINUXSRC)/include/net/devlink.h),) + DISTRO_CFLAG += -DHAVE_DEVLINK_INFO_DRIVER_NAME +endif + +ifneq ($(shell grep -A 1 "devlink_fmsg_string_pair_put" $(LINUXSRC)/include/net/devlink.h | grep -o "void"),) + DISTRO_CFLAG += -DHAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID +endif + +# Check if the file exists or not +ifneq ($(shell grep -s "switchdev_ops" $(LINUXSRC)/include/net/switchdev.h),) + DISTRO_CFLAG += -DHAVE_SWITCHDEV +endif + 
+ifneq ($(shell grep -s "METADATA_HW_PORT_MUX" $(LINUXSRC)/include/net/dst_metadata.h),) + DISTRO_CFLAG += -DHAVE_METADATA_HW_PORT_MUX +endif + +ifneq ($(shell grep -so "(*ieee_delapp)" $(LINUXSRC)/include/net/dcbnl.h),) + DISTRO_CFLAG += -DHAVE_IEEE_DELAPP +endif + +ifneq ($(shell grep -so "dcb_ieee_getapp_prio_dscp_mask_map" $(LINUXSRC)/include/net/dcbnl.h),) + DISTRO_CFLAG += -DHAVE_DSCP_MASK_MAP +endif + +ifneq ($(shell grep -o cpumask_local_spread $(LINUXSRC)/include/linux/cpumask.h),) + DISTRO_CFLAG += -DHAVE_CPUMASK_LOCAL_SPREAD +endif + +ifneq ($(shell grep -o cpumask_set_cpu_local_first $(LINUXSRC)/include/linux/cpumask.h),) + DISTRO_CFLAG += -DHAVE_CPUMASK_LOCAL_FIRST +endif + +ifeq ($(shell grep -so "ETH_RESET_AP" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DNO_ETH_RESET_AP +endif + +ifneq ($(shell grep -o "get_pause_stats" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_GET_PAUSE_STATS +endif + +ifneq ($(shell grep -so "ETH_TEST_FL_EXTERNAL_LB" $(LINUXSRC)/include/$(UAPI)/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETH_TEST_FL_EXTERNAL_LB +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/dim.h > /dev/null 2>&1 && echo dim),) + DISTRO_CFLAG += -DHAVE_DIM +endif + +ifneq ($(shell grep -o simple_open $(LINUXSRC)/include/linux/fs.h),) + DISTRO_CFLAG += -DHAVE_SIMPLE_OPEN +endif + +ifneq ($(shell grep -o hwmon_device_register_with_info $(LINUXSRC)/include/linux/hwmon.h),) + DISTRO_CFLAG += -DHAVE_NEW_HWMON_API +endif + +ifneq ($(shell grep -o ETH_RESET_CRASHDUMP $(LINUXSRC)/include/uapi/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_RESET_CRASHDUMP +endif + +ifneq ($(shell grep -o "struct ethtool_link_ksettings" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_LINK_KSETTINGS +endif + +ifneq ($(shell grep -o "get_module_eeprom_by_page" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_MODULE_EEPROM_BY_PAGE +endif + +ifneq ($(shell grep -o "struct ethtool_rxfh_param" 
$(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_RXFH_PARAM +endif + +ifneq ($(shell grep -o "\*get_rxfh_context" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETH_RXFH_CONTEXT_ALLOC +endif + + +ifneq ($(shell grep -o "strscpy" $(LINUXSRC)/include/linux/string.h),) + DISTRO_CFLAG += -DHAVE_STRSCPY + ifneq ($(shell grep -o "__must_check strscpy" $(LINUXSRC)/include/linux/string.h),) + DISTRO_CFLAG += -DHAVE_OLD_STRSCPY + endif +endif + +ifneq ($(shell grep -so "lo_hi_writeq" $(LINUXSRC)/include/linux/io-64-nonatomic-lo-hi.h),) + DISTRO_CFLAG += -DHAVE_LO_HI_WRITEQ +endif + +ifneq ($(shell grep -so "static_key_initialized" $(LINUXSRC)/include/linux/jump_label.h),) + DISTRO_CFLAG += -DHAVE_STATIC_KEY_INITIALIZED + ifneq ($(shell grep -so "DEFINE_STATIC_KEY_FALSE" $(LINUXSRC)/include/linux/jump_label.h),) + DISTRO_CFLAG += -DHAVE_DEFINE_STATIC_KEY + endif +endif + +ifneq ($(shell grep -so "DECLARE_STATIC_KEY_FALSE" $(LINUXSRC)/include/linux/jump_label.h),) + DISTRO_CFLAG += -DHAVE_DECLARE_STATIC_KEY +endif + +ifneq ($(shell $(BNXT_SRC)/find_src.awk -v struct=ethtool_link_ksettings pattern=lanes $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_LANES +endif + +ifneq ($(shell $(BNXT_SRC)/find_src.awk -v struct=ethtool_link_ksettings pattern=ethtool_link_mode_bit_indices $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_LINK_MODE +endif + +ifneq ($(shell grep -o ethtool_params_from_link_mode $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_PARAMS_FROM_LINK_MODE +endif + +ifneq ($(shell grep -o "^struct ethtool_keee" $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_KEEE +endif + +ifneq ($(shell grep -so ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT $(LINUXSRC)/include/uapi/linux/ethtool_netlink.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT +endif + +ifneq ($(shell grep -so ETHTOOL_A_LINKMODES_LANES 
$(LINUXSRC)/include/uapi/linux/ethtool_netlink.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_A_LINKMODES_LANES +endif + +ifneq ($(shell grep -o .get_per_queue_coalesce $(LINUXSRC)/include/linux/ethtool.h),) + DISTRO_CFLAG += -DHAVE_ETHTOOL_GET_PER_QUEUE_COAL +endif + +ifneq ($(shell grep -so synchronize_rcu_bh $(LINUXSRC)/include/linux/rcutree.h),) + DISTRO_CFLAG += -DHAVE_LEGACY_RCU_BH +endif + +ifneq ($(shell grep -o "ALIGN" $(LINUXSRC)/include/linux/kernel.h),) +DISTRO_CFLAG += +else +DISTRO_CFLAG += -DHAVE_ALIGN +endif + +ifneq ($(shell grep -so "struct netlink_ext_ack" $(LINUXSRC)/include/linux/netlink.h),) + DISTRO_CFLAG += -DHAVE_NETLINK_EXT_ACK +endif + +ifneq ($(shell ls $(LINUXSRC)/include/linux/auxiliary_bus.h > /dev/null 2>&1 && echo auxiliary_driver),) + DISTRO_CFLAG += -DHAVE_AUXILIARY_DRIVER +endif + +ifneq ($(shell grep -so "ida_alloc" $(LINUXSRC)/include/linux/idr.h),) + DISTRO_CFLAG += -DHAVE_IDA_ALLOC +endif + +ifneq ($(shell grep -o "struct auxiliary_device_id" $(LINUXSRC)/include/linux/mod_devicetable.h),) + DISTRO_CFLAG += -DHAVE_AUX_DEVICE_ID +endif + +ifneq ($(shell grep -so "auxiliary_get_drvdata" $(LINUXSRC)/include/linux/auxiliary_bus.h),) + DISTRO_CFLAG += -DHAVE_AUX_GET_DRVDATA +endif + +ifneq ($(shell grep -so "tls_driver_ctx" $(LINUXSRC)/include/net/tls.h),) + ifneq ($(shell grep -so "tls_is_sk_rx_device_offloaded" $(LINUXSRC)/include/net/tls.h),) + ifneq ($(shell grep -so "TLS_DRIVER_STATE_SIZE_RX" $(LINUXSRC)/include/net/tls.h),) + DISTRO_CFLAG += -DHAVE_KTLS + endif + endif +endif + +ifneq ($(shell grep -so "hwmon_notify_event" $(LINUXSRC)/include/linux/hwmon.h),) + DISTRO_CFLAG += -DHAVE_HWMON_NOTIFY_EVENT +endif + +ifneq ($(shell grep -so "vm_flags_set" $(LINUXSRC)/include/linux/mm.h),) + DISTRO_CFLAG += -DHAVE_VM_FLAGS_SET +endif + +ifneq ($(shell grep -so "sysfs_emit" $(LINUXSRC)/include/linux/sysfs.h),) + DISTRO_CFLAG += -DHAVE_SYSFS_EMIT +endif + +ifneq ($(shell grep -so "default_groups" $(LINUXSRC)/include/linux/kobject.h),) + 
DISTRO_CFLAG += -DHAVE_KOBJ_DEFAULT_GROUPS +endif + +ifneq ($(shell grep "unsigned long long" $(LINUXSRC)/include/net/flow_dissector.h | grep used_keys),) + DISTRO_CFLAG += -DHAVE_FLOW_USED_KEY_SIZE_LONG_LONG +endif + +ifneq ($(shell grep -so "pci_enable_pcie_error_reporting" $(LINUXSRC)/include/linux/aer.h),) + DISTRO_CFLAG += -DHAVE_PCIE_ERROR_REPORTING +endif + +ifneq ($(shell grep -so "tls_is_skb_tx_device_offloaded" $(LINUXSRC)/include/net/tls.h),) + DISTRO_CFLAG += -DHAVE_TLS_IS_SKB_TX_DEVICE_OFFLOADED +endif + +ifneq ($(shell grep -so "*napi" $(LINUXSRC)/include/net/page_pool.h),) + DISTRO_CFLAG += -DHAVE_PAGE_POOL_NAPI_MAPPING +endif + +ifdef CONFIG_XDP_SOCKETS +ifneq ($(shell grep -so "xsk_pool_dma_map" $(LINUXSRC)/include/net/xdp_sock_drv.h),) + DISTRO_CFLAG += -DHAVE_XSK_SUPPORT +endif +endif + +# Valid values for wc_slices: 1, 2, 4 +DISTRO_CFLAG += -DTF_TCAM_WC_SLICES=2 + +ifeq ($(BNXT_INC),) + BNXT_INC:=$(shell pwd)/ + export BNXT_INC +endif + +ifeq ($(TFCORE_INC),) + TFCORE_INC:=$(shell pwd)/tf_core + export TFCORE_INC +endif + +ifeq ($(TFC_V3_INC),) + TFC_V3_INC:=$(shell pwd)/tfc_v3 + export TFC_V3_INC +endif + +ifeq ($(HCAPI_INC),) + HCAPI_INC:=$(shell pwd)/hcapi + export HCAPI_INC +endif + +ifeq ($(HCAPI_CFA_INC),) + HCAPI_CFA_INC:=$(shell pwd)/hcapi/cfa + export HCAPI_CFA_INC +endif + +ifeq ($(CFA_V3_INC),) + CFA_V3_INC:=$(shell pwd)/hcapi/cfa_v3/include + export CFA_V3_INC +endif + +ifeq ($(CFA_V3_BLD_INC),) + CFA_V3_BLD_INC:=$(shell pwd)/hcapi/cfa_v3/mpc/include + export CFA_V3_MPC_INC +endif + +ifeq ($(CFA_V3_MM_INC),) + CFA_V3_MM_INC:=$(shell pwd)/hcapi/cfa_v3/mm/include + export CFA_V3_MM_INC +endif + +ifeq ($(CFA_V3_TIM_INC),) + CFA_V3_TIM_INC:=$(shell pwd)/hcapi/cfa_v3/tim/include + export CFA_V3_TIM_INC +endif + +ifeq ($(CFA_V3_TPM_INC),) + CFA_V3_TPM_INC:=$(shell pwd)/hcapi/cfa_v3/tpm/include + export CFA_V3_TPM_INC +endif + +ifeq ($(GENERIC_TEMPLATES_INC),) + GENERIC_TEMPLATES_INC:=$(shell pwd)/tf_ulp/generic_templates + export 
GENERIC_TEMPLATES_INC +endif + +ifeq ($(TFULP_INC),) + TFULP_INC:=$(shell pwd)/tf_ulp + export TFULP_INC +endif + +DISTRO_CFLAG += -DBNXT_TF_LINUX + +ifneq ($(shell grep -so synchronize_rcu_bh $(LINUXSRC)/include/linux/rcutree.h),) + DISTRO_CFLAG += -DHAVE_LEGACY_RCU_BH +endif + +ifeq ($(custom_flow_offload),1) + DISTRO_CFLAG += -DCONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD +endif + +override EXTRA_CFLAGS += ${DISTRO_CFLAG} -g -Werror -Wno-error=unused-variable -Wno-error=unused-function -DCHIMP_FW -D__LINUX -DCONFIG_BNXT_SRIOV -DCONFIG_BNXT_DCB -DHSI_DBG_DISABLE -DCONFIG_BNXT_LFC -DSUPPORT_CFA_HW_ALL=1 -DSUPPORT_CFA_EM_FOR_TC=0 -I$(TFCORE_INC) -I$(TFC_V3_INC) -I$(BNXT_INC) -I$(CFA_V3_INC) -I$(CFA_V3_MPC_INC) -I$(CFA_V3_MM_INC) -I$(CFA_V3_TPM_INC) -I$(CFA_V3_TIM_INC) -I$(HCAPI_INC) -I$(HCAPI_CFA_INC) -I$(GENERIC_TEMPLATES_INC) -I$(TFULP_INC) + +define cols + $$(awk '{ if (m < NF) m=NF; } END { print m; }' $1 2>/dev/null || echo 0) +endef + +define bnxt_ver + BNXT_EXTRA_VER_STATUS=$$(git status -s bnxt_extra_ver.h 2>/dev/null); \ + BNXT_EXTERNAL=$$?; \ + if [ $$BNXT_EXTERNAL -eq 0 ] && [ -z "$$BNXT_EXTRA_VER_STATUS" ]; then \ + BNXT_GIT_VER="-$$(git rev-parse --abbrev-ref HEAD)"; \ + BNXT_GIT_VER="$${BNXT_GIT_VER}-$$( \ + GIT_CONFIG_COUNT=1 \ + GIT_CONFIG_KEY_0=core.abbrev \ + GIT_CONFIG_VALUE_0=8 \ + git rev-parse --short HEAD \ + )"; \ + if [ -n "$$(git diff --name-only)" ]; then \ + BNXT_GIT_VER="$${BNXT_GIT_VER}+"; \ + fi; \ + fi; \ + BNXT_VER=$$(sed -n -E "s/(.*DRV_MODULE_VERSION\s+\")(.*)(\".*)/\2/p" \ + bnxt.h); \ + if [ -n "$$BNXT_GIT_VER" ]; then \ + BNXT_EXTRA_VER="$${BNXT_GIT_VER}"; \ + BNXT_VER="$${BNXT_VER}$${BNXT_EXTRA_VER}"; \ + else \ + BNXT_SRC_DELTA=0; \ + if [ -x "$$(which sha512sum 2>/dev/null)" ] && \ + [ $(call cols, MANIFEST) -eq 2 ]; then \ + $$(sha512sum -c MANIFEST 2>/dev/null >/dev/null); \ + BNXT_SRC_DELTA=$$?; \ + fi; \ + BNXT_SRC_EXTRA_VER=$$(sed -n -E \ + "s/(.*DRV_MODULE_EXTRA_VER\s+\")(.*)(\")/\2/p" \ + bnxt_extra_ver.h); \ + if [ 
$$BNXT_SRC_DELTA -eq 0 ]; then \ + BNXT_VER="$${BNXT_VER}$${BNXT_SRC_EXTRA_VER}"; \ + else \ + BNXT_EXTRA_VER="$${BNXT_SRC_EXTRA_VER}+"; \ + touch .BNXT_SRC_DELTA.modified; \ + BNXT_VER="$${BNXT_VER}$${BNXT_EXTRA_VER}"; \ + fi; \ + fi +endef + +cflags-y += $(EXTRA_CFLAGS) +KBUILD_CFLAGS += -Wframe-larger-than=2144 +src=$(BNXT_SRC) +ccflags-y := -I$(src)/ +ccflags-y += -I$(src)/tf_core/ +ccflags-y += -I$(src)/tfc_v3/ +ccflags-y += -I$(src)/hcapi/ +ccflags-y += -I$(src)/hcapi/cfa/ +ccflags-y += -I$(src)/hcapi/cfa_v3/include +ccflags-y += -I$(src)/hcapi/cfa_v3/mm +ccflags-y += -I$(src)/hcapi/cfa_v3/mm/include +ccflags-y += -I$(src)/hcapi/cfa_v3/mpc +ccflags-y += -I$(src)/hcapi/cfa_v3/mpc/include/ +ccflags-y += -I$(src)/hcapi/cfa_v3/tim +ccflags-y += -I$(src)/hcapi/cfa_v3/tim/include +ccflags-y += -I$(src)/hcapi/cfa_v3/tpm +ccflags-y += -I$(src)/hcapi/cfa_v3/tpm/include +ccflags-y += -I$(src)/tf_ulp/ +ccflags-y += -I$(src)/tf_ulp/generic_templates/ + +BCM_DRV = bnxt_en.ko +ifneq ($(KERNELRELEASE),) + +ifneq ($(BNXT_EXTRA_VER),) +override EXTRA_CFLAGS += -DDRV_MODULE_EXTRA_VER=\"$(BNXT_EXTRA_VER)\" +endif + +ifeq ($(shell expr $(KVER_MAJ) \>= 5), 1) + BNXT_DBGFS_OBJ = bnxt_debugfs.o +else + BNXT_DBGFS_OBJ = bnxt_debugfs_cpt.o +endif + +TF_CORE_OBJ = tf_core/tf_msg.o tf_core/tf_util.o tf_core/tf_session.o tf_core/tf_rm.o tf_core/tf_tcam.o tf_core/tf_tbl.o tf_core/tf_identifier.o tf_core/dpool.o tf_core/tf_em_internal.o tf_core/tf_em_hash_internal.o tf_core/tf_if_tbl.o tf_core/tf_global_cfg.o tf_core/tf_sram_mgr.o tf_core/tf_tbl_sram.o tf_core/rand.o hcapi/cfa/hcapi_cfa_p4.o hcapi/cfa/hcapi_cfa_p58.o tf_core/tf_device_p4.o tf_core/tf_device_p58.o tf_core/tf_device.o tf_core/tf_core.o tf_core/tf_tcam_mgr_msg.o tf_core/cfa_tcam_mgr_hwop_msg.o tf_core/cfa_tcam_mgr.o tf_core/cfa_tcam_mgr_p4.o tf_core/cfa_tcam_mgr_p58.o + +HCAPI_OBJ = hcapi/bitalloc.o + +TFC_V3_OBJ = tfc_v3/tfc_act.o tfc_v3/tfc_cpm.o tfc_v3/tfc_em.o tfc_v3/tfc_global_id.o tfc_v3/tfc_ident.o 
tfc_v3/tfc_idx_tbl.o tfc_v3/tfc_init.o tfc_v3/tfc_msg.o tfc_v3/tfc_priv.o tfc_v3/tfc_session.o tfc_v3/tfc_tbl_scope.o tfc_v3/tfc_tcam.o tfc_v3/tfc_util.o tfc_v3/tfo.o tfc_v3/tfc_vf2pf_msg.o tfc_v3/tfc_if_tbl.o tfc_v3/tfc_mpc_table.o + +CFA_V3_OBJ = hcapi/cfa_v3/mm/cfa_mm.o hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.o hcapi/cfa_v3/mpc/cfa_bld_mpc.o hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.o hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.o hcapi/cfa_v3/tpm/cfa_tpm.o hcapi/cfa_v3/tim/cfa_tim.o +ifeq ($(custom_flow_offload),1) + TF_ULP_OBJ = tf_ulp/ulp_utils.o tf_ulp/ulp_template_debug.o tf_ulp/ulp_tf_debug.o tf_ulp/bnxt_tf_ulp.o tf_ulp/bnxt_tf_ulp_p5.o tf_ulp/bnxt_tf_ulp_p7.o tf_ulp/ulp_fc_mgr.o tf_ulp/ulp_fc_mgr_p5.o tf_ulp/ulp_fc_mgr_p7.o tf_ulp/ulp_flow_db.o tf_ulp/ulp_gen_tbl.o tf_ulp/ulp_mapper.o tf_ulp/ulp_mapper_p5.o tf_ulp/ulp_mapper_p7.o tf_ulp/ulp_mark_mgr.o tf_ulp/ulp_port_db.o tf_ulp/ulp_matcher.o tf_ulp/ulp_def_rules.o tf_ulp/bnxt_ulp_linux_flow.o tf_ulp/ulp_tc_parser.o tf_ulp/ulp_tc_handler_tbl.o tf_ulp/ulp_tc_custom_offload.o tf_ulp/bnxt_tf_tc_shim.o tf_ulp/ulp_tc_rte_flow_gen.o tf_ulp/ulp_alloc_tbl.o + TF_ULP_OBJ += tf_ulp/ulp_udcc.o tf_ulp/ulp_nic_flow.o tf_ulp/ulp_generic_flow_offload.o tf_ulp/bnxt_tf_tc_shim.o tf_ulp/bnxt_ulp_meter.o +else + TF_ULP_OBJ = tf_ulp/ulp_utils.o tf_ulp/ulp_template_debug.o tf_ulp/ulp_tf_debug.o tf_ulp/bnxt_tf_ulp.o tf_ulp/bnxt_tf_ulp_p5.o tf_ulp/bnxt_tf_ulp_p7.o tf_ulp/ulp_fc_mgr.o tf_ulp/ulp_fc_mgr_p5.o tf_ulp/ulp_fc_mgr_p7.o tf_ulp/ulp_flow_db.o tf_ulp/ulp_gen_tbl.o tf_ulp/ulp_mapper.o tf_ulp/ulp_mapper_p5.o tf_ulp/ulp_mapper_p7.o tf_ulp/ulp_mark_mgr.o tf_ulp/ulp_port_db.o tf_ulp/ulp_matcher.o tf_ulp/ulp_def_rules.o tf_ulp/bnxt_ulp_linux_flow.o tf_ulp/ulp_tc_parser.o tf_ulp/ulp_tc_handler_tbl.o tf_ulp/ulp_alloc_tbl.o + TF_ULP_OBJ += tf_ulp/ulp_udcc.o tf_ulp/ulp_nic_flow.o tf_ulp/ulp_generic_flow_offload.o tf_ulp/bnxt_tf_tc_shim.o tf_ulp/bnxt_ulp_meter.o +endif + +GENERIC_TEMPLATES_OBJ = 
tf_ulp/generic_templates/ulp_template_db_wh_plus_class.o tf_ulp/generic_templates/ulp_template_db_wh_plus_act.o tf_ulp/generic_templates/ulp_template_db_thor_class.o tf_ulp/generic_templates/ulp_template_db_thor_act.o tf_ulp/generic_templates/ulp_template_db_thor2_class.o tf_ulp/generic_templates/ulp_template_db_thor2_act.o tf_ulp/generic_templates/ulp_template_db_tbl.o tf_ulp/generic_templates/ulp_template_db_class.o tf_ulp/generic_templates/ulp_template_db_act.o +obj-m += bnxt_en.o + +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_ethtool_compat.o bnxt_sriov.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_nic_flow.o bnxt_tc.o bnxt_devlink.o bnxt_lfc.o bnxt_dim.o bnxt_coredump.o bnxt_auxbus_compat.o bnxt_mpc.o bnxt_ktls.o bnxt_hdbr.o bnxt_hwmon.o bnxt_sriov_sysfs.o bnxt_tfc.o bnxt_udcc.o bnxt_log.o bnxt_log_data.o bnxt_xsk.o $(BNXT_DBGFS_OBJ) $(TF_CORE_OBJ) $(TFC_V3_OBJ) $(CFA_V3_OBJ) $(TF_ULP_OBJ) $(HCAPI_OBJ) $(GENERIC_TEMPLATES_OBJ)#decode_hsi.o + +else + +BNXT_EXTRA_VER=$(shell $(call bnxt_ver); echo $$BNXT_EXTRA_VER) + +define fwd_ver +$(shell if [ -n "$(BNXT_EXTRA_VER)" ] ; then echo "BNXT_EXTRA_VER=$(BNXT_EXTRA_VER) "; fi) +endef + +default: +ifeq ($(CROSS_COMPILE),) + make -C $(LINUX) M=$(shell pwd) $(call fwd_ver)modules +else ifneq ($(CROSS_COMPILE),) + make -C $(LINUXSRC) M=$(shell pwd) $(call fwd_ver)modules CROSS_COMPILE=$(CROSS_COMPILE) ARCH=$(ARCH) +endif + +yocto_all: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) + +modules_install: + $(MAKE) -C $(LINUXSRC) M=$(shell pwd) modules_install + +endif + +install: default + @if [ "$(KDIR)" != "" ]; then \ + echo "Cannot use install with KDIR option"; exit 2;\ + fi + mkdir -p $(PREFIX)$(BCMMODDIR); + install -m 444 $(BCM_DRV) $(PREFIX)$(BCMMODDIR); + @if [ "$(PREFIX)" = "" ]; then /sbin/depmod -a ;\ + else echo " *** Run '/sbin/depmod -a' to update the module database.";\ + fi + +.PHONEY: all clean install + +define src_pkg_cleanup + rm -f $1; \ + if [ -n "$$BNXT_GIT_VER" ]; then \ + git checkout $2; \ + fi 
+endef + +src_pkg: MANIFEST bnxt.h bnxt_extra_ver.h + @$(call bnxt_ver); \ + if [ -n "$$BNXT_GIT_VER" ]; then \ + sed -i -E \ + "s/(DRV_MODULE_EXTRA_VER\s+\")(.*)(\")/\1$$BNXT_GIT_VER\3/" \ + bnxt_extra_ver.h; \ + fi; \ + BNXT_FILES=$$(cat $< | sed -E "s/.*\s+(.*)/\1/"); \ + if [ $(call cols, $<) -eq 1 ]; then \ + if sha512sum $$BNXT_FILES > $<.hash; then \ + BNXT_FILES="$${BNXT_FILES}\n$<.hash"; \ + else \ + $(call src_pkg_cleanup, $<.hash, bnxt_extra_ver.h); \ + exit 1; \ + fi; \ + else \ + BNXT_FILES="$${BNXT_FILES}\n$<"; \ + fi; \ + printf "$${BNXT_FILES}" | tar czvf bnxt_en-$$BNXT_VER.tar.gz \ + --owner=0 --group=0 -T - \ + --xform s/$<.hash/$ make + +Alternatively, if multiple versions are installed in the standard distribution +locations, the build can be directed to use a specific version using the KVER +environment variable: + + $ KVER= make + +Other than the options to locate kernel dependencies, the BNXT_EN driver +exposes no other compile time customizable features. + +BNXT_EN Driver Settings +======================= + +The bnxt_en driver settings can be queried and changed using ethtool. The +latest ethtool can be downloaded from +ftp://ftp.kernel.org/pub/software/network/ethtool if it is not already +installed. The following are some common examples on how to use ethtool. See +the ethtool man page for more information. ethtool settings do not persist +across reboot or module reload. The ethtool commands can be put in a startup +script such as /etc/rc.local to preserve the settings across a reboot. On +Red Hat distributions, "ethtool -s" parameters can be specified in the +ifcfg-ethx scripts using the ETHTOOL_OPTS keyword. + +Some ethtool examples: + +1. Show current speed, duplex, and link status: + + ethtool eth0 + +Note that if auto-negotiation is off, ethtool will always show the speed +setting whether link is up or down. If auto-negotiation is on, ethtool will +show the negotiated speed when link is up, and unknown speed when link is +down. + +2. 
Set speed: + +Example: Set speed to 10Gbps with autoneg off: + + ethtool -s eth0 speed 10000 autoneg off + +Example: Set speed to 25Gbps with autoneg off: + + ethtool -s eth0 speed 25000 autoneg off + +On some NPAR (NIC partitioning) devices, the port speed and flow control +settings cannot be changed by the driver. + +See Autoneg section below for additional information on configuring +Autonegotiation. + +3. Show offload settings: + + ethtool -k eth0 + +4. Change offload settings: + +Example: Turn off TSO (TCP Segmentation Offload) + + ethtool -K eth0 tso off + +Example: Turn off hardware GRO (Generic Receive Offload) + + ethtool -K eth0 rx-gro-hw off + +Note that "rx-gro-hw" (hardware GRO) setting is available in newer kernels +such as 4.16. When "rx-gro-hw" is turned off, there is no effect on software +GRO. Prior to the introduction of "rx-gro-hw", hardware GRO settings can only +be controlled by controlling "gro", which applies to both GRO and hardware GRO. + + ethtool -K eth0 gro off + +Example: Turn off hardware LRO (Large Receive Offload) + + ethtool -K eth0 lro off + +Note that hardware GRO and hardware LRO are mutually exclusive. Hardware +GRO is generally better than LRO because the former is reversible and +is compatible with bridging and routing (including bridging to Virtual +Machines). LRO must be turned off when bridging or routing is enabled. + +Example: Turn on hardware GRO + + ethtool -K eth0 rx-gro-hw on + +If "rx-gro-hw" is not available on older kernels, use "gro". + + ethtool -K eth0 gro on + +Note that if both "gro" and "lro" are set on older kernels that don't support +"rx-gro-hw", the driver will use hardware GRO. + +Note that "rx-gro-hw" and "lro" will be automatically disabled by the driver +when the MTU exceeds 4096 to workaround a hardware performance limitation +on older BCM573xx and BCM574xx chips. When the MTU drops back to 4096 or +below, the orginal setting should be automatically restored. 
On some older +kernels, the user may need to restore the setting manually. + +5. Show ring sizes: + + ethtool -g eth0 + +6. Change ring sizes: + + ethtool -G eth0 rx N + +Note that the RX Jumbo ring size is set automatically when needed and +cannot be changed by the user. + +7. Get statistics: + + ethtool -S eth0 + +8. Show number of channels (rings): + + ethtool -l eth0 + +9. Set number of channels (rings): + + ethtool -L eth0 rx N tx N combined 0 + + ethtool -L eth0 rx 0 tx 0 combined M + +Note that the driver can support either all combined or all rx/tx channels, +but not a combination of combined and rx/tx channels. The default is +combined channels to match the number of CPUs up to 8. Combined channels +use less system resources but may have lower performance than rx/tx channels +under very high traffic stress. rx and tx channels can have different numbers +for rx and tx but must both be non-zero. + +Note that if RDMA is enabled on adapter, L2 tries to reserve <= 64 MSIx vectors +and if bnxt_re is loaded, L2 pre-set maximum would be a smaller value because +RoCE has used up the resources. If L2 needs more rings, unload bnxt_re and +increase the number of rings/channels used by L2 and then load the bnxt_re. +RoCE driver shall be loaded with the available number of MSIx vectors. + +10. Show interrupt coalescing settings: + + ethtool -c eth0 + +Please refer to the section on Dynamic Interrupt moderation +on how these can be dynamically altered by the stack. + +11. Set interrupt coalescing settings: + + ethtool -C eth0 rx-frames N + + Note that only these parameters are supported: + rx-usecs, rx-frames, rx-usecs-irq, rx-frames-irq, + tx-usecs, tx-frames, tx-usecs-irq, tx-frames-irq, + stats-block-usecs. + +Note that on 5.15 and newer kernels, CQE coalescing timer mode can be +enabled or disabled on some devices. CQE mode means that the relevant +timer gets reset when a new interrupt generating event is ready. 
+ +Example: Enable CQE mode on RX: + + ethtool -C eth0 cqe-mode-rx on rx-usecs 20 rx-frames 10 + +With this setting, the RX coalescing timer will start after the first RX +frame is received. If a new RX frame is received within 20 us, the RX +coalesing timer will restart counting from 0. As long as a new RX frame +is received within 20 us since the last RX frame, the interrupt will be +delayed until 10 RX frames have been received. + +Example: Disable CQE mode on RX: + + ethtool -C eth0 cqe-mode-rx off rx-usecs 20 rx-frames 10 + +With CQE mode disabled, the RX coalecing timer will start and will not be +reset once the first RX frame is received. To coalesce the interrupt for +10 RX frames, all 10 RX frames have to be received within 20 us after the +first one has been received. + +12. Show RSS flow hash indirection table and RSS hash key: + + ethtool -x eth0 + +Note that the RSS indirection table size may vary depending on the device +and the number of RX channels. + +13. Set 40-byte RSS hash key: + + ethtool -X eth0 hkey 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f:10:11:12:13:14:15:16:17:18:19:1a:1b:1c:1d:1e:1f:20:21:22:23:24:25:26:27 + +14. Set RSS indirection table: + + ethtool -X eth0 [start N] [ equal N | weight W0 W1 ... | default ] + +Example: Set RSS indirection table to have equal distribution for the first + four channels: + + ethtool -X eth0 equal 4 + +Example: Set RSS indirection table to have equal distribution for six + channels starting from channel 2: + + ethtool -X eth0 start 2 equal 6 + +Note that the start parameter is 0-based. + +Example: Set RSS indirection table to have weight distributions of 1:2:3:4 + for four channels starting from channel 4: + + ethtool -X eth0 start 4 weight 1 2 3 4 + +Note that the number of channels being configured must be valid and must not +exceed the number of RX or combined channels. The configured settings will be +preserved whenever possible even when the number of RX or combined channels is +changed. 
In some cases when the settings cannot be preserved, the indirection +table will revert back to default even distribution for all channels. + +On 5750X and newer chips, the size of the indirection table may change as +the number of RX channel changes. If the indirection table is set to +non-default, the RX/combined channel number changes will be restricted to the +range that does not change the indirection table size. + +Example: Set RSS indirection table when the combined channel number is 8: + + ethtool -L eth0 combined 8 rx 0 tx 0 + ethtool -X eth0 start 2 weight 1 2 3 4 0 0 + +On a 5750X chip, this uses an indirection table size of 64. When the +RX/combined channel number changes to 65 or above, the indirection table +size increases and such a change will fail: + + ethtool -L eth0 combined 65 rx 0 tx 0 + netlink error: Invalid argument + +The kernel log will show: + +bnxt_en 0000:04:00.0 eth0: RSS table size change required, RSS table entries must be default to proceed + +The indirection table must be reverted back to default first before changing +the channels to 65 or above in this example: + + ethtool -X eth0 default + ethtool -L eth0 combined 65 rx 0 tx 0 + +15. Run self test: + + ethtool -t eth0 + + Note that only single function PFs can execute self tests. If a PF has + active VFs, only online tests can be executed. + +16. Collect Firmware Coredump: + + ethtool -w eth0 data FILENAME + +17. Set coredump flags: + + ethtool -W eth0 N + + Note that the following are supported values for N: + 0 Collection for live dump + 1 Collection for crash dump + This setting is allowed in either of following cases + a) PFs on platforms that have kernel config. option CONFIG_TEE_BNXT_FW enabled. + This option is only enabled on some ARM SoCs. + b) PFs which are configured to support crash dump using host memory. + +18. Reset the device: + + ethtool --reset eth0 [flags N] [type] + + Note that driver supports 'ap' and 'all' type of resets. 
Also, '--reset' + option is available from ethtool version 4.15 or newer. + +19. Add receive network flow classification filters. + + ethtool -N eth0 flow-type ether|ipv4|ipv6|tcp4|udp4|tcp6|udp6 FLOW_SPEC + + This feature requires n-tuple filters to be enabled (default is enabled): + + ethtool -K eth0 ntuple on + +Example: Ethernet filter to rx queue 0 + + ethtool -N eth0 flow-type ether dst 00:11:22:33:44:55 action 0 + Note: flow-type ether is not supported on BCM575xx series chipsets. + +Example: TCP/IPv4 5-tuple filter to rx queue 1 + + ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.1 src-ip 192.168.0.2 \ + dst-port 80 src-port 32768 action 1 + +Example: UDP/IPv4 4-tuple filter to rx queue 2 + + ethtool -N eth0 flow-type udp4 dst-ip 192.168.0.1 src-ip 192.168.0.2 \ + dst-port 2049 action 2 + +Example: IPv4 4-tuple filter to drop with wildcard match i.e. TCP/UDP/ICMP + + ethtool -N eth0 flow-type ipv4 dst-ip 192.168.0.1 src-ip 192.168.0.2 \ + l4proto 255 action -1 + +The action parameter must be greater than or equal to 0 to specify the +RX queue/ring number. The standard negative action parameters for +Wake-on-LAN is not supported. Note, however, negative actions other than +drop are used to extend the ethtool interface for mapping flows to sockets, as +detailed in the Multi-root NUMA Direct section below. +Note that ipv4/ipv6 flows supports only ICMPV4/ICMPV6 protocols and reserved +protocol (255). Reserved protocol is used for wildcard match i.e. TCP/UDP/ICMP. + +The flow-type is counted as the first tuple and must always be specified. +At least one additional tuple must be specified for TCP/UDP filters. +Partial wildcard tuples with incomplete masks are supported using the +normal ethtool syntax. For example, to match the 192.168.1.0/24 subnet: + + ethtool -N eth0 flow-type udp dst-ip 192.168.1.0 m 0.0.0.255 action 1 + +When supplied, the mask is counterintuitively specified as the inverse of +the way subnet masks are typically specified. 
That is, ethtool masks have +ones in the bits that are to be ignored in the match - a quirk of ethtool's +backwards compatibility with the way masks were specified using the legacy +kernel ntuple interface. Note that tuple masks are optional and are assumed +by ethtool to be the complete mask (all zeroes) when the tuple alone is +specified. + +It is possible to create a 5-tuple filter that is inside the domain of +another 4-tuple filter, for example. In general, the more specific +5-tuple filter will take precedence. + +If the filter is created succesfully, the ID of the filter will be returned +by ethtool. ethtool -n will also display the current list of filters with +their IDs. A specific filter can be deleted by specifying the ID. The +"loc" parameter that allows the user to specify the location of the filter +is not supported. It must be the default 0xffffffff which means that the +driver will choose the location/ID. + +Example: Delete filter ID 3 + + ethtool -N eth0 delete 3 + +Note that if accelerated RFS is enabled and it has added some 5-tuple +filters, any duplicate 5-tuple filters added by ethtool will be rejected. +It is generally not recommended to enable accelerated RFS and create +static 5-tuple filters on the same function. + +20. Show Forward Error Correction (FEC) configured and active settings: + + ethtool --show-fec eth0 + +21. Set Forward Error Correction (FEC) settings: + + ethtool --set-fec eth0 encoding auto|off|baser|rs|llrs + +Example: set FEC to autonegotiate: + + ethtool --set-fec eth0 encoding auto + + Note that a new FEC setting will always result in a link toggle. 
In FEC + autoneg code, the advertised FEC settings will be shown by the main + ethtool command together with other link settings: + + ethtool eth0 + Settings for eth0: + Supported ports: [ FIBRE ] + Supported link modes: 10000baseT/Full + 40000baseCR4/Full + 25000baseCR/Full + 50000baseCR2/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Supported FEC modes: BaseR RS + Advertised link modes: 10000baseT/Full + 40000baseCR4/Full + 25000baseCR/Full + 50000baseCR2/Full + Advertised pause frame use: No + Advertised auto-negotiation: Yes + Advertised FEC modes: BaseR RS + Speed: 25000Mb/s + Duplex: Full + Port: Direct Attach Copper + PHYAD: 1 + Transceiver: internal + Auto-negotiation: on + Supports Wake-on: d + Wake-on: d + Current message level: 0x00000000 (0) + + Link detected: yes + +Example: set FEC to forced Clause 91 (Reed Solomon): + + ethtool --set-fec eth0 encoding rs + + Note that a newer kernel such as 4.20 is required for full FEC support. + +22. Dump registers: + + ethtool -d eth0 + + This will dump some PCIe registers for diagnostics purposes. Note that + ethtool 5.10 or newer will provide formatting and decoding of the + register output. + +23. See ethtool man page for more options. + + +Autoneg +======= + +The bnxt_en driver supports Autonegotiation of speed and flow control on +most devices. Some dual-port 25G devices do not support Autoneg. Autoneg +must be enabled for 10GBase-T devices. + +Note that parallel detection is not supported when autonegotiating +100GBase-CR4, 50GBase-CR2, 40GBase-CR4, 25GBase-CR, 10GbE SFP+. +If one side is autonegoatiating and the other side is not, +link will not come up. + +25G, 50G and 100G advertisements are newer standards first defined in the 4.7 +kernel's ethtool interface. To fully support these new advertisement speeds +for autonegotiation, 4.7 (or newer) kernel and a newer ethtool utility are +required. Similarly, PAM4 speeds are only supported with post 5.1 kernels. 
+ +Below are some examples to illustrate the limitations when using 4.6 and +older kernels: + +1. Enable Autoneg with all supported speeds advertised when the device +currently has Autoneg disabled: + + ethtool -s eth0 autoneg on advertise 0x0 + +Note that to advertise all supported speeds (including 25G, 50G and 100G), +the device must initially have Autoneg disabled. advertise is a hexadecimal +value specifying one or more advertised speed. 0x0 is special value that +means all supported speeds. See ethtool man page. These advertise values +are supported by the driver: + +0x020 1000baseT Full +0x1000 10000baseT Full +0x1000000 40000baseCR4 Full + +2. Enable Autoneg with only 10G advertised: + + ethtool -s eth0 autoneg on advertise 0x1000 + +or: + + ethtool -s eth0 autoneg on speed 10000 duplex full + + +3. Enable Autoneg with only 40G advertised: + + ethtool -s eth0 autoneg on advertise 0x01000000 + +4. Enable Autoneg with 40G and 10G advertised: + + ethtool -s eth0 autoneg on advertise 0x01001000 + +Note that the "Supported link modes" and "Advertised link modes" will not +show 25G, 50G and 100G even though they may be supported or advertised. 
For +example, on a device that is supporting and advertising 10G, 25G, 40G, 50G and +100G, and linking up at 50G, ethtool will show the following: + + ethtool eth0 + Settings for eth0: + Supported ports: [ FIBRE ] + Supported link modes: 10000baseT/Full + 40000baseCR4/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Advertised link modes: 10000baseT/Full + 40000baseCR4/Full + Advertised pause frame use: Symmetric + Advertised auto-negotiation: Yes + Speed: 50000Mb/s + Duplex: Full + Port: FIBRE + PHYAD: 1 + Transceiver: internal + Auto-negotiation: on + Current message level: 0x00000000 (0) + + Link detected: yes + +Using kernels 4.7 or newer and ethtool version 4.8 or newer, 25G, 50G and 100G +advertisement speeds can be properly configured and displayed, without any +of the limitations described above. ethtool version 4.8 has a bug that +ignores the advertise parameter, so it is recommended to use ethtool 4.10. +Example ethtool 4.10 output showing 10G/25G/40G/50G/100G advertisement settings: + + ethtool eth0 + Settings for eth0: + Supported ports: [ FIBRE ] + Supported link modes: 10000baseT/Full + 40000baseCR4/Full + 25000baseCR/Full + 50000baseCR2/Full + 100000baseCR4/Full + Supported pause frame use: Symmetric Receive-only + Supports auto-negotiation: Yes + Advertised link modes: 10000baseT/Full + 40000baseCR4/Full + 25000baseCR/Full + 50000baseCR2/Full + 100000baseCR4/Full + Advertised pause frame use: No + Advertised auto-negotiation: Yes + Speed: 50000Mb/s + Duplex: Full + Port: Direct Attach Copper + PHYAD: 1 + Transceiver: internal + Auto-negotiation: on + Supports Wake-on: d + Wake-on: d + Current message level: 0x00000000 (0) + + Link detected: yes + +These are the complete advertise values supported by the driver using 4.7 +kernel or newer and a compatible version of ethtool supporting the new +values: + +0x020 1000baseT Full +0x1000 10000baseT Full +0x1000000 40000baseCR4 Full +0x80000000 25000baseCR Full 
+0x400000000 50000baseCR2 Full +0x4000000000 100000baseCR4 Full + +Note that older drivers (prior to 2.21) did not make a distinction on the +exact physical layer encoding and media type for a link speed. For example, +at 50G, the device may support 50000baseCR2 and 50000baseSR2 for copper and +multimode fiber cables respectively. Regardless of what cabling is used +for 50G, these drivers used only the ethtool value defined for 50000baseCR2 +to cover all variants of the 50G media types. The same applies to all +other advertise value for other link speeds listed above. + +More recent drivers report the correct media types in ethtool link modes. +In particular, if no media is detected, all supported modes should be now +reported. For instance, on a BCM575xx card one might find: + + Advertised link modes: 10000baseT/Full + 10000baseKX4/Full + 10000baseKR/Full + 25000baseCR/Full + 25000baseSR/Full + 50000baseCR2/Full + 100000baseSR4/Full + 100000baseCR4/Full + 100000baseLR4_ER4/Full + 50000baseSR2/Full + 10000baseCR/Full + 10000baseSR/Full + 10000baseLR/Full + 200000baseSR4/Full + 200000baseLR4_ER4_FR4/Full + 200000baseCR4/Full + +Note, there is a many to one relationship between the fully specified link +modes and the underlying hardware support for autonegotiated speeds. For +example, 25000baseCR/Full and 25000baseSR/Full refer to the same underlying +hardware configuration, differing only in the media that is physically +attached. Enabling or disabling one will affect the other corresponding +modes and vise versa. 
Thus, after issuing: + + ethtool -s eth0 advertise 200000baseCR4/Full off + +all three of the above supported 4 lane 200Gbps configurations are dropped +from the advertised list: + + Advertised link modes: 10000baseT/Full + 10000baseKX4/Full + 10000baseKR/Full + 25000baseCR/Full + 25000baseSR/Full + 50000baseCR2/Full + 100000baseSR4/Full + 100000baseCR4/Full + 100000baseLR4_ER4/Full + 50000baseSR2/Full + 10000baseCR/Full + 10000baseSR/Full + 10000baseLR/Full + +When the media type is detected by the hardware, only those modes supported +by the fitted media are relevant: + + Advertised link modes: 25000baseSR/Full + 50000baseSR2/Full + 100000baseSR4/Full + 10000baseSR/Full + +COMPATIBILITY NOTE: + +In the above case, SR optics are installed. Because older drivers reported +copper modes for all media types, the driver is still tolerant of the +incorrect mode being used. Note, however, that the fitted media will take +precedence when adding advertised speeds. That is, while modes can be added +using a mismatched media type, they cannot be removed without also clearing +the bit associated with the specific attached media. It is therefore possible +to add 200000baseSR4/Full to the above list by requesting the corresponding +200000baseCR4/Full mode, in a backward compatible fashion, but the converse +is not true. If SR media is attached and the 200000baseSR4/Full mode is +listed, then it must be explicitly removed from the active list in order to +disable it. + +Also of note, newer drivers will report 1000baseX/Full for gigabit Ethernet +when a DAC module is attached, whereas older drivers reported 1000baseT/Full +regardless of media. + + +Energy Efficient Ethernet +========================= + +The driver supports Energy Efficient Ethernet (EEE) settings on 10GBase-T +devices. If enabled, and connected to a link partner that advertises EEE, +EEE will become active. EEE saves power by entering Low Power Idle (LPI) +state when the transmitter is idle. 
The downside is increased latency as +it takes a few microseconds to exit LPI to start transmitting again. + +On a 10GBase-T device that supports EEE, the link up console message will +include the current state of EEE. For example: + + bnxt_en 0000:05:00.0 eth0: NIC Link is Up, 10000 Mbps full duplex, Flow control: none + bnxt_en 0000:05:00.0 eth0: EEE is active + +The active state means that EEE is negotiated to be active during +autonegotiation. Additional EEE parameters can be obtained using ethtool: + + ethtool --show-eee eth0 + + EEE Settings for eth0: + EEE status: enabled - active + Tx LPI: 8 (us) + Supported EEE link modes: 10000baseT/Full + Advertised EEE link modes: 10000baseT/Full + Link partner advertised EEE link modes: 10000baseT/Full + +The tx LPI timer of 8 microseconds is currently fixed and cannot be adjusted. +EEE is only supported on 10GBase-T. 1GBase-T does not currently support EEE. + +To disable EEE: + + ethtool --set-eee eth0 eee off + +To enable EEE, but disable LPI: + + ethtool --set-eee eth0 eee on tx-lpi off + +This setting will negotiate EEE with the link partner but the transmitter on +eth0 will not enter LPI during idle. The link partner may independently +choose to enter LPI when its transmitter is idle. + + +Enabling Receive Side Scaling (RSS) +=================================== + +By default, the driver enables RSS by allocating receive rings to match the +the number of CPUs (up to 8). Incoming packets are run through a 4-tuple +or 2-tuple hash function for TCP/IP packets and IP packets respectively. +Non fragmented UDP packets are run through a 4-tuple hash function on newer +devices (2-tuple on older devices). See below for more information about +4-tuple and 2-tuple and how to configure it. + +The computed hash value will determine the receive ring number for the +packet. This way, RSS distributes packets to multiple receive rings while +guaranteeing that all packets from the same flow will be steered to the same +receive ring. 
The processing of each receive ring can be done in parallel +by different CPUs to achieve higher performance. For example, irqbalance +will distribute the MSIX vector of each RSS receive ring across CPUs. +However, RSS does not guarantee even distribution or optimal distribution of +packets. + +To disable RSS, set the number of receive channels (or combined channels) to 1: + + ethtool -L eth0 rx 1 combined 0 + +or + + ethtool -L eth0 combined 1 rx 0 tx 0 + +To re-enable RSS, set the number of receive channels or (combined channels) to +a value higher than 1. + +The RSS hash can be configured for 4-tuple or 2-tuple for various flow types. +4-tuple means that the source, destination IP addresses and layer 4 port +numbers are included in the hash function. 2-tuple means that only the source +and destination IP addresses are included. 4-tuple generally gives better +results. Below are some examples on how to set and display the hash function. + +To display the current hash for TCP over IPv4: + + ethtool -u eth0 rx-flow-hash tcp4 + +To disable 4-tuple (enable 2-tuple) for UDP over IPv4: + + ethtool -U eth0 rx-flow-hash udp4 sd + +To enable 4-tuple for UDP over IPv4: + + ethtool -U eth0 rx-flow-hash udp4 sdfn + + +Enabling Accelerated Receive Flow Steering (RFS) +================================================ + +RSS distributes packets based on n-tuple hash to multiple receive rings. +The destination receive ring of a packet flow is solely determined by the +hash value. This receive ring may or may not be processed in the kernel by +the CPU where the sockets application consuming the packet flow is running. + +Accelerated RFS will steer incoming packet flows to the ring whose MSI-X +vector will interrupt the CPU running the sockets application consuming +the packets. The benefit is higher cache locality of the packet data from +the moment it is processed by the kernel until it is consumed by the +application. + +Accelerated RFS requires n-tuple filters to be supported. 
On older +devices, only Physical Functions (PFs, see SR-IOV below) support n-tuple +filters. On the latest devices, n-tuple filters are supported and enabled +by default on all functions. Use ethtool to disable n-tuple filters: + + ethtool -K eth0 ntuple off + +To re-enable n-tuple filters: + + ethtool -K eth0 ntuple on + +After n-tuple filters are enabled, Accelerated RFS will be automatically +enabled when RFS is enabled. These are example steps to enable RFS on +a device with 8 rx rings: + +echo 32768 > /proc/sys/net/core/rps_sock_flow_entries +echo 2048 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-1/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-2/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-3/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-4/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-5/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-6/rps_flow_cnt +echo 2048 > /sys/class/net/eth0/queues/rx-7/rps_flow_cnt + +These steps will set the global flow table to have 32K entries and each +receive ring to have 2K entries. These values can be adjusted based on +usage. + +Note that for Accelerated RFS to be effective, the number of receive channels +(or combined channels) should generally match the number of CPUs. Use +ethtool -L to fine-tune the number of receive channels (or combined channels) +if necessary. Accelerated RFS has precedence over RSS. If a packet matches an +n-tuple filter rule, it will be steered to the RFS specified receive ring. +If the packet does not match any n-tuple filter rule, it will be steered +according to RSS hash. + +To display the active n-tuple filters setup for Accelerated RFS: + + ethtool -n eth0 + +Note that if there are a large number of filters and they are constantly +changing, ethtool may report some retrieval failures. These errors are +normal. + +The Accelerated RFS filters added by the stack are subject to aging based +on activity. 
It is normal for a small number of these filters to remain +after all traffic has stopped. New filters will eventually trigger the +removal of these old filters. + +IPv6, GRE and IP-inIP n-tuple filters are supported on 4.5 and newer kernels. +Note that RFS will only steer non-fragmented UDP packets to a connected UDP +socket. Fragmented UDP packets or UDP packets to a connectionless socket +will fall back to RSS hashing. + +n-tuple filters can also be added statically using ethtool (See +BNXT_EN Driver Settings section above). + + +Enabling Busy Poll Sockets +========================== + +Using 3.11 and newer kernels (also backported to some major distributions), +Busy Poll Sockets are supported by the bnxt_en driver if +CONFIG_NET_RX_BUSY_POLL is enabled. Individual sockets can set the +SO_BUSY_POLL option, or it can be enabled globally using sysctl: + + sysctl -w net.core.busy_read=50 + +This sets the time to busy read the device's receive ring to 50 usecs. +For socket applications waiting for data to arrive, using this method +can decrease latency by 2 or 3 usecs typically at the expense of +higher CPU utilization. The value to use depends on the expected +time the socket will wait for data to arrive. Use 50 usecs as a +starting recommended value. + +In addition, the following sysctl parameter should also be set: + + sysctl -w net.core.busy_poll=50 + +This sets the time to busy poll for socket poll and select to 50 usecs. +50 usecs is a recommended value for a small number of polling sockets. + + +Enabling SR-IOV +=============== + +The Broadcom NetXtreme-C and NetXtreme-E devices support Single Root I/O +Virtualization (SR-IOV) with Physical Functions (PFs) and Virtual Functions +(VFs) sharing the Ethernet port. The same bnxt_en driver is used for both +PFs and VFs under Linux. + +Only the PFs are automatically enabled. If a PF supports SR-IOV, lspci +will show that it has the SR-IOV capability and the total number of VFs +supported. 
To enable one or more VFs, write the desired number of VFs +to the following sysfs file: + + /sys/bus/pci/devices/:::/sriov_numvfs + +For example, to enable 4 VFs on bus 82 device 0 function 0: + + echo 4 > /sys/bus/pci/devices/0000:82:00.0/sriov_numvfs + +To disable the VFs, write 0 to the same sysfs file. Note that to change +the number of VFs, 0 must first be written before writing the new number +of VFs. + +On older 2.6 kernels that do not support the sysfs method to enable SR-IOV, +the driver uses the module parameter "num_vfs" to enable the desired number +of VFs. Note that this is a global parameter that applies to all PF +devices in the system. For example, to enable 4 VFs on all supported PFs: + + modprobe bnxt_en num_vfs=4 + +The 4 VFs of each supported PF will be enabled when the PF is brought up. + +The VF and the PF operate almost identically under the same Linux driver +but not all operations supported on the PF are supported on the VF. + +The resources needed by each VF are assigned by the PF based on how many +VFs are requested to be enabled and the resources currently used by the PF. +It is important to fully configure the PF first with all the desired features, +such as number of RSS/TSS channels, jumbo MTU, etc, before enabling SR-IOV. +After enabling SR-IOV, there may not be enough resources left to reconfigure +the PF. + +The resources are evenly divided among the VFs. Enabling a large number of +VFs will result in less resources (such as RSS/TSS channels) for each VF. + +Refer to other documentation on how to map a VF to a VM or a Linux Container. + +Some attributes of a VF can be set using iproute2 through the PF. SR-IOV +must be enabled by setting the number of desired VFs before any attributes +can be set. Some examples: + +1. Set VF MAC address: + + ip link set vf mac + +Example: + + ip link set eth0 vf 0 mac 00:12:34:56:78:9a + +Note that if the VF MAC addres is not set as shown, a random MAC address will +be used for the VF. 
If the VF MAC address is changed while the VF driver has +already brought up the VF, it is necessary to bring down and up the VF before +the new MAC address will take effect. + +2. Set VF link state: + + ip link set vf state auto|enable|disable + +The default is "auto" which reflects the true link state. Setting the VF +link to "enable" allows loopback traffic regardless of the true link state. + +Example: + + ip link set eth0 vf 0 state enable + +3. Set VF default VLAN: + + ip link set vf vlan + +Example: + + ip link set eth0 vf 0 vlan 100 + +4. Set VF MAC address spoof check: + + ip link set vf spoofchk on|off + +Example: + + ip link set eth0 vf 0 spoofchk on + +Note that spoofchk is only effective if a VF MAC address has been set as +shown in #1 above. + +5. Set VF trust: + + ip link set vf trust on|off + +Example: + + ip link set eth0 vf 0 trust on + +A VF with trust enabled can change its MAC address even if a MAC address has +been set by the PF as shown in #1 above. This will be useful in some +bonding configurations where MAC address changes may be required. + +Note VF trust attribute is supported on kernel 4.4 or newer and iproute utility +4.5 or newer. + +6. Set VF queues: + + ip link set vf min_tx_queues max_tx_queues \ + min_rx_queues max_rx_queues + +Note that this is an experimental way to configure VF queue resources +from the PF and requires the experimental kernel patch and iproute2 +patch. The official method to configure this in the official mainline +kernel will be likely very different when it becomes available. This +experimental method will be deprecated at that time. + +The PF initially divides queue resources equally among the VFs. This +command reconfigures the VF queue resources for an individual VF by +increasing or decreasing TX and RX queue parameters. The minimum +parameter represents the guaranteed resource for the VF and the maximum +parameter represents the maximum but not necessarily guaranteed resource +for the VF. 
These are raw queue resources used to create the channels +reported by "ethtool -l" on the VF. For example, an ethtool channel +may require 2 RX queues if hardware GRO/LRO or jumbo MTU is in use. + +After queues are successfully reconfigured, it may require the VF to be +brought down and up before it will take effect. ethtool -l on the VF +should show different channel parameters after the new queue parameters +take effect on the VF. + +Example: + + ip link set eth0 vf 0 min_tx_queues 8 max_tx_queues 16 \ + min_rx_queues 16 max_rx_queues 32 + + +Virtual Ethernet Bridge (VEB) +============================= + +The NetXtreme-C/E devices contain an internal hardware Virtual Ethernet +Bridge (VEB) to bridge traffic between virtual ports enabled by SR-IOV. +VEB is normally turned on by default. VEB can be switched to VEPA +(Virtual Ethernet Port Aggregator) mode if an external VEPA switch is used +to provide bridging between the virtual ports. + +Use the bridge command to switch between VEB/VEPA mode. Note that only +the PF driver will accept the command for all virtual ports belonging to the +same physical port. The bridge mode cannot be changed if there are multiple +PFs sharing the same physical port (e.g. NPAR or Multi-Host). + +To set the bridge mode: + + bridge link set dev hwmode {veb/vepa} + +To show the bridge mode: + + bridge link show dev + +Example: + + bridge link set dev eth0 hwmode vepa + +Note that older firmware does not support VEPA mode. This operation is +also not supported on older kernels. + + +Hardware QoS +============ + +The NetXtreme-C/E devices support hardware QoS. The hardware has multiple +internal queues, each can be configured to support different QoS attributes, +such as latency, bandwidth, lossy or lossless data delivery. These QoS +attributes are specified in the IEEE Data Center Bridging (DCB) standard +extensions to Ethernet. DCB parameters include Enhanced Transmission +Selection (ETS) and Priority-based Flow Control (PFC). 
In a DCB network, +all traffic will be classified into multiple Traffic Classes (TCs), each +of which is assigned different DCB parameters. + +Typically, all traffic is VLAN tagged with a 3-bit priority in the VLAN +tag. The VLAN priority is mapped to a TC. For example, a network with +3 TCs may have the following priority to TC mapping: + +0:0,1:0,2:0,3:2,4:1,5:0,6:0,7:0 + +This means that priorities 0,1,2,5,6,7 are mapped to TC0, priority 3 to TC2, +and priority 4 to TC1. ETS allows bandwidth assigment for the TCs. For +example, the ETS bandwidth assignment may be 40%, 50%, and 10% to TC0, TC1, +and TC2 respectively. PFC provides link level flow control for each VLAN +priority independently. For example, if PFC is enabled on VLAN priority 4, +then only TC1 will be subject to flow control without affecting the other +two TCs. + +Typically, DCB parameters are automatically configured using the DCB +Capabilities Exchange protocol (DCBX). The bnxt_en driver currently +supports the Linux lldpad DCBX agent. lldpad supports all versions of +DCBX but the bnxt_en driver currently only supports the IEEE DCBX version. +Typically, the DCBX enabled switch will convey the DCB parameters to lldpad +which will then send the hardware QoS parameters to bnxt_en to configure +the device. Refer to the lldpad(8) and lldptool(8) man pages for further +information on how to setup the lldpad DCBX agent. + +Note that the embedded firmware DCBX/LLDP agent must be disabled in order +to run the lldpad agent in host software. Refer to other Broadcom +documentation on how to disable the firmware agent in NVRAM. + +To support hardware TCs, the proper Linux qdisc must be used to classify +outgoing traffic into their proper hardware TCs. For example, the mqprio +qdisc may be used. A simple example using mqprio qdisc is illustrated below. +Refer to the tc-mqprio(8) man page for more information. 
+ + tc qdisc add dev eth0 root mqprio num_tc 3 map 0 0 0 2 1 0 0 0 hw 1 + +The above command creates the mqprio qdisc with 3 hardware TCs. The priority +to TC mapping is the same as the example at the beginning of the section. +The bnxt_en driver will create 3 groups of tx rings, with each group mapping +to an internal hardware TC. + +Once this is created, SKBs with different priorities will be mapped to the +3 TCs according to the specified map above. Note that this SKB priority +is only used to direct packets within the kernel stack to the proper hardware +ring. If the outgoing packets are VLAN tagged, the SKB priority does not +automatically map to the VLAN priority of the packet. The VLAN egress map +has to be set up to have the proper VLAN priority for each packet. + +In the current example, if VLAN 100 is used for all traffic, the VLAN egress +map can be set up like this: + + ip link add link eth0 name eth0.100 type vlan id 100 \ + egress 0:0 1:1 2:2 3:3 4:4 5:5 6:6 7:7 + +This creates a one-to-one mapping of SKB priority to VLAN egress priority. +In other words, SKB priority 0 maps VLAN priority 0, SKB priority 1 maps to +VLAN priority 1, etc. This one-to-one mapping should generally be used. + +Instead of using VLAN priority to map to TCs in the network, it is also +possible to use DSCP (Differentiated Services Code Point) in the IP header +to do the mapping. Obviously, only IP traffic in the network can be mapped +this way, whereas VLAN priority will work universally for all traffic types. +The DSCP to priority mapping is specified using a new Application Priority TLV +recently added to the IEEE DCBX spec. This is supported by the driver's +interface to lldpad. Note that the application is responsible to set +the proper DSCP value in the IP header for outgoing traffic on a Linux host. +For example, iptables may be used to set the proper DSCP values for outgoing +traffic. 
This will replace the VLAN egress mapping mentioned earlier if DSCP +is used instead of VLAN. The rest of the steps are the same between VLAN and +DSCP. + +If each TC has more than one ring, TSS will be performed to select a tx ring +within the TC. + +To display the current qdisc configuration: + + tc qdisc show + +Example output: + + qdisc mqprio 8010: dev eth0 root tc 3 map 0 0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 + queues:(0:3) (4:7) (8:11) + +The example above shows that bnxt_en has allocated 4 tx rings for each of the +3 TCs. SKBs with priorities 0,1,2,5,6,7 will be transmitted using tx rings +0 to 3 (TC0). SKBs with priority 4 will be transmitted using rings 4 to 7 +(TC1). SKBs with priority 3 will be transmitted using rings 8 to 11 (TC2). + +Next, SKB priorities have to be set for different applications so that the +packets from the different applications will be mapped to the proper TCs. +By default, the SKB priority is set to 0. There are multiple methods to set +SKB priorities. net_prio cgroup is a convenient way to do this. Refer to the +link below for more information: + +https://www.kernel.org/doc/Documentation/cgroup-v1/net_prio.txt + +As mentioned previously, the DCB attributes of each TC are normally configured +by the DCBX agent in lldpad. It is also possible to set the DCB attributes +manually in a simple network or for test purposes. The following example +will manually set up eth0 with the example DCB local parameters mentioned at +the beginning of the section. + + lldpad -d + lldptool -T -i eth0 -V ETS-CFG tsa=0:ets,1:ets,2:ets \ + up2tc=0:0,1:0,2:0,3:2,4:1,5:0,6:0,7:0 \ + tcbw=40,50,10 + lldptool -T -i eth0 -V PFC enabled=4 + +Note that the ETS bandwidth distribution will only be evident when all +traffic classes are transmitting and reaching the link capacity. + +RoCE APP TLV can also be set. 
For example, to map RoCE v2 traffic +to priority 4: + +lldptool -T -i eth0 -V APP app=4,3,4791 + +The bnxt_re driver will automatically obtain the proper priority and TC +mapping for offloaded RoCE traffic (RoCE v2 traffic mapped to priority 4 and +TC1 with PFC enabled in this example). + +Usage of strict priority for a given CoS can result in starvation of +other rings configured. This may result in the inability to transmit +packets on ets rings and may result in the kernel reporting transmit +timeouts. Only configure strict priority on rings with high-priority, +low throughput traffic to prevent consuming resources. + +See lldptool-ets(8), lldptool-pfc(8), lldptool-app(8) man pages for more +information. + +On an NPAR device with multiple partitions sharing the same network port, +DCBX cannot be run on more than one partition. In other words, the lldpad +adminStatus can be set to rxtx on no more than one partition. The same is +true for SRIOV virtual functions. DCBX cannot be run on the VFs. + +On these multi-function devices, the hardware TCs are generally shared +between all the functions. The DCB parameters negotiated and setup on +the main function (NPAR or PF function) will be the same on the other +functions sharing the same port. Note that the standard lldptool will +not be able to show the DCB parameters on the other functions which have +adminStatus disabled. + + +PTP Hardware Clock +================== + +The NetXtreme-C/E devices support PTP Hardware Clock which provides hardware +timestamps for PTP v2 packets. The Linux PTP project contains more +information about this feature. A newer 4.x kernel and newer firmware +(2.6.134 or newer) are required to use this feature. Only the first PF +of the network port has access to the hardware PTP feature. Use ethtool -T +to check if PTP Hardware Clock is supported. 
+ +On BCM574xx and BCM575xx chips, occasionally ptp4l application may report +timeout error while trying to retrieve tx timestamp and might allude to a +bug in driver. +Though application resumes the functioning immediately, unless an explicit +TX timestamp failure message is logged by bnxt_en in the dmesg, increasing +the default tx_timestamp_timeout of ptp4l to a suitable value will fix the +problem. On most systems 25ms works as optimal value. +For BCM574xx chips, 100ms would be the recommended value. + +BCM575xx series chips support timestamping on VFs but not on VFs that have +transparent VLAN configured. + +Set IRQ Balance Manually +======================== + +On newer 4.x kernels, the driver does IRQ affinity for higher performance. +But in older kernels, if driver is reloaded IRQ affinity will not be set +properly. In the case where IRQ affinity is not properly set, the interrupts +will be manually associated with a CPU using SMP affinity. + +To manually balance interrupts, the `irqbalance` service needs to be stopped. + + service irqbalance stop + +View the CPU cores where NetXtreme-C/E device's interrupt is allowed to be +received. + + grep "ethX" /proc/interrupts + cat /proc/irq/<irq-number>/smp_affinity_list + +Associate each interrupt with a CPU core. + + echo <cpu-core-list> > /proc/irq/<irq-number>/smp_affinity_list + +Note: User configured SMP affinity may change after unloading/loading RoCE + driver, or other driver configuration changes that need to reinitialize + IRQs. The user may need to configure it again. + +BNXT_EN Module Parameters +========================= + +On newer 3.x/4.x kernels, the driver does not support any driver parameters. +Please use standard tools (sysfs, ethtool, iproute2, etc) to configure the +driver. + +The only exception is the "num_vfs" module parameter supported on older 2.6 +kernels to enable SR-IOV. Please see the SR-IOV section above. 
+ + +BNXT_EN Driver Defaults +======================= + +Speed : 1G/2.5G/10G/25G/40G/50G/100G/200G/400G depending on the board. + +Flow control : None + +MTU : 1500 (range 60 - 9500) Maximum MTU controlled by + firmware and set during driver initialization. + +Rx Ring Size : 511 (range 0 - 2047) + +Rx Jumbo Ring Size : 2044 (range 0 - 8191) automatically adjusted by the + driver. + +Tx Ring Size : 511 (range (MAX_SKB_FRAGS+2) - 2047) + + MAX_SKB_FRAGS varies on different kernels and + different architectures. On most kernels for + x86, MAX_SKB_FRAGS is 17. + +Number of RSS/TSS channels:Up to 64 combined channels or match the number of + CPUs whichever is higher, subject to chip limits. + In the case of NPAR, this will be upto 16 combined + channels. + +TSO : Enabled + +GRO (hardware) : Enabled + +LRO : Disabled + +Coalesce rx usecs : 6 usec + +Coalesce rx usecs irq : 1 usec + +Coalesce rx frames : 6 frames + +Coalesce rx frames irq : 1 frame + +Coalesce tx usecs : 28 usec + +Coalesce tx usecs irq : 2 usec + +Coalesce tx frames : 30 frames + +Coalesce tx frames irq : 2 frame + +Coalesce stats usecs : 1000000 usec (range 250000 - 1000000, 0 to disable) + + +Statistics +========== + +The driver reports all major standard network counters to the stack. These +counters are reported in /proc/net/dev or by other standard tools such as +netstat -i. + +Note that the counters are updated every second by the firmware by +default. To increase the frequency of these updates, ethtool -C can +be used to increase the frequency to 0.25 seconds if necessary. + +More detailed statistics are reported by ethtool -S. Some of the counters +reported by ethtool -S are for diagnostics purposes only. For example, +the "rx_drops" counter reported by ethtool -S includes dropped packets +that don't match the unicast and multicast filters in the hardware. A +non-zero count is normal and does not generally reflect any error conditions. 
+This counter should not be confused with the "RX-DRP" counter reported by +netstat -i. The latter reflects dropped packets due to buffer overflow +conditions. + +Another example is the "tpa_aborts" counter reported by ethtool -S. It +counts the LRO (Large Receive Offload) aggregation aborts due to normal +TCP conditions. A high tpa_aborts count is generally not an indication +of any errors. + +The "rx_ovrsz_frames" counter reported by ethtool -S may count all +packets bigger than 1518 bytes when using earlier versions of the firmware. +Newer version of the firmware has reprogrammed the counter to count +packets bigger than 9600 bytes. + +If the "rx_discards" and "rx_buf_errors" counters are high compared to the +total receive packets for that ring, it generally means that the host CPU +is not processing the incoming packets fast enough and causing packet drops. +The number of receive packets dropped is indicated by these counters. +Increasing the receive ring size may reduce the number of dropped packets (See +BNXT_EN Driver Settings section above, examples 5 and 6). + +On BCM573xx and BCM574xx devices, the condition that triggers the +"rx_buf_errors" counter to increment requires a reset of the ring. The +driver will print a warning message one time only when a reset is required as +shown in this example: + +bnxt_en 0000:07:00.0 eth0: RX buffer error 2260004 + +This warning message will appear only one time even if there are multiple +of these errors requiring reset from one or multiple devices. The "rx_resets" +counter will also increment for each reset in response to this condition. If +the "rx_resets" counter is high, it is recommended to increase the RX ring size +to reduce these reset events which are disruptive. + +The "rx_l4_csum_errors" counter will increment for every TCP/UDP checksum +error detected by hardware on each ring if RX checksum offload is enabled. 
+Such packets will be rejected by the stack and similar stack error +counters for TCP/UDP will also increment. Note that IPv4 checksum is +always verified by the stack and not offloaded. + + +Unloading and Removing Driver +============================= + +rmmod bnxt_en + +Note that if SR-IOV is enabled and there are active VFs running in VMs, the +PF driver should never be unloaded. It can cause catastrophic failures such +as kernel panics or reboots. The only time the PF driver can be unloaded +with active VFs is when all the VFs and the PF are running in the same host +kernel environment with one driver instance controlling the PF and all the +VFs. Using Linux Containers is one such example where the PF driver can be +unloaded to gracefully shutdown the PF and all the VFs. + + +Updating Firmware for Broadcom NetXtreme-C and NetXtreme-E devices +================================================================== + +Controller firmware may be updated using the Linux request_firmware interface +in conjunction with the ethtool "flash device" interface. + +Using the ethtool utility, the controller's boot processor firmware may be +updated by copying the 2 "boot code" firmware files to the local /lib/firmware/ +directory: + + cp bc1_cm_a.bin bc2_cm_a.bin /lib/firmware + +and then issuing the following 2 ethtool commands (both are required): + + ethtool -f bc1_cm_a.bin 4 + + ethtool -f bc2_cm_a.bin 18 + +NVM packages (*.pkg files) containing controller firmware, microcode, +pre-boot software and configuration data may be installed into a controller's +NVRAM using the ethtool utility by first copying the .pkg file to the local +/lib/firmware/ directory and then executing a single command: + + ethtool -f + +Note: do not specify the full path to the file on the ethtool -f command-line. + +Note: root privileges are required to successfully execute these commands. 
+ +After "flashing" new firmware into the controller's NVRAM, a cold restart of +the system is required for the new firmware to take effect. This requirement +will be removed in future firmware and driver versions. + +Updating Firmware for Broadcom Nitro device +=========================================== + +Nitro controller firmware should be updated from Uboot prompt by following the +below steps + + sf probe + sf erase 0x50000 0x30000 + tftpboot 0x85000000 /chimp_xxx.bin + sf write 0x85000000 0x50000 + +Devlink +======= + +In kernel versions 4.6 or higher, some operations on bnxt_en driver can be done +using devlink. + +Devlink tool is part of iproute2 routing commands and utilities. Latest devlink +can be downloaded from http://www.kernel.org/pub/linux/utils/net/iproute2/, +if it is not already installed. + +As devlink tool is evolving, use latest kernel and iproute2 tool available for +all features via devlink. + +Following are some examples on how to use devlink. See the devlink man page +for more information. + +Some devlink examples: + +1. Command to display board information and firmware versions: + + devlink dev info [DEV] + +Example: + devlink dev info pci/0000:3b:00.0 + +will show: + +pci/0000:3b:00.0: + driver bnxt_en + serial_number B0-26-28-FF-FE-C8-85-20 + versions: + fixed: + board.id BCM957508-P2100G + asic.id 0x1750 + asic.rev B1 + running: + fw.psid 0.0.6 + fw 218.1.220.0 + fw.mgmt 218.1.202.0 + fw.mgmt.api 1.10.2 + stored: + fw.psid 0.0.6 + fw 218.1.220.0 + fw.mgmt 218.1.202.0 + +Note that 'info' is supported in 5.1 or higher kernel versions. + +2. Updating firmware: + + devlink dev flash DEV file PATH + +Example: + devlink dev flash pci/0000:3b:00.0 file BCM957454A4540C.pkg + +Note: File path is relative to /lib/firmware directory. + +Note that 'flash' is supported in 5.1 or higher kernel versions. + +3. Dump device level driver parameter information: + + devlink dev param show + +4. 
Display the device level driver parameter information: + + devlink dev param show [ DEV name PARAMETER ] + +Example: + devlink dev param show pci/0000:3b:00.0 name enable_sriov + +will show: + +pci/0000:3b:00.0: + name enable_sriov type generic + values: + cmode permanent value true + +Note that 'param' is supported in 4.19 or higher kernel versions. + +5. Set the device level driver parameter information: + + devlink dev param set DEV name PARAMETER value VALUE cmode \ + { runtime | driverinit | permanent } + +Example: + devlink dev param set pci/0000:3b:00.0 name enable_sriov \ + value false cmode permanent + +6. Dump health reporter information: + + devlink health show [ DEV reporter REPORTER ] + +Example: + devlink health show pci/0000:3b:00.0 reporter fw + +might show: + +pci/0000:3b:00.0: + name fw + state healthy error 2 recover 2 grace_period 0 auto_recover true + +Note that health reporters are created only when corresponding features +are enabled in NVM configuration. + +Example: + devlink health show pci/0000:3b:00.0 reporter hw + +might show: + +pci/0000:3b:00.0: + reporter hw + state healthy error 18 recover 18 grace_period 0 auto_recover true + +7. Run diagnostics via health reporter: + + devlink health diagnose DEV reporter REPORTER + +Example: + devlink health diag pci/0008:01:00.1 reporter fw + +might show: + +Status: healthy Severity: normal Resets: 7 Arrests: 0 Survivals: 0 Discoveries: 0 Fatalities: 0 Diagnoses: 0 + +Example: + devlink health diag pci/0008:01:00.1 reporter hw + +might show: + +Status: healthy nvm_write_errors: 187 nvm_erase_errors: 0 + +The diagnose output may expose certain implementation details. In particular, +the various counters constitute debugging information intended for internal +use only and should not be interpreted by the user. + +8. 
Extract device coredump via health reporter: + + devlink health dump show DEV reporter REPORTER + +Example: + devlink health dump show pci/0008:01:00.1 reporter fw + +The binary output rendered is useful to developers for debugging purposes and +is not intended to be interpreted by the user. The devlink core stores the most +recent dump and will not capture a new one until an existing dump is cleared +using: + + devlink health dump clear DEV reporter REPORTER + +Devlink health dumps may be captured automatically on errors. If no stored dump +exists, then devlink health dump show will trigger a capture. + +9. Reset firmware using reload command: + + devlink dev reload DEV action fw_activate + +Example: + devlink dev reload pci/0000:3b:00.0 action fw_activate + +will show, if it is successful: + +reload_actions_performed: + driver_reinit fw_activate + +Note that 'reload' actions are supported in 5.10 or higher kernel versions. + +10. Reload stats can be seen using command: + + devlink dev show -s + +Example: + +$ devlink dev show -s +pci/0000:3b:00.0: + stats: + reload: + fw_activate: + unspecified 2 + remote_reload: + driver_reinit: + unspecified 0 + fw_activate: + unspecified 0 no_reset 0 +pci/0000:3b:00.1: + stats: + reload: + fw_activate: + unspecified 0 + remote_reload: + driver_reinit: + unspecified 2 + fw_activate: + unspecified 2 no_reset 0 + +11. See devlink man pages for more options. + +Error Recovery +============== + +Error recovery is a new feature that can facilitate the automatic recovery +from some fatal firmware or hardware errors. Without this feature, such +errors often cause prolonged outage, sometimes requiring cold boot to +fully recover. + +When the feature is enabled, both the firmware and the driver will take +part in monitoring the health of the adapter. If the firmware detects an +error, a notification is sent to the driver and a coordinated reset of +the adapter will be initiated. 
If the driver detects that the firmware is +unresponsive, it can also initiate a reset. The reset will generally take +seconds to complete and network functions will be automatically restored +after the reset. In cases where heavy or converged traffic are used, +transmit timeouts may be reported. + +If the kernel supports the devlink health framework, health related +information and counters will be reported to devlink and visible to the +user. + +Known limitations: + +1. The error recovery process generally takes seconds to complete. If +the device is brought down (ifdown) before error recovery has completed, +the error recovery process will abort and the device will be brought down. +Kernel message below will be displayed: + +bnxt_en 0000:04:00.0 eth0: FW reset in progress during close, \ + FW reset will be aborted + +2. If the above happens or if the error recovery process fails for other +reasons, bringing up the device will fail and the kernel message below will +be displayed: + +bnxt_en 0000:04:00.0 eth0: A previous firmware reset did not complete, aborting + +Reloading the driver is required. If errors persist while reloading the +driver, a reboot may be required. + +3. If SR-IOV is enabled and there are active VFs during error recovery, the +PF device needs to be in the ifup state for the recovery to succeed. +Otherwise the VF devices will not recover and the kernel message below will +be displayed: + +bnxt_en 0000:04:02.0 eth1: Firmware reset aborted + +In this case, bring up the PF device first. If the PF device is brought up +successfully, then bring up the VF devices. +Also, in this case, the PF device will rediscover itself and reconfigure +the SR-IOV resources for the VFs, just as it would during an ethtool reset. + +4. If the driver is in the middle of loading or initializing on a PCIe +function or in the middle of a transmit timeout recovery while firmware +detects an error, the recovery will sometimes not succeed on this function. 
+The driver may abort loading or initializing. Kernel messages similar to below +will be displayed: + +bnxt_en 0000:65:00.0 (unnamed net_device) (uninitialized): Firmware not responding, status: 0x448100 +OR +bnxt_en 0000:65:00.0 eth0: Abandoning msg {0xb1 0xb7bd} len: 0 due to firmware status: 0x448100 + +If this happens, try to unload and reload the driver again, or unbind and rebind +the PCIe function using sysfs. + +5. For error recovery to succeed, the interface should be in the ifup state +with no disruptions during the process that might reconfigure the device. +In other words, for reliable error recovery, it is recommended to not run +any configuration changes (such as unloading the RoCE Driver, ethtool self-tests etc) +while error recovery is in progress. +If at all changes are done, and recovery does not succeed, try the below actions to recover: +a. Bring up the device +b. Unload and reload both the drivers +c. Unbind and rebind the PCIe function using sysfs + +6. PCIe FLR is not supported in driver. + +Multi-root NUMA Direct +====================== + +On multi-root systems, the NUMA Direct feature can be enabled via the +'numa_direct' ethtool private flag: + + ethtool --set-priv-flags eth0 numa_direct on + +and confirmed as follows: + + ethtool --show-priv-flags eth0 + + Private flags for eth0: + numa_direct: on + +Note, if the device does not advertise multi-root capability, then the +set command will return an operation not supported error when attempting +to enable the feature. + +Once enabled, it is possible to add special ntuple filters in order to +direct RX traffic from any port via the PF attached to the desired NUMA +node by providing a flow specification. + +The ntuple filters are managed in same manner as detailed in section 19 +of 'BNXT_EN Driver Settings' above, except that the target action is +different. An action of '-9999' will direct traffic matching the flow +specification to the network device where the rule is installed. 
For +example: + + ethtool -N eth0 flow-type tcp4 dst-ip 10.0.0.1 action -9999 + ethtool -N eth1 flow-type tcp4 dst-ip 10.0.0.2 action -9999 + +would direct the matching TCP traffic destined to 10.0.0.1 via eth0 while +the traffic destined for 10.0.0.2 would be delivered via the eth1 PF. If +these devices are attached to different NUMA nodes, then this effectively +directs traffic to the desired socket, with RSS still directing traffic +automatically to the subset of cores within this node. + +On BCM575xx and later parts, it is also possible to steer traffic to a +specific queue. The queues are numbered backwards, with -10000 corresponding +to queue 0, -10001 to queue 1 and so on. This can be used to not only +direct traffic to a desired socket, but also to a specific core where +the traffic should be sunk. + +The driver will make a best effort to ensure that descriptor and packet +memory is allocated on the appropriate node, but the user should ideally +limit the number of device queues configured for each PF to not exceed the +number of cores in each socket. This also requires that the interrupts +associated with a given PF are also routed to cores within its attached +NUMA node. The 'Set IRQ Balance Manually' section details how this can be +achieved. + +Firmware Core Reset on TX timeout +================================= + +Firmware core reset on TX timeout can be enabled via the 'core_reset_tx_timeout' +ethtool private flag: + + ethtool --set-priv-flags eth0 core_reset_tx_timeout on + +Once enabled, a core reset will be issued to the firmware when TX timeout is +detected by the driver. + +DIM (Dynamic Interrupt Moderation) +================================== + +Dynamic Interrupt Moderation refers to changing the interrupt +moderation configuration of a channel in order to optimize packet +processing. 
The mechanism includes an algorithm which decides if and how to +change moderation parameters for a channel, usually based on performing an +analysis on runtime data sampled from the system. + +Run the following ethtool command to check whether `Adaptive Rx`(DIM) is ON. + +ethtool -c eth0 + +Coalesce parameters for eth0: +Adaptive RX: on TX: n/a + +The bnxt_en driver does not support DIM in the Tx direction. + +To view the coalesce settings altered dynamically by the networking stack's +DIM algorithm, run the below ethtool command: + +ethtool --per-queue eth0 --show-coalesce (To see for all rings) + +OR + +ethtool -Q eth0 queue_mask 0x1 --show-coalesce (To see for ring#0) + +Note that the queue number corresponding to the queue_mask always starts from +0 for combined channels, as well as for separate Rx channels and Tx channels. + +Note that this per-queue command and kernel infrastructure support is available +only in newer kernels. + +HWMON support +============= + +On newer kernels (from 4.9 onwards) with CONFIG_HWMON enabled, bnxt_en driver +creates the standard sysfs infrastructure in the hardware monitoring core to +display NIC temperature attributes. + +Driver will expose the following attributes: +1) Input temperature(temp1_input): current temperature of the device +2) Warning temperature(temp1_max): warning threshold temperature +3) Critical temperature(temp1_crit): critical threshold temperature +4) Emergency Temperature(temp1_emergency): Emergency threshold temperature +5) Shutdown Temperature(temp1_shutdown): Shutdown threshold temperature + +Some boards may not have all the threshold temperatures defined and if +it is not defined, the driver will not expose that threshold attribute. + +Each threshold temperature has an associated alarm file, containing a +boolean value. 1 means that an alarm condition exists. i.e, the current +device temperature is greater than the threshold temperature. 0 means no alarm. 
+ +For example, for dualport BCM957508-P2100G will have 2 hwmon directories +(one for each PCI function) under "/sys/class/hwmon/hwmon[X,Y]". + +# grep -H -d skip . /sys/class/hwmon/hwmon2/* +/sys/class/hwmon/hwmon2/name:bnxt_en +/sys/class/hwmon/hwmon2/temp1_crit:100000 +/sys/class/hwmon/hwmon2/temp1_crit_alarm:0 +/sys/class/hwmon/hwmon2/temp1_emergency:110000 +/sys/class/hwmon/hwmon2/temp1_emergency_alarm:0 +/sys/class/hwmon/hwmon2/temp1_input:78000 +/sys/class/hwmon/hwmon2/temp1_max:95000 +/sys/class/hwmon/hwmon2/temp1_max_alarm:0 +/sys/class/hwmon/hwmon2/temp1_shutdown:105000 +/sys/class/hwmon/hwmon2/temp1_shutdown_alarm:0 diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt.c b/drivers/thirdparty/release-drivers/bnxt/bnxt.c new file mode 100644 index 000000000000..b5ffc831a3cb --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt.c @@ -0,0 +1,20609 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2024 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_NDO_XDP +#include +#endif +#ifdef HAVE_GRO_H +#include +#endif +#include +#include +#include +#include +#include +#if defined(HAVE_UDP_TUNNEL_H) +#include +#endif +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) +#ifdef HAVE_NDO_ADD_VXLAN +#include +#endif +#endif +#include +#include +#include +#include +#ifdef HAVE_PCIE_ERROR_REPORTING +#include +#endif +#include +#ifndef NO_NETDEV_CPU_RMAP +#include +#endif +#include +#ifdef HAVE_TC_SETUP_TYPE +#include +#endif +#ifdef HAVE_KTLS +#include +#endif +#ifdef CONFIG_PAGE_POOL +#ifdef HAVE_PAGE_POOL_HELPERS_H +#include +#else +#include +#endif +#ifdef HAVE_XDP_MULTI_BUFF +#include +#endif +#ifdef HAVE_NETDEV_QUEUES_H +#include +#endif +#endif +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ulp.h" +#include "bnxt_sriov.h" +#include "bnxt_ethtool.h" +#include "bnxt_dcb.h" +#include "bnxt_xdp.h" +#include "bnxt_ptp.h" +#ifndef HSI_DBG_DISABLE +#include "decode_hsi.h" +#endif +#include "bnxt_vfr.h" +#include "bnxt_tc.h" +#include "bnxt_tfc.h" +#include "tfc.h" +#include "bnxt_devlink.h" +#include "bnxt_lfc.h" +#include "bnxt_debugfs.h" +#include "bnxt_coredump.h" +#include "bnxt_dbr.h" +#include "bnxt_mpc.h" +#include "bnxt_ktls.h" +#include "bnxt_hwmon.h" +#include "bnxt_hdbr.h" +#include "bnxt_sriov_sysfs.h" +#include "tfc.h" +#include "bnxt_udcc.h" +#include "bnxt_log.h" +#include "bnxt_log_data.h" +#include "bnxt_xsk.h" +#include "bnxt_nic_flow.h" + +#if defined(DEV_NETMAP) || defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) +/* + * bnxt_netmap_linux.h contains functions for netmap support + * that extend the standard 
driver. + */ +#define NETMAP_BNXT_MAIN +#define DEV_NETMAP +#include "bnxt_netmap_linux.h" +#endif + +#define BNXT_TX_TIMEOUT (5 * HZ) +#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) + +static const char version[] = + "Broadcom NetXtreme-C/E/S driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n"; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom NetXtreme-C/E/S network driver"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) +#define BNXT_RX_DMA_OFFSET NET_SKB_PAD +#define BNXT_RX_COPY_THRESH 256 + +#define BNXT_TX_PUSH_THRESH 164 +#define BNXT_TX_PUSH_THRESH_PPP 208 + +#ifndef PCIE_SRIOV_CONFIGURE +static unsigned int num_vfs; +module_param(num_vfs, uint, 0); +MODULE_PARM_DESC(num_vfs, " Number of supported virtual functions (0 means sriov is disabled)"); +#endif + +/* indexed by enum board_idx */ +static const struct { + char *name; +} board_info[] = { + [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" }, + [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" }, + [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, + [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" }, + [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" }, + [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" }, + [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" }, + [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, + [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" }, + [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57416] = { "Broadcom 
BCM57416 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" }, + [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" }, + [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" }, + [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" }, + [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, + [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, + [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" }, + [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" }, + [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, + [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, + [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, + [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" }, + [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb Ethernet" }, + [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" }, + [BCM57608] = { "Broadcom BCM57608 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet" }, + [BCM57604] = { "Broadcom BCM57604 25Gb/50Gb/100Gb/200Gb Ethernet" }, + [BCM57602] = { "Broadcom BCM57602 25Gb/50Gb Ethernet" }, + [BCM57601] = { "Broadcom BCM57601 25Gb/50Gb Ethernet" }, + [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" }, + [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" }, + [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" }, + [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 
10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, + [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, +#ifdef BNXT_FPGA + [BCM58812] = { "Broadcom BCM58812 NetXtreme-S 2x50G Ethernet" }, + [BCM58814] = { "Broadcom BCM58814 NetXtreme-S 2x100G Ethernet" }, + [BCM58818] = { "Broadcom BCM58818 NetXtreme-S 2x200G Ethernet" }, +#endif + [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, + [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, + [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, + [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" }, + [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" }, + [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" }, + [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" }, +}; + +const struct pci_device_id bnxt_pci_tbl[] = { + { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 }, + { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, + { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, + { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, + { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, + { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, + { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, + { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, + { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, + { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, + { 
PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, + { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, + { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, + { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, + { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, + { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, + { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, + { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, + { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 }, + { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 }, + { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 }, + { PCI_VDEVICE(BROADCOM, 0x1760), .driver_data = BCM57608 }, + { PCI_VDEVICE(BROADCOM, 0x1761), .driver_data = BCM57604 }, + { PCI_VDEVICE(BROADCOM, 0x1762), .driver_data = BCM57602 }, + { PCI_VDEVICE(BROADCOM, 0x1763), .driver_data = BCM57601 }, + { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR }, + { 
PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR }, + { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, + { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, +#ifdef BNXT_FPGA + { PCI_VDEVICE(BROADCOM, 0xd812), .driver_data = BCM58812 }, + { PCI_VDEVICE(BROADCOM, 0xd814), .driver_data = BCM58814 }, + { PCI_VDEVICE(BROADCOM, 0xd818), .driver_data = BCM58818 }, +#endif +#ifdef CONFIG_BNXT_SRIOV + { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF }, + { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, + { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV }, + { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF }, + { PCI_VDEVICE(BROADCOM, 0xd800), 
.driver_data = NETXTREME_S_VF }, +#ifdef BNXT_FPGA + { PCI_VDEVICE(BROADCOM, 0xd82e), .driver_data = NETXTREME_S_VF }, +#endif +#endif + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl); + +static const u16 bnxt_vf_req_snif[] = { + HWRM_FUNC_CFG, + HWRM_FUNC_VF_CFG, + HWRM_PORT_PHY_QCFG, + HWRM_CFA_L2_FILTER_ALLOC, + HWRM_OEM_CMD, +}; + +static const u16 bnxt_async_events_arr[] = { + ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, + ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR, + ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, + ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, + ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, + ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, + ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, + ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE, + ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE, + ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER, +}; + +static struct workqueue_struct *bnxt_pf_wq; + +#define BNXT_IPV6_MASK_ALL {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} +#define BNXT_IPV6_MASK_NONE {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} + +const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE = { + .ports = { + .src = 0, + .dst = 0, + }, + .addrs = { + .v6addrs = { + .src = BNXT_IPV6_MASK_NONE, + .dst = BNXT_IPV6_MASK_NONE, + }, + }, +}; + +const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL = { + .ports = { + .src = 0xffff, + .dst = 0xffff, + }, + .addrs = { + .v6addrs = { + .src = BNXT_IPV6_MASK_ALL, + .dst = BNXT_IPV6_MASK_ALL, + }, + }, +}; + +const struct 
bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL = { + .ports = { + .src = 0xffff, + .dst = 0xffff, + }, + .addrs = { + .v4addrs = { + .src = 0xffffffff, + .dst = 0xffffffff, + }, + }, +}; + +static bool bnxt_vf_pciid(enum board_idx idx) +{ + return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || + idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV || + idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF || + idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF); +} + +#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) +#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) + +#define BNXT_DB_CQ(db, idx) \ + writel(DB_CP_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) + +#define BNXT_DB_NQ_P5(db, idx) \ + bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | DB_RING_IDX(db, idx),\ + (db)->doorbell) + +#define BNXT_DB_NQ_P7(db, idx) \ + bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_MASK | \ + DB_RING_IDX(db, idx), (db)->doorbell) + +#define BNXT_DB_CQ_ARM(db, idx) \ + writel(DB_CP_REARM_FLAGS | DB_RING_IDX(db, idx), (db)->doorbell) + +#define BNXT_DB_NQ_ARM_P5(db, idx) \ + bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | \ + DB_RING_IDX(db, idx), (db)->doorbell) + +static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P7) + BNXT_DB_NQ_P7(db, idx); + else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + BNXT_DB_NQ_P5(db, idx); + else + BNXT_DB_CQ(db, idx); +} + +static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + BNXT_DB_NQ_ARM_P5(db, idx); + else + BNXT_DB_CQ_ARM(db, idx); +} + +static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + u64 db_val; + + db_val = db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_RING_IDX(db, idx); + bnxt_hdbr_cp_db(db->db_cp, db_val, false, 1); + bnxt_writeq(bp, db_val, db->doorbell); + } else { + BNXT_DB_CQ(db, idx); + } +} + +static void 
bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) +{ + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + + if (BNXT_PF(bp)) + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); + else + schedule_delayed_work(&bp->fw_reset_task, delay); +} + +static void __bnxt_queue_sp_work(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + queue_work(bnxt_pf_wq, &bp->sp_task); + else + schedule_work(&bp->sp_task); +} + +static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) +{ + set_bit(event, &bp->sp_event); + __bnxt_queue_sp_work(bp); +} + +static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (!rxr->bnapi->in_reset) { + rxr->bnapi->in_reset = true; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + else + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); + __bnxt_queue_sp_work(bp); + } + rxr->rx_next_cons = 0xffff; +} + +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx) +{ + struct bnxt_napi *bnapi = txr->bnapi; + + if (bnapi->tx_fault) + return; + + netdev_err(bp->dev, "Invalid Tx completion (ring:%d cons:%u prod:%u i:%d)", + txr->txq_index, txr->tx_cons, txr->tx_prod, idx); + WARN_ON_ONCE(1); + bnapi->tx_fault = 1; + bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); +} + +const u16 bnxt_lhint_arr[] = { + TX_BD_FLAGS_LHINT_512_AND_SMALLER, + TX_BD_FLAGS_LHINT_512_TO_1023, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_1024_TO_2047, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, + 
TX_BD_FLAGS_LHINT_2048_AND_LARGER, + TX_BD_FLAGS_LHINT_2048_AND_LARGER, +}; + +static u16 bnxt_xmit_get_cfa_action(struct bnxt *bp, struct sk_buff *skb) +{ +#ifdef CONFIG_VF_REPS + struct metadata_dst *md_dst = skb_metadata_dst(skb); + + /* If the transmit is happening on the uplink port (PF), use the + * tx_cfa_action. + */ + if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) + return bp->tx_cfa_action; + + return md_dst->u.port_info.port_id; +#else + if (BNXT_PF(bp)) + return bp->tx_cfa_action; + else + return 0; +#endif +} + +static int bnxt_push_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct netdev_queue *txq, struct sk_buff *skb, + u32 vlan_tag_flags, u32 cfa_action) +{ + struct tx_push_buffer *tx_push_buf = txr->tx_push; + struct tx_push_bd *tx_push = &tx_push_buf->push_bd; + struct tx_bd_ext *tx_push1 = &tx_push->txbd2; + void __iomem *db = txr->tx_db.doorbell; + void *pdata = tx_push_buf->data; + struct bnxt_sw_tx_bd *tx_buf; + u16 prod, last_frag; + unsigned int length; + struct tx_bd *txbd; + int i, push_len; + u64 *end; + u32 len; + + prod = txr->tx_prod; + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + last_frag = skb_shinfo(skb)->nr_frags; + length = skb->len; + len = skb_headlen(skb); + + /* Set COAL_NOW to be ready quickly for the next push */ + tx_push->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | + (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_push1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + tx_push1->tx_bd_hsize_lflags = 0; + + tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + tx_push1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); + + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + + skb_copy_from_linear_data(skb, 
pdata, len); + pdata += len; + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + return -EFAULT; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type; + txbd->tx_bd_haddr = txr->data_mapping; + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2); + prod = NEXT_TX(prod); + tx_push->tx_bd_opaque = txbd->tx_bd_opaque; + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + memcpy(txbd, tx_push1, sizeof(*txbd)); + prod = NEXT_TX(prod); + tx_push->doorbell = cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | + DB_RING_IDX(&txr->tx_db, prod)); + WRITE_ONCE(txr->tx_prod, prod); + + tx_buf->is_push = 1; + netdev_tx_sent_queue(txq, length); + wmb(); /* Sync is_push and byte queue before pushing data */ + + push_len = (length + sizeof(*tx_push) + 7) / 8; + if (push_len > 16) { + __iowrite64_copy(db, tx_push_buf, 16); + __iowrite32_copy(db + 4, tx_push_buf + 1, (push_len - 16) << 1); + } else { + __iowrite64_copy(db, tx_push_buf, push_len); + } + txr->bnapi->cp_ring.sw_stats->tx.tx_push_xmit++; + return 0; +} + +static int bnxt_push_xmit_p5(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct netdev_queue *txq, struct sk_buff *skb, + u32 vlan_tag_flags, u32 cfa_action) +{ + struct bnxt_db_info *db = &txr->tx_push_db; + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd_ext *txbd1; + int i, push_len, bds; + u16 prod, last_frag; + unsigned int length; + struct tx_bd *txbd; + void *pdata; + u64 *end; + u32 len; + + if (unlikely(!db->doorbell)) + return -EOPNOTSUPP; + + length = skb->len; + push_len = TX_PUSH_LEN(length); + len = skb_headlen(skb); + prod = txr->tx_prod; + + bds = TX_INLINE_BDS(push_len); + if (bds > (TX_DESC_CNT - TX_IDX(prod))) + return -E2BIG; + + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + last_frag = 
skb_shinfo(skb)->nr_frags; + + /* Set COAL_NOW to be ready quickly for the next push */ + txbd->tx_bd_len_flags_type = + cpu_to_le32((length << TX_BD_LEN_SHIFT) | + TX_BD_TYPE_LONG_TX_BD_INLINE | + TX_BD_FLAGS_LHINT_512_AND_SMALLER | + TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | + (bds << TX_BD_FLAGS_BD_CNT_SHIFT)); + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, bds); + txbd->tx_bd_haddr = cpu_to_le64(0); + txbd1 = (struct tx_bd_ext *) (txbd + 1); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + txbd1->tx_bd_hsize_lflags = + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + else + txbd1->tx_bd_hsize_lflags = 0; + + txbd1->tx_bd_kid_mss = cpu_to_le32(0); + txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + txbd1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); + + pdata = txbd1 + 1; + end = pdata + length; + end = PTR_ALIGN(end, 8) - 1; + *end = 0; + + skb_copy_from_linear_data(skb, pdata, len); + pdata += len; + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *fptr; + + fptr = skb_frag_address_safe(frag); + if (!fptr) + return -EFAULT; + + memcpy(pdata, fptr, skb_frag_size(frag)); + pdata += skb_frag_size(frag); + } + + txr->tx_prod = prod + bds; + + tx_buf->is_push = 1; + tx_buf->inline_data_bds = bds - 2; + netdev_tx_sent_queue(txq, length); + wmb(); /* Sync is_push and byte queue before pushing data */ + + push_len = DIV_ROUND_UP(push_len, 8); + + if (bp->tx_push_mode == BNXT_PUSH_MODE_WCB) { + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_PUSH_START | + DB_RING_IDX(db, prod), db->doorbell); + __iowrite64_copy(txr->tx_push_wcb, txbd, push_len); + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_PUSH_END | DBR_PATH_L2 | + DB_RING_IDX(db, txr->tx_prod), db->doorbell); + } else { + bnxt_db_write_relaxed(bp, db, txr->tx_prod); + bnxt_writeq_relaxed(bp, DB_PUSH_INFO(db, push_len, prod), + db->doorbell + sizeof(struct dbc_dbc)); + __iowrite64_copy(txr->tx_push_wcb, txbd, push_len); + /* flip buffers */ + 
db->doorbell = (void *)((uintptr_t)db->doorbell ^ DB_PPP_SIZE); + txr->tx_push_wcb = (void *)((uintptr_t)txr->tx_push_wcb ^ DB_PPP_SIZE); + } + + txr->bnapi->cp_ring.sw_stats->tx.tx_push_xmit++; + return 0; +} + +void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u16 prod) +{ + /* Sync BD data before updating doorbell */ + wmb(); + bnxt_db_write(bp, &txr->tx_db, prod); + txr->kick_pending = 0; +} + +#if defined(HAVE_ETF_QOPT_OFFLOAD) +static void bnxt_generate_txtimed_bd(struct bnxt *bp, struct sk_buff *skb, + struct bnxt_tx_ring_info *txr, + struct bnxt_sw_tx_bd *tx_buf, u16 *prod) +{ + struct tx_bd_sotxtime *tx_bd_txtime; + u32 sotxtm_flags; + s64 txtime_ns; + + *prod = NEXT_TX(*prod); + + /* SO_TXTIME Timed BD is 2nd BD in chain + * Expect application to adjtimex CLOCK_TAI offset, + * so that skb->tstamp and phc is in same clock domain units. + */ + txtime_ns = ktime_to_ns(skb->tstamp); + tx_bd_txtime = (struct tx_bd_sotxtime *) + &txr->tx_desc_ring[TX_RING(bp, *prod)][TX_IDX(*prod)]; + sotxtm_flags = TX_BD_FLAGS_KIND_SO_TXTIME | TX_BD_TYPE_TIMEDTX_BD; + tx_bd_txtime->tx_bd_len_flags_type = cpu_to_le32(sotxtm_flags); + /* Currently the driver supports RTC clock only */ + tx_bd_txtime->tx_time = cpu_to_le64(txtime_ns); + skb_txtime_consumed(skb); +} +#endif + +netdev_tx_t __bnxt_start_xmit(struct bnxt *bp, struct netdev_queue *txq, + struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, __le32 lflags, u32 kid) +{ + u32 len, free_size, vlan_tag_flags, cfa_action, flags = 0; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct tx_bd *txbd, *txbd0 = NULL; + struct pci_dev *pdev = bp->pdev; + unsigned int length, pad = 0; + struct bnxt_sw_tx_bd *tx_buf; + u16 prod, last_frag, prod0; + struct tx_bd_ext *txbd1; + dma_addr_t mapping; + int i; + + prod = txr->tx_prod; + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto tx_free; + + length = skb->len; + len = skb_headlen(skb); + last_frag = skb_shinfo(skb)->nr_frags; + + txbd = 
&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + tx_buf->skb = skb; + tx_buf->nr_frags = last_frag; + + vlan_tag_flags = 0; + cfa_action = bnxt_xmit_get_cfa_action(bp, skb); + if (skb_vlan_tag_present(skb)) { + vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | + skb_vlan_tag_get(skb); + /* Currently supports 8021Q, 8021AD vlan offloads + * QINQ1, QINQ2, QINQ3 vlan headers are deprecated + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0) + if (skb->vlan_proto == htons(ETH_P_8021Q)) +#endif + vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; + } + +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && + ptp->tx_tstamp_en) { + if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { + lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; + } else if (!skb_is_gso(skb)) { + u16 seq_id, hdr_off, txts_prod; + + if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && + !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { + + if (vlan_tag_flags) + hdr_off += VLAN_HLEN; + lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + ptp->txts_req[txts_prod].tx_seqid = seq_id; + ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; + tx_buf->txts_prod = txts_prod; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + bnxt_ptp_get_skb_pre_xmit_ts(bp); + } + } + } +#endif + +#ifdef HAVE_NOFCS + if (unlikely(skb->no_fcs)) + lflags |= TX_BD_FLAGS_NO_CRC; +#endif + + free_size = bnxt_tx_avail(bp, txr); + if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh && + !lflags && !txr->etf_enabled) { + switch (bp->tx_push_mode) { + case BNXT_PUSH_MODE_WCB: + fallthrough; + case BNXT_PUSH_MODE_PPP: + if (!bnxt_push_xmit_p5(bp, txr, txq, skb, + vlan_tag_flags, cfa_action)) + goto tx_done; + break; + case BNXT_PUSH_MODE_LEGACY: + if (!bnxt_push_xmit(bp, txr, txq, skb, vlan_tag_flags, + cfa_action)) + goto tx_done; + break; + default: + break; + } + /* 
Continue normal TX if push fails. */ + } + + if (length < BNXT_MIN_PKT_SIZE) { + pad = BNXT_MIN_PKT_SIZE - length; + if (skb_pad(skb, pad)) + /* SKB already freed. */ + goto tx_kick_pending; + length = BNXT_MIN_PKT_SIZE; + } + + mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_free; + + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + prod0 = prod; + + prod = NEXT_TX(prod); + txbd1 = (struct tx_bd_ext *) + &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + + txbd1->tx_bd_hsize_lflags = lflags; + if (skb_is_gso(skb)) { + bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4); + u32 hdr_len; + +#ifdef HAVE_INNER_NETWORK_OFFSET + if (skb->encapsulation) { + if (udp_gso) + hdr_len = skb_inner_transport_offset(skb) + + sizeof(struct udphdr); + else + hdr_len = skb_inner_tcp_all_headers(skb); + } else if (udp_gso) { +#else + if (udp_gso) { +#endif + hdr_len = skb_transport_offset(skb) + + sizeof(struct udphdr); + } else { + hdr_len = skb_tcp_all_headers(skb); + } + + txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | + TX_BD_FLAGS_T_IPID | + (hdr_len << (TX_BD_HSIZE_SHIFT - 1))); + length = skb_shinfo(skb)->gso_size; + txbd1->tx_bd_kid_mss = cpu_to_le32(BNXT_TX_KID_HI(kid) | + length); + length += hdr_len; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + txbd1->tx_bd_hsize_lflags |= + cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM); + txbd1->tx_bd_kid_mss = 0; + txbd1->tx_bd_kid_mss = cpu_to_le32(BNXT_TX_KID_HI(kid)); + } + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { + length >>= 9; + if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) { + dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n", + skb->len); + i = 0; + goto tx_dma_error; + } + flags |= bnxt_lhint_arr[length]; + } + +#if defined(HAVE_ETF_QOPT_OFFLOAD) + if (txr->etf_enabled) + bnxt_generate_txtimed_bd(bp, skb, txr, tx_buf, &prod); +#endif 
+ flags |= (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | + ((last_frag + txr->bd_base_cnt) << TX_BD_FLAGS_BD_CNT_SHIFT); + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod0, txr->bd_base_cnt + last_frag); + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); + txbd1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); + + txbd0 = txbd; + for (i = 0; i < last_frag; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, + DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&pdev->dev, mapping))) + goto tx_dma_error; + + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + dma_unmap_addr_set(tx_buf, mapping, mapping); + + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + flags = len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + } + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = + cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + + netdev_tx_sent_queue(txq, skb->len); + +#ifdef HAVE_IEEE1588_SUPPORT + skb_tx_timestamp(skb); +#endif + + prod = NEXT_TX(prod); + + WRITE_ONCE(txr->tx_prod, prod); + + if (!netdev_xmit_more() || netif_xmit_stopped(txq)) { + mmiowb(); + bnxt_txr_db_kick(bp, txr, prod); + } else { + if (free_size >= bp->tx_wake_thresh) + txbd0->tx_bd_len_flags_type |= + cpu_to_le32(TX_BD_FLAGS_NO_CMPL); + txr->kick_pending = 1; + } + +tx_done: + if (unlikely(bnxt_tx_avail(bp, txr) < MAX_SKB_FRAGS + txr->bd_base_cnt)) { + if (netdev_xmit_more() && !tx_buf->is_push) { + if (txbd0) { + txbd0->tx_bd_len_flags_type &= + cpu_to_le32(~TX_BD_FLAGS_NO_CMPL); + mmiowb(); + } + bnxt_txr_db_kick(bp, txr, prod); + } + netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), + bp->tx_wake_thresh); + } + if (txr->etf_enabled) + txr->bnapi->cp_ring.sw_stats->txtime.txtime_xmit++; 
+ + return NETDEV_TX_OK; + +tx_dma_error: + if (BNXT_TX_PTP_IS_SET(lflags)) + BNXT_PTP_INC_TX_AVAIL(ptp); + + last_frag = i; + + /* start back at beginning and unmap skb */ + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), DMA_TO_DEVICE); + prod = NEXT_TX(prod); + + /* unmap remaining mapped pages */ + for (i = 0; i < last_frag; i++) { + prod = NEXT_TX(prod); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + DMA_TO_DEVICE); + } + +tx_free: + dev_kfree_skb_any(skb); +tx_kick_pending: + if (txr->kick_pending) + bnxt_txr_db_kick(bp, txr, txr->tx_prod); + txr->tx_buf_ring[txr->tx_prod].skb = NULL; + dev_core_stats_tx_dropped_inc(bp->dev); + return NETDEV_TX_OK; +} + +static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + u32 free_size, kid = 0; + __le32 lflags = 0; + int i; + + i = skb_get_queue_mapping(skb); + if (unlikely(i >= bp->tx_nr_rings)) { + dev_kfree_skb_any(skb); + dev_core_stats_tx_dropped_inc(dev); + netif_warn(bp, tx_err, dev, "TX packet queue %d exceeds maximum %d\n", + i, bp->tx_nr_rings - 1); + return NETDEV_TX_OK; + } + + txq = netdev_get_tx_queue(dev, i); + txr = &bp->tx_ring[bp->tx_ring_map[i]]; + + free_size = bnxt_tx_avail(bp, txr); + if (unlikely(free_size < skb_shinfo(skb)->nr_frags + txr->bd_base_cnt)) { + /* We must have raced with NAPI cleanup */ + if (net_ratelimit() && txr->kick_pending) + netif_warn(bp, tx_err, dev, + "bnxt: ring busy w/ flush pending!\n"); + if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr), + bp->tx_wake_thresh)) + return NETDEV_TX_BUSY; + } + + skb = bnxt_ktls_xmit(bp, txr, skb, &lflags, &kid); + if (unlikely(!skb)) + return NETDEV_TX_OK; + + return __bnxt_start_xmit(bp, txq, txr, 
skb, lflags, kid); +} + +/* Returns true if some remaining TX packets not processed. */ +static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + struct pci_dev *pdev = bp->pdev; + u16 hw_cons = txr->tx_hw_cons; + unsigned int tx_bytes = 0; + u16 cons = txr->tx_cons; + int tx_pkts = 0; + bool rc = false; + + while (RING_TX(bp, cons) != hw_cons) { + struct bnxt_sw_tx_bd *tx_buf; + struct sk_buff *skb; + bool is_ts_pkt; + int j, last; + + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + is_ts_pkt = tx_buf->is_ts_pkt; + if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { + rc = true; + break; + } + + cons = NEXT_TX(cons); + skb = tx_buf->skb; + tx_buf->skb = NULL; + tx_buf->is_ts_pkt = 0; + + if (tx_buf->is_push) { + tx_buf->is_push = 0; + cons += tx_buf->inline_data_bds; + if (!skb) { + /* presync BD */ + cons = NEXT_TX(cons); + continue; + } + tx_bytes += skb->len; + goto next_tx_int; + } + + if (unlikely(!skb)) { + bnxt_sched_reset_txr(bp, txr, cons); + return rc; + } + + if (txr->etf_enabled) + cons = NEXT_TX(cons); + + tx_bytes += skb->len; + + dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), DMA_TO_DEVICE); + last = tx_buf->nr_frags; + + for (j = 0; j < last; j++) { + cons = NEXT_TX(cons); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[j]), + DMA_TO_DEVICE); + } + +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely(is_ts_pkt)) { + /* PTP worker takes ownership of the skb */ + bnxt_get_tx_ts(bp, skb, tx_buf->txts_prod); + skb = NULL; + } +#endif + +next_tx_int: + cons = NEXT_TX(cons); + + tx_pkts++; + dev_consume_skb_any(skb); + } + + WRITE_ONCE(txr->tx_cons, cons); + + __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, + bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, + READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); + return 
rc; +} + +static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_tx_ring_info *txr; + bool more = false; + int i; + + bnxt_for_each_napi_tx(i, bnapi, txr) { + if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) + more |= __bnxt_tx_int(bp, txr); + } + if (!more) + bnapi->events &= ~BNXT_TX_CMP_EVENT; +} + +#ifdef HAVE_BUILD_SKB +#if !defined(CONFIG_PAGE_POOL) +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, + unsigned int *page_offset, gfp_t gfp) +{ + struct device *dev = &bp->pdev->dev; + unsigned int offset = 0; + struct page *page; + + if (PAGE_SIZE <= BNXT_RX_PAGE_SIZE) { + page = alloc_page(gfp); + if (!page) + return NULL; + goto map_page; + } + page = rxr->rx_page; + if (!page) { + page = alloc_page(gfp); + if (!page) + return NULL; + rxr->rx_page = page; + rxr->rx_page_offset = 0; + } + offset = rxr->rx_page_offset; + rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; + if (rxr->rx_page_offset == PAGE_SIZE) + rxr->rx_page = NULL; + else + get_page(page); + +map_page: + *mapping = dma_map_page_attrs(dev, page, offset, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + if (dma_mapping_error(&bp->pdev->dev, *mapping)) { + __free_page(page); + return NULL; + } + + if (page_offset) + *page_offset = offset; + + return page; +} +#else + +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, + unsigned int *offset, gfp_t gfp) +{ + struct device __maybe_unused *dev = &bp->pdev->dev; + struct page *page; + + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = page_pool_dev_alloc_frag(rxr->page_pool, offset, + BNXT_RX_PAGE_SIZE); + } else { + page = page_pool_dev_alloc_pages(rxr->page_pool); + *offset = 0; + } + if (!page) + return NULL; + +#ifndef HAVE_PAGE_POOL_GET_DMA_ADDR + *mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + if (dma_mapping_error(dev, 
*mapping)) { + page_pool_recycle_direct(rxr->page_pool, page); + return NULL; + } +#else + *mapping = page_pool_get_dma_addr(page) + *offset; +#if (!PP_FLAG_DMA_SYNC_DEV) + dma_sync_single_for_device(dev, *mapping, BNXT_RX_PAGE_SIZE, bp->rx_dir); +#endif +#endif + return page; +} +#endif + +static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping, + gfp_t gfp) +{ + u8 *data; + struct pci_dev *pdev = bp->pdev; + + if (gfp == GFP_ATOMIC) + data = napi_alloc_frag(bp->rx_buf_size); + else + data = netdev_alloc_frag(bp->rx_buf_size); + if (!data) + return NULL; + + *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + if (dma_mapping_error(&pdev->dev, *mapping)) { + skb_free_frag(data); + data = NULL; + } + return data; +} +#else + +static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, + struct bnxt_rx_ring_info *rxr, + gfp_t gfp) +{ + return NULL; +} + +static inline struct sk_buff *__bnxt_alloc_rx_frag(struct bnxt *bp, + dma_addr_t *mapping, + gfp_t gfp) +{ + struct sk_buff *skb; + u8 *data; + struct pci_dev *pdev = bp->pdev; + + skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size); + if (skb == NULL) + return NULL; + + data = skb->data; + + *mapping = dma_map_single_attrs(&pdev->dev, + data + bp->rx_dma_offset, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + if (dma_mapping_error(&pdev->dev, *mapping)) { + dev_kfree_skb(skb); + skb = NULL; + } + return skb; +} +#endif + +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; + dma_addr_t mapping; + +#ifdef HAVE_XSK_SUPPORT + if (BNXT_RING_RX_ZC_MODE(rxr) && rxr->xsk_pool) { + u32 headroom; + + headroom = xsk_pool_get_headroom(rxr->xsk_pool); + rx_buf->data = xsk_buff_alloc(rxr->xsk_pool); + if 
(!rx_buf->data) + return -ENOMEM; + bp->rx_dma_offset = headroom; + mapping = xsk_buff_xdp_get_dma(rx_buf->data); + } else if (BNXT_RX_PAGE_MODE(bp)) { +#else + if (BNXT_RX_PAGE_MODE(bp)) { +#endif + unsigned int offset; + struct page *page = + __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); + + if (!page) + return -ENOMEM; + + mapping += bp->rx_dma_offset; + rx_buf->data = page; + rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; + } else { +#ifdef HAVE_BUILD_SKB + u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); +#else + struct sk_buff *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); +#endif + + if (!data) + return -ENOMEM; + + rx_buf->data = data; +#ifdef HAVE_BUILD_SKB + rx_buf->data_ptr = data + bp->rx_offset; +#else + rx_buf->data_ptr = data->data + bp->rx_offset; +#endif + } + rx_buf->mapping = mapping; + + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + return 0; +} + +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data) +{ + u16 prod = rxr->rx_prod; + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt *bp = rxr->bnapi->bp; + struct rx_bd *cons_bd, *prod_bd; + + prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + prod_rx_buf->data = data; + prod_rx_buf->data_ptr = cons_rx_buf->data_ptr; + + prod_rx_buf->mapping = cons_rx_buf->mapping; + + prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + cons_bd = &rxr->rx_desc_ring[RX_RING(bp, cons)][RX_IDX(cons)]; + + prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr; +} + +static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + u16 next, max = rxr->rx_agg_bmap_size; + + next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx); + if (next >= max) + next = find_first_zero_bit(rxr->rx_agg_bmap, max); + return next; +} + +static inline int bnxt_alloc_rx_page(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp) +{ + struct rx_bd *rxbd = + 
&rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; + struct bnxt_sw_rx_agg_bd *rx_agg_buf; + struct page *page; + dma_addr_t mapping; + u16 sw_prod = rxr->rx_sw_agg_prod; + unsigned int offset = 0; + + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); + + if (!page) + return -ENOMEM; + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + rx_agg_buf = &rxr->rx_agg_ring[sw_prod]; + rxr->rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); + + rx_agg_buf->page = page; + rx_agg_buf->offset = offset; + rx_agg_buf->mapping = mapping; + rxbd->rx_bd_haddr = cpu_to_le64(mapping); + rxbd->rx_bd_opaque = sw_prod; + return 0; +} + +struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr) +{ + struct rx_agg_cmp *agg; + + cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr)); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + return agg; +} + +static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 agg_id, u16 curr) +{ + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id]; + + return &tpa_info->agg_arr[curr]; +} + +static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx, + u16 start, u32 agg_bufs, bool tpa) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt *bp = bnapi->bp; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u16 sw_prod = rxr->rx_sw_agg_prod; + bool p5_tpa = false; + u32 i; + + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) + p5_tpa = true; + + for (i = 0; i < agg_bufs; i++) { + u16 cons; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf; + struct rx_bd *prod_bd; + struct page *page; + + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i); + else + agg = bnxt_get_agg(bp, cpr, idx, start + i); + cons = 
agg->rx_agg_cmp_opaque; + __clear_bit(cons, rxr->rx_agg_bmap); + + if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) + sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); + + __set_bit(sw_prod, rxr->rx_agg_bmap); + prod_rx_buf = &rxr->rx_agg_ring[sw_prod]; + cons_rx_buf = &rxr->rx_agg_ring[cons]; + + /* It is possible for sw_prod to be equal to cons, so + * set cons_rx_buf->page to NULL first. + */ + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + prod_rx_buf->page = page; + prod_rx_buf->offset = cons_rx_buf->offset; + + prod_rx_buf->mapping = cons_rx_buf->mapping; + + prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); + prod_bd->rx_bd_opaque = sw_prod; + + prod = NEXT_RX_AGG(prod); + sw_prod = RING_RX_AGG(bp, NEXT_RX_AGG(sw_prod)); + } + rxr->rx_agg_prod = prod; + rxr->rx_sw_agg_prod = sw_prod; +} + +#ifdef HAVE_XDP_MULTI_BUFF +static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int len = offset_and_len & 0xffff; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); + + skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); + if (!skb) { +#ifndef CONFIG_PAGE_POOL + __free_page(page); +#else + page_pool_recycle_direct(rxr->page_pool, page); +#endif + return NULL; + } + skb_mark_for_recycle(skb); + skb_reserve(skb, bp->rx_offset); + __skb_put(skb, len); + + return skb; +} +#endif + +static inline struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping); +#ifdef 
HAVE_BUILD_SKB +#ifdef BNXT_RX_PAGE_MODE_SUPPORT +static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + u16 cons, void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + unsigned int payload = offset_and_len >> 16; + unsigned int len = offset_and_len & 0xffff; + skb_frag_t *frag; + struct page *page = data; + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int off, err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + dma_addr -= bp->rx_dma_offset; + + if (BNXT_RING_RX_ZC_MODE(rxr)) + return bnxt_copy_xdp(rxr->bnapi, data, len, dma_addr); + + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); + +#if defined(CONFIG_PAGE_POOL) && !defined(HAVE_SKB_MARK_RECYCLE) + page_pool_release_page(rxr->page_pool, page); +#endif + + if (unlikely(!payload)) + payload = eth_get_headlen(bp->dev, data_ptr, len); + + skb = napi_alloc_skb(&rxr->bnapi->napi, payload); + if (!skb) { +#ifndef CONFIG_PAGE_POOL + __free_page(page); +#else + page_pool_recycle_direct(rxr->page_pool, page); +#endif + return NULL; + } + + skb_mark_for_recycle(skb); + + off = (void *)data_ptr - page_address(page); + skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); + memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, + payload + NET_IP_ALIGN); + + frag = &skb_shinfo(skb)->frags[0]; + skb_frag_size_sub(frag, payload); + skb_frag_off_add(frag, payload); + skb->data_len -= payload; + skb->tail += payload; + + return skb; +} +#endif + +static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, u16 cons, + void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + u16 prod = rxr->rx_prod; + struct sk_buff *skb; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, data); + return NULL; + } + 
+ skb = napi_build_skb(data, bp->rx_buf_size); + dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + if (!skb) { + skb_free_frag(data); + return NULL; + } + + skb_reserve(skb, bp->rx_offset); + skb_put(skb, offset_and_len & 0xffff); + return skb; +} +#else +static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, u16 cons, + void *data, u8 *data_ptr, + dma_addr_t dma_addr, + unsigned int offset_and_len) +{ + struct sk_buff *skb = data; + u16 prod = rxr->rx_prod; + int err; + + err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC); + if (unlikely(err)) { + bnxt_reuse_rx_data(rxr, cons, skb); + return NULL; + } + + dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + skb_reserve(skb, bp->rx_offset); + skb_put(skb, offset_and_len & 0xffff); + return skb; +} +#endif + +static u32 __bnxt_rx_agg_pages(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct skb_shared_info *shinfo, + u16 idx, u32 agg_bufs, bool tpa, + struct xdp_buff *xdp) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + struct pci_dev *pdev = bp->pdev; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + u16 prod = rxr->rx_agg_prod; + u32 i, total_frag_len = 0; + bool p5_tpa = false; + + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && tpa) + p5_tpa = true; + + for (i = 0; i < agg_bufs; i++) { + skb_frag_t *frag = &shinfo->frags[i]; + u16 cons, frag_len; + struct rx_agg_cmp *agg; + struct bnxt_sw_rx_agg_bd *cons_rx_buf; + struct page *page; + dma_addr_t mapping; + + if (p5_tpa) + agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i); + else + agg = bnxt_get_agg(bp, cpr, idx, i); + cons = agg->rx_agg_cmp_opaque; + frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & + RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; + + cons_rx_buf = &rxr->rx_agg_ring[cons]; + skb_frag_fill_page_desc(frag, cons_rx_buf->page, + cons_rx_buf->offset, frag_len); + shinfo->nr_frags = i + 1; + 
__clear_bit(cons, rxr->rx_agg_bmap); + + /* It is possible for bnxt_alloc_rx_page() to allocate + * a sw_prod index that equals the cons index, so we + * need to clear the cons entry now. + */ + mapping = cons_rx_buf->mapping; + page = cons_rx_buf->page; + cons_rx_buf->page = NULL; + + if (xdp && page_is_pfmemalloc(page)) + xdp_buff_set_frag_pfmemalloc(xdp); + + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) { + --shinfo->nr_frags; + cons_rx_buf->page = page; + + /* Update prod since possibly some pages have been + * allocated already. + */ + rxr->rx_agg_prod = prod; + bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa); + return 0; + } +#ifndef HAVE_PAGE_POOL_GET_DMA_ADDR + dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); +#else + dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, + bp->rx_dir); +#endif + total_frag_len += frag_len; + prod = NEXT_RX_AGG(prod); + } + rxr->rx_agg_prod = prod; + return total_frag_len; +} + +static inline void bnxt_skb_mark_for_recycle(struct sk_buff *skb, struct bnxt_napi *bnapi) +{ +#ifdef CONFIG_PAGE_POOL +#if defined(HAVE_OLD_SKB_MARK_RECYCLE) || !defined(HAVE_SKB_MARK_RECYCLE) +#if defined(HAVE_PAGE_POOL_RELEASE_PAGE) + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + unsigned int nr_frags = shinfo->nr_frags; + unsigned int i; + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &shinfo->frags[i]; + struct page *page = skb_frag_page(frag); + +#ifdef HAVE_SKB_MARK_RECYCLE + skb_mark_for_recycle(skb); +#else + page_pool_release_page(rxr->page_pool, page); +#endif + } +#endif /* defined(HAVE_PAGE_POOL_RELEASE_PAGE) */ +#else + skb_mark_for_recycle(skb); +#endif +#endif +} + +static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct sk_buff *skb, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + u32 total_frag_len = 
0; + + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, + agg_bufs, tpa, NULL); + + if (!total_frag_len) { + bnxt_skb_mark_for_recycle(skb, cpr->bnapi); + dev_kfree_skb(skb); + return NULL; + } + + skb->data_len += total_frag_len; + skb->len += total_frag_len; + skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; + return skb; +} + +static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + struct xdp_buff *xdp, u16 idx, + u32 agg_bufs, bool tpa) +{ + struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp); + u32 total_frag_len = 0; + + if (!shinfo) + return -EOPNOTSUPP; + + if (!xdp_buff_has_frags(xdp)) + shinfo->nr_frags = 0; + + total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, + idx, agg_bufs, tpa, xdp); + + if (total_frag_len) { + xdp_buff_set_frags_flag(xdp); + shinfo->nr_frags = agg_bufs; +#ifdef HAVE_XDP_MULTI_BUFF + shinfo->xdp_frags_size = total_frag_len; +#endif + } + return total_frag_len; +} + +int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u8 agg_bufs, u32 *raw_cons) +{ + u16 last; + struct rx_agg_cmp *agg; + + *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs); + last = RING_CMP(*raw_cons); + agg = (struct rx_agg_cmp *) + &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)]; + return RX_AGG_CMP_VALID(agg, *raw_cons); +} + +static inline struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data, + unsigned int len, + dma_addr_t mapping) +{ + struct bnxt *bp = bnapi->bp; + struct pci_dev *pdev = bp->pdev; + struct sk_buff *skb; + + skb = napi_alloc_skb(&bnapi->napi, len); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); + + memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN, + len + NET_IP_ALIGN); + + dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, + bp->rx_dir); + + skb_put(skb, len); + + return skb; +} + +static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, + unsigned int 
len, + dma_addr_t mapping) +{ + return bnxt_copy_data(bnapi, data, len, mapping); +} + +#ifdef HAVE_XDP_DATA_META +static inline struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping) +{ + unsigned int metasize = 0; + u8 *data = xdp->data; + struct sk_buff *skb; + + len = xdp->data_end - xdp->data_meta; + metasize = xdp->data - xdp->data_meta; + data = xdp->data_meta; + skb = bnxt_copy_data(bnapi, data, len, mapping); + if (!skb) + return skb; + + if (metasize) { + skb_metadata_set(skb, metasize); + __skb_pull(skb, metasize); + } + + return skb; +} +#else +static inline struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi, + struct xdp_buff *xdp, + unsigned int len, + dma_addr_t mapping) +{ + u8 *data = xdp->data; + + return bnxt_copy_data(bnapi, data, len, mapping); +} +#endif + +static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, void *cmp) +{ + struct rx_cmp *rxcmp = cmp; + u32 tmp_raw_cons = *raw_cons; + u8 cmp_type, agg_bufs = 0; + + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & + RX_CMP_AGG_BUFS) >> + RX_CMP_AGG_BUFS_SHIFT; + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + struct rx_tpa_end_cmp *tpa_end = cmp; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return 0; + + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + } + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + return -EBUSY; + } + *raw_cons = tmp_raw_cons; + return 0; +} + +static void bnxt_set_netdev_mtu(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + u16 dflt_mtu = bp->fw_dflt_mtu; + +#ifdef HAVE_MIN_MTU + /* MTU range: 60 - FW defined max */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = bp->max_mtu; + + /* qcfg hwrm provides user configured 'default mtu'. + * Configure it on netdev if it is valid mtu. 
+ */ + if (dflt_mtu) { + dev->mtu = dflt_mtu; + if (bp->fw_cap & BNXT_FW_CAP_ADMIN_MTU) { + bp->max_mtu = dflt_mtu; + dev->min_mtu = dflt_mtu; + dev->max_mtu = dflt_mtu; + } + } +#else + if (dflt_mtu) + dev->mtu = dflt_mtu; +#endif +} + +static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + u16 idx = agg_id & MAX_TPA_P5_MASK; + + if (test_bit(idx, map->agg_idx_bmap)) + idx = find_first_zero_bit(map->agg_idx_bmap, + BNXT_AGG_IDX_BMAP_SIZE); + __set_bit(idx, map->agg_idx_bmap); + map->agg_id_tbl[agg_id] = idx; + return idx; +} + +static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + __clear_bit(idx, map->agg_idx_bmap); +} + +static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) +{ + struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; + + return map->agg_id_tbl[agg_id]; +} + +static void bnxt_tpa_metadata(struct bnxt_tpa_info *tpa_info, + struct rx_tpa_start_cmp *tpa_start, + struct rx_tpa_start_cmp_ext *tpa_start1) +{ + tpa_info->cfa_code_valid = 1; + tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); + tpa_info->vlan_valid = 0; + if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { + tpa_info->vlan_valid = 1; + tpa_info->metadata = + le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); + } +} + +static void bnxt_tpa_metadata_v2(struct bnxt_tpa_info *tpa_info, + struct rx_tpa_start_cmp *tpa_start, + struct rx_tpa_start_cmp_ext *tpa_start1) +{ + tpa_info->vlan_valid = 0; + if (TPA_START_VLAN_VALID(tpa_start)) { + u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start); + u32 vlan_proto = ETH_P_8021Q; + + tpa_info->vlan_valid = 1; + if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD) + vlan_proto = ETH_P_8021AD; + tpa_info->metadata = vlan_proto << 16 | + TPA_START_METADATA0_TCI(tpa_start1); + } +} + +static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u8 cmp_type, struct 
rx_tpa_start_cmp *tpa_start, + struct rx_tpa_start_cmp_ext *tpa_start1) +{ + struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf; + struct bnxt_tpa_info *tpa_info; + u16 cons, prod, agg_id; + struct rx_bd *prod_bd; + dma_addr_t mapping; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + agg_id = TPA_START_AGG_ID_P5(tpa_start); + agg_id = bnxt_alloc_agg_idx(rxr, agg_id); + } else { + agg_id = TPA_START_AGG_ID(tpa_start); + } + cons = tpa_start->rx_tpa_start_cmp_opaque; + prod = rxr->rx_prod; + cons_rx_buf = &rxr->rx_buf_ring[cons]; + prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bp, prod)]; + tpa_info = &rxr->rx_tpa[agg_id]; + + if (unlikely(cons != rxr->rx_next_cons || + TPA_START_ERROR(tpa_start))) { + /* 0xffff is forced error, don't print it */ + if (rxr->rx_next_cons != 0xffff) + netif_warn(bp, rx_err, bp->dev, + "TPA cons %x, expected cons %x, error code %x\n", + cons, rxr->rx_next_cons, + TPA_START_ERROR_CODE(tpa_start1)); + bnxt_sched_reset_rxr(bp, rxr); + return; + } + prod_rx_buf->data = tpa_info->data; + prod_rx_buf->data_ptr = tpa_info->data_ptr; + + mapping = tpa_info->mapping; + prod_rx_buf->mapping = mapping; + + prod_bd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + + prod_bd->rx_bd_haddr = cpu_to_le64(mapping); + + tpa_info->data = cons_rx_buf->data; + tpa_info->data_ptr = cons_rx_buf->data_ptr; + cons_rx_buf->data = NULL; + tpa_info->mapping = cons_rx_buf->mapping; + + tpa_info->len = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >> + RX_TPA_START_CMP_LEN_SHIFT; + if (likely(TPA_START_HASH_VALID(tpa_start))) { + tpa_info->hash_type = PKT_HASH_TYPE_L4; + tpa_info->gso_type = SKB_GSO_TCPV4; + if (TPA_START_IS_IPV6(tpa_start1)) + tpa_info->gso_type = SKB_GSO_TCPV6; + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + else if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP && + TPA_START_HASH_TYPE(tpa_start) == 3) + tpa_info->gso_type = SKB_GSO_TCPV6; + tpa_info->rss_hash = + le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash); + } 
else { + tpa_info->hash_type = PKT_HASH_TYPE_NONE; + tpa_info->gso_type = 0; + netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n"); + } + tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); + tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); + if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) + bnxt_tpa_metadata(tpa_info, tpa_start, tpa_start1); + else + bnxt_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1); + tpa_info->agg_count = 0; + + rxr->rx_prod = NEXT_RX(prod); + cons = RING_RX(bp, NEXT_RX(cons)); + rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); + cons_rx_buf = &rxr->rx_buf_ring[cons]; + + bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); + rxr->rx_prod = NEXT_RX(rxr->rx_prod); + cons_rx_buf->data = NULL; +} + +static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs) +{ + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true); +} + +#ifdef CONFIG_INET +static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto) +{ + struct udphdr *uh = NULL; + + if (ip_proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } +} +#endif + +static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, + int tcp_ts, struct sk_buff *skb) +{ +#ifdef CONFIG_INET + struct tcphdr *th; + int len, nw_off; + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + bool loopback = false; + + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + /* If the packet is an internal 
loopback packet, the offsets will + * have an extra 4 bytes. + */ + if (inner_mac_off == 4) { + loopback = true; + } else if (inner_mac_off > 4) { + __be16 proto = *((__be16 *)(skb->data + inner_ip_off - + ETH_HLEN - 2)); + + /* We only support inner IPv4/IPv6. If we don't see the + * correct protocol ID, it must be a loopback packet where + * the offsets are off by 4. + */ + if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) + loopback = true; + } + if (loopback) { + /* internal loopback packet, subtract all offsets by 4 */ + inner_ip_off -= 4; + inner_mac_off -= 4; + outer_ip_off -= 4; + } + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { + struct ipv6hdr *iph = ipv6_hdr(skb); + + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); + } else { + struct iphdr *iph = ip_hdr(skb); + + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); + } + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} + +static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info, + int tcp_ts, struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + int iphdr_len, nw_off; + + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
+ sizeof(struct ipv6hdr) : sizeof(struct iphdr); + skb_set_transport_header(skb, nw_off + iphdr_len); + + if (inner_mac_off) { /* tunnel */ + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + bnxt_gro_tunnel(skb, proto); + } +#endif + return skb; +} + +#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) +#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) + +static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, + int tcp_ts, struct sk_buff *skb) +{ +#ifdef CONFIG_INET + struct tcphdr *th; + int len, nw_off, tcp_opt_len = 0; + + if (tcp_ts) + tcp_opt_len = 12; + + if (tpa_info->gso_type == SKB_GSO_TCPV4) { + struct iphdr *iph; + + nw_off = tpa_info->payload_off - BNXT_IPV4_HDR_SIZE - + tcp_opt_len - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ip_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); + } else if (tpa_info->gso_type == SKB_GSO_TCPV6) { + struct ipv6hdr *iph; + + nw_off = tpa_info->payload_off - BNXT_IPV6_HDR_SIZE - + tcp_opt_len - ETH_HLEN; + skb_set_network_header(skb, nw_off); + iph = ipv6_hdr(skb); + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); + } else { + dev_kfree_skb_any(skb); + return NULL; + } + + if (nw_off) /* tunnel */ + bnxt_gro_tunnel(skb, skb->protocol); +#endif + return skb; +} + +static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, + struct bnxt_tpa_info *tpa_info, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + u16 segs; + + segs = TPA_END_TPA_SEGS(tpa_end); + if (segs == 1) + return skb; + + NAPI_GRO_CB(skb)->count = segs; + skb_shinfo(skb)->gso_size = + 
le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); + skb_shinfo(skb)->gso_type = tpa_info->gso_type; + skb = bp->gro_func(tpa_info, TPA_END_GRO_TS(tpa_end), skb); + if (likely(skb)) + tcp_gro_complete(skb); +#endif + return skb; +} + +/* Given the cfa_code of a received packet determine which + * netdev (vf-rep or PF) the packet is destined to. + */ +static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info) +{ + struct net_device *dev; + u16 cfa_code; + + cfa_code = rxcmp1 ? RX_CMP_CFA_CODE(rxcmp1) : tpa_info->cfa_code; + dev = bnxt_get_vf_rep(bp, cfa_code); + + /* if vf-rep dev is NULL, it must belong to the PF */ + return dev ? dev : bp->dev; +} + +static struct net_device *bnxt_tf_get_pkt_dev(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info) +{ + struct net_device *dev = bnxt_tf_get_vf_rep(bp, rxcmp1, tpa_info); + + /* if vf-rep dev is NULL, it must belong to the PF */ + return dev ? dev : bp->dev; +} + +static void bnxt_tpa_csum(struct bnxt *bp, struct sk_buff *skb, + struct bnxt_tpa_info *tpa_info) +{ + skb_checksum_none_assert(skb); + if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifdef HAVE_CSUM_LEVEL + skb->csum_level = + (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; +#elif defined(HAVE_INNER_NETWORK_OFFSET) + skb->encapsulation = + (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3; +#endif + } +} + +static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, +#ifdef OLD_VLAN + u32 *vlan, +#endif + u8 *event) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct net_device *dev = bp->dev; + u8 *data_ptr, agg_bufs; + unsigned int len; + struct bnxt_tpa_info *tpa_info; + dma_addr_t mapping; + struct sk_buff *skb; + u16 idx
= 0, agg_id; +#ifdef HAVE_BUILD_SKB + void *data; +#else + struct sk_buff *data; +#endif + bool gro; + + if (unlikely(bnapi->in_reset)) { + int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end); + + if (rc < 0) + return ERR_PTR(-EBUSY); + return NULL; + } + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + agg_id = TPA_END_AGG_ID_P5(tpa_end); + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1); + tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(agg_bufs != tpa_info->agg_count)) { + netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n", + agg_bufs, tpa_info->agg_count); + agg_bufs = tpa_info->agg_count; + } + tpa_info->agg_count = 0; + *event |= BNXT_AGG_EVENT; + bnxt_free_agg_idx(rxr, agg_id); + idx = agg_id; + gro = !!(bp->flags & BNXT_FLAG_GRO); + tpa_info->payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1); + } else { + agg_id = TPA_END_AGG_ID(tpa_end); + agg_bufs = TPA_END_AGG_BUFS(tpa_end); + tpa_info = &rxr->rx_tpa[agg_id]; + idx = RING_CMP(*raw_cons); + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons)) + return ERR_PTR(-EBUSY); + + *event |= BNXT_AGG_EVENT; + idx = NEXT_CMP(idx); + } + gro = !!TPA_END_GRO(tpa_end); + tpa_info->payload_off = TPA_END_PAYLOAD_OFF(tpa_end); + } + data = tpa_info->data; + data_ptr = tpa_info->data_ptr; + prefetch(data_ptr); + len = tpa_info->len; + mapping = tpa_info->mapping; + + if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) { + bnxt_abort_tpa(cpr, idx, agg_bufs); + if (agg_bufs > MAX_SKB_FRAGS) + netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n", + agg_bufs, (int)MAX_SKB_FRAGS); + return NULL; + } + + if (len <= bp->rx_copy_thresh) { + skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); + if (!skb) { + bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats->rx.rx_oom_discards += 1; + return NULL; + } + } else { +#ifdef HAVE_BUILD_SKB + u8 *new_data; +#else + struct sk_buff *new_data; +#endif + dma_addr_t new_mapping; + + new_data = 
__bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC); + if (!new_data) { + bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats->rx.rx_oom_discards += 1; + return NULL; + } + + tpa_info->data = new_data; +#ifdef HAVE_BUILD_SKB + tpa_info->data_ptr = new_data + bp->rx_offset; +#else + tpa_info->data_ptr = new_data->data + bp->rx_offset; +#endif + tpa_info->mapping = new_mapping; + +#ifdef HAVE_BUILD_SKB + skb = napi_build_skb(data, bp->rx_buf_size); +#else + skb = data; +#endif + dma_unmap_single_attrs(&bp->pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); +#ifdef HAVE_BUILD_SKB + if (!skb) { + skb_free_frag(data); + bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats->rx.rx_oom_discards += 1; + return NULL; + } + skb_reserve(skb, bp->rx_offset); +#endif + skb_put(skb, len); + } + + if (agg_bufs) { + if (tpa_info->payload_off == len) + cpr->sw_stats->rx.rx_tpa_hds += 1; + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true); + if (!skb) { + /* Page reuse already handled by bnxt_rx_pages(). 
*/ + cpr->sw_stats->rx.rx_oom_discards += 1; + return NULL; + } + } + + if (tpa_info->cfa_code_valid) + dev = bp->get_pkt_dev(bp, NULL, tpa_info); + + skb->protocol = eth_type_trans(skb, dev); + + if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) + skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); + + if (tpa_info->vlan_valid && + (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); + u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; + +#ifdef OLD_VLAN + if (vlan_proto == ETH_P_8021Q) + *vlan = vtag | OLD_VLAN_VALID; +#else + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } +#endif + } + + bnxt_tpa_csum(bp, skb, tpa_info); + if (gro) + skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); + + return skb; +} + +static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + struct rx_agg_cmp *rx_agg) +{ + u16 agg_id = TPA_AGG_AGG_ID(rx_agg); + struct bnxt_tpa_info *tpa_info; + + agg_id = bnxt_lookup_agg_idx(rxr, agg_id); + tpa_info = &rxr->rx_tpa[agg_id]; + BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS); + tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg; +} + +void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, + u32 vlan, struct sk_buff *skb) +{ + bnxt_skb_mark_for_recycle(skb, bnapi); + + if (skb->dev != bp->dev) { + /* this packet belongs to a vf-rep */ + bnxt_vf_rep_rx(bp, skb); + return; + } + + skb_record_rx_queue(skb, bnapi->index); + +#ifdef BNXT_PRIV_RX_BUSY_POLL + skb_mark_napi_id(skb, &bnapi->napi); +#endif +#ifdef OLD_VLAN + if (vlan && bp->vlgrp) + vlan_gro_receive(&bnapi->napi, bp->vlgrp, (u16)vlan, skb); +#else + if (bnxt_busy_polling(bnapi)) + netif_receive_skb(skb); +#endif + else + napi_gro_receive(&bnapi->napi, skb); +} + +#ifdef OLD_VLAN +static u32 bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, struct 
rx_cmp_ext *rxcmp1) +{ + u16 vtag, vlan_proto; + u32 meta_data; + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + __le32 flags2 = rxcmp1->rx_cmp_flags2; + + if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) + return 0; + + meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; + vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + if (vlan_proto == ETH_P_8021Q) + return vtag | OLD_VLAN_VALID; + } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { + if (RX_CMP_VLAN_VALID(rxcmp)) { + u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); + + if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) { + vlan_proto = ETH_P_8021Q; + vtag = RX_CMP_METADATA0_TCI(rxcmp1); + return vtag | OLD_VLAN_VALID; + } + } + } + return 0; +} +#else +static struct sk_buff *bnxt_rx_vlan(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, + struct rx_cmp_ext *rxcmp1) +{ + __be16 vlan_proto; + u32 meta_data; + u16 vtag; + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + __le32 flags2 = rxcmp1->rx_cmp_flags2; + + if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN))) + return skb; + + meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; + vlan_proto = htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT); + if (eth_type_vlan(vlan_proto)) + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + else + goto vlan_err; + } else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { + if (RX_CMP_VLAN_VALID(rxcmp)) { + u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp); + + if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q) + vlan_proto = htons(ETH_P_8021Q); + else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD) + vlan_proto = htons(ETH_P_8021AD); + else + goto vlan_err; + vtag = RX_CMP_METADATA0_TCI(rxcmp1); + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } + } + return skb; +vlan_err: + dev_kfree_skb(skb); + return NULL; +} +#endif + +static bool bnxt_rx_csum_err(struct sk_buff *skb, u8 cmp_type, + struct rx_cmp *rxcmp, struct rx_cmp_ext 
*rxcmp1) +{ + if (cmp_type == CMP_TYPE_RX_L2_CMP || + cmp_type == CMP_TYPE_RX_L2_V3_CMP) { + if (RX_CMP_L4_CS_OK(rxcmp1)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; +#ifdef HAVE_CSUM_LEVEL + skb->csum_level = RX_CMP_ENCAP(rxcmp1); +#elif defined(HAVE_INNER_NETWORK_OFFSET) + skb->encapsulation = RX_CMP_ENCAP(rxcmp1); +#endif + return false; + } + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) + return true; + } + return false; +} + +static enum pkt_hash_types bnxt_rss_ext_op(struct bnxt *bp, + struct rx_cmp *rxcmp) +{ + u8 ext_op; + + ext_op = RX_CMP_V3_HASH_TYPE(bp, rxcmp); + switch (ext_op) { + case EXT_OP_INNER_4: + case EXT_OP_OUTER_4: + case EXT_OP_INNFL_3: + case EXT_OP_OUTFL_3: + return PKT_HASH_TYPE_L4; + default: + return PKT_HASH_TYPE_L3; + } +} + +/* returns the following: + * 1 - 1 packet successfully received + * 0 - successful TPA_START, packet not completed yet + * -EBUSY - completion ring does not have all the agg buffers yet + * -ENOMEM - packet aborted due to out of memory + * -EIO - packet aborted due to hw error indicated in BD + */ +static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, u8 *event) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct net_device *dev = bp->dev; + struct rx_cmp *rxcmp; + struct rx_cmp_ext *rxcmp1; + u32 tmp_raw_cons = *raw_cons; + u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + struct bnxt_sw_rx_bd *rx_buf; + unsigned int len; +#ifdef HAVE_BUILD_SKB + u8 *data_ptr, agg_bufs, cmp_type; + void *data; +#else + struct sk_buff *data; + u8 *data_ptr, agg_bufs, cmp_type; +#endif + bool xdp_active = false; + dma_addr_t dma_addr; + struct sk_buff *skb; + struct xdp_buff xdp, *xdp_ptr; + int rc = 0; + u32 vlan = 0; + u32 misc, flags; +#ifdef HAVE_IEEE1588_SUPPORT + u64 ts = 0; + bool compl_deferred = false; +#endif + + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + cmp_type = 
RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) { + bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp); + goto next_rx_no_prod_no_len; + } + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + return -EBUSY; + + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + prod = rxr->rx_prod; + + if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP || + cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) { + bnxt_tpa_start(bp, rxr, cmp_type, + (struct rx_tpa_start_cmp *)rxcmp, + (struct rx_tpa_start_cmp_ext *)rxcmp1); + + *event |= BNXT_RX_EVENT; + goto next_rx_no_prod_no_len; + + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons, + (struct rx_tpa_end_cmp *)rxcmp, + (struct rx_tpa_end_cmp_ext *)rxcmp1, +#ifdef OLD_VLAN + &vlan, +#endif + event); + + if (IS_ERR(skb)) + return -EBUSY; + + rc = -ENOMEM; + if (likely(skb)) { + bnxt_deliver_skb(bp, bnapi, vlan, skb); + rc = 1; + } + *event |= BNXT_RX_EVENT; + goto next_rx_no_prod_no_len; + } + + cons = rxcmp->rx_cmp_opaque; + if (unlikely(cons != rxr->rx_next_cons)) { + int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp); + + /* 0xffff is forced error, don't print it */ + if (rxr->rx_next_cons != 0xffff) + netif_warn(bp, rx_err, bp->dev, "RX cons %x != expected cons %x\n", + cons, rxr->rx_next_cons); + bnxt_sched_reset_rxr(bp, rxr); + if (rc1) + return rc1; + goto next_rx_no_prod_no_len; + } + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data; + data_ptr = rx_buf->data_ptr; + prefetch(data_ptr); + + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); + agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + return -EBUSY; + + cp_cons = NEXT_CMP(cp_cons); + *event |= 
BNXT_AGG_EVENT; + } + *event |= BNXT_RX_EVENT; + + rx_buf->data = NULL; + if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) { + u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2); + + bnxt_reuse_rx_data(rxr, cons, data); + if (agg_bufs) + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, false); + + rc = -EIO; + if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) { + bnapi->cp_ring.sw_stats->rx.rx_buf_errors++; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { + netdev_warn_once(bp->dev, "RX buffer error %x\n", + rx_err); + bnxt_sched_reset_rxr(bp, rxr); + } + } + goto next_rx_no_len; + } + + flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); + len = flags >> RX_CMP_LEN_SHIFT; + dma_addr = rx_buf->mapping; + if (BNXT_RING_RX_ZC_MODE(rxr) && bnxt_xdp_attached(bp, rxr)) { + if (bnxt_rx_xsk(bp, rxr, cons, data, &data_ptr, &len, event)) { + rc = 1; + goto next_rx; + } + xdp_active = true; + xdp_ptr = data; + goto make_skb; + } else if (bnxt_xdp_attached(bp, rxr)) { + bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp); + if (agg_bufs) { + u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp, + cp_cons, agg_bufs, + false); + if (!frag_len) { + cpr->sw_stats->rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } + xdp_active = true; + xdp_ptr = &xdp; + } + +#ifndef HAVE_XDP_MULTI_BUFF + /* skip running XDP prog if there are aggregation bufs */ + if (!agg_bufs && xdp_active) { +#else + if (xdp_active) { +#endif + if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) { + rc = 1; + goto next_rx; + } + } + +make_skb: + if (len <= bp->rx_copy_thresh) { + if (!xdp_active) + skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr); + else + skb = bnxt_copy_xdp(bnapi, xdp_ptr, len, dma_addr); + bnxt_reuse_rx_data(rxr, cons, data); + if (!skb) { + if (agg_bufs) { + if (!xdp_active) + bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, + agg_bufs, false); +#ifdef HAVE_XDP_MULTI_BUFF + else + 
bnxt_xdp_buff_frags_free(rxr, &xdp); +#endif + } + cpr->sw_stats->rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } else { + u32 payload; + + if (rx_buf->data_ptr == data_ptr) + payload = misc & RX_CMP_PAYLOAD_OFFSET; + else + payload = 0; + skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, + payload | len); + if (!skb) { + cpr->sw_stats->rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } + } + + if (IS_ENABLED(CONFIG_TLS_DEVICE) && bp->ktls_info && + (flags & RX_CMP_FLAGS_PKT_METADATA_PRESENT)) + bnxt_ktls_rx(bp, skb, data_ptr, len, rxcmp, rxcmp1); + + if (agg_bufs) { + if ((misc & RX_CMP_PAYLOAD_OFFSET) == (flags & RX_CMP_LEN)) + cpr->sw_stats->rx.rx_hds += 1; + if (!xdp_active) { + skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false); + if (!skb) { + cpr->sw_stats->rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } +#ifdef HAVE_XDP_MULTI_BUFF + } else { + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + if (!skb) { + /* we should be able to free the old skb here */ + bnxt_xdp_buff_frags_free(rxr, &xdp); + cpr->sw_stats->rx.rx_oom_discards += 1; + rc = -ENOMEM; + goto next_rx; + } +#endif + } + } + + if (RX_CMP_HASH_VALID(rxcmp)) { + enum pkt_hash_types type; + + if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) { + type = bnxt_rss_ext_op(bp, rxcmp); + } else { + u32 hash_type; + + hash_type = RX_CMP_HASH_TYPE(rxcmp); + /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */ + if (hash_type != 1 && hash_type != 3) + type = PKT_HASH_TYPE_L3; + else + type = PKT_HASH_TYPE_L4; + } + skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); + } + + if (cmp_type == CMP_TYPE_RX_L2_CMP || + cmp_type == CMP_TYPE_RX_L2_V3_CMP) + dev = bp->get_pkt_dev(bp, rxcmp1, NULL); + skb->protocol = eth_type_trans(skb, dev); + + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) { +#ifdef OLD_VLAN + vlan = bnxt_rx_vlan(skb, cmp_type, rxcmp, rxcmp1); +#else + skb = bnxt_rx_vlan(skb, cmp_type, 
rxcmp, rxcmp1); + if (!skb) + goto next_rx; +#endif + } + skb_checksum_none_assert(skb); + if (dev->features & NETIF_F_RXCSUM) { + if (bnxt_rx_csum_err(skb, cmp_type, rxcmp, rxcmp1)) + bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++; + } + +#ifdef HAVE_IEEE1588_SUPPORT + if (unlikely((((flags & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS) || + bp->ptp_all_rx_tstamp) && bp->ptp_cfg)) { + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 ns; + + bnxt_get_rx_ts_p5(bp, &ts, rxcmp1->rx_cmp_timestamp); + + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + memset(skb_hwtstamps(skb), 0, sizeof(*skb_hwtstamps(skb))); + skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns); + } else { + if (!bnxt_get_rx_ts(bp, bnapi, vlan, skb)) + compl_deferred = true; + } + + } + + if (!compl_deferred) +#endif + bnxt_deliver_skb(bp, bnapi, vlan, skb); + + rc = 1; + +next_rx: + cpr->rx_packets += 1; + cpr->rx_bytes += len; + +next_rx_no_len: + rxr->rx_prod = NEXT_RX(prod); + rxr->rx_next_cons = RING_RX(bp, NEXT_RX(cons)); + +next_rx_no_prod_no_len: + *raw_cons = tmp_raw_cons; + + return rc; +} + +/* In netpoll mode, if we are using a combined completion ring, we need to + * discard the rx packets and recycle the buffers. 
+ */ +static int bnxt_force_rx_discard(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, + u32 *raw_cons, u8 *event) +{ + u32 tmp_raw_cons = *raw_cons; + struct rx_cmp_ext *rxcmp1; + struct rx_cmp *rxcmp; + u16 cp_cons; + u8 cmp_type; + int rc; + + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + return -EBUSY; + + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + cmp_type = RX_CMP_TYPE(rxcmp); + if (cmp_type == CMP_TYPE_RX_L2_CMP || + cmp_type == CMP_TYPE_RX_L2_V3_CMP) { + rxcmp1->rx_cmp_cfa_code_errors_v2 |= + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + struct rx_tpa_end_cmp_ext *tpa_end1; + + tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1; + tpa_end1->rx_tpa_end_cmp_errors_v2 |= + cpu_to_le32(RX_TPA_END_CMP_ERRORS); + } + rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); + if (rc && rc != -EBUSY) + cpr->sw_stats->rx.rx_netpoll_discards += 1; + return rc; +} + +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->regs[reg_idx]; + u32 reg_type, reg_off, val = 0; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_read_config_dword(bp->pdev, reg_off, &val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + reg_off = fw_health->mapped_regs[reg_idx]; + fallthrough; + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + val = readl(bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + val = readl(bp->bar1 + reg_off); + break; + } + if (reg_idx == BNXT_FW_RESET_INPROG_REG) + val &= fw_health->fw_reset_inprog_reg_mask; 
+ return val; +} + +static int bnxt_hwrm_dbr_pacing_qcfg(struct bnxt *bp) +{ + struct hwrm_func_dbr_pacing_qcfg_output *resp; + struct hwrm_func_dbr_pacing_qcfg_input *req; + struct bnxt_dbr *dbr = &bp->dbr; + int rc = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_DBR_PACING_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DBR_PACING_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto req_drop; + + if ((resp->dbr_stat_db_fifo_reg & + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) == + FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC) { + dbr->stat_db_fifo_reg = resp->dbr_stat_db_fifo_reg & + ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK; + dbr->db_fifo_reg_off = BNXT_DBR_PACING_WIN_OFF(dbr->stat_db_fifo_reg); + writel(dbr->stat_db_fifo_reg & BNXT_GRC_BASE_MASK, + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + BNXT_DBR_PACING_WIN_MAP_OFF); + dbr->pacing_enable = 1; + } + +req_drop: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_dbr_recovery_completed(struct bnxt *bp, u32 epoch) +{ + struct hwrm_func_dbr_recovery_completed_input *req; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_DBR_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DBR_RECOVERY_COMPLETED); + if (rc) + return rc; + + req->epoch = epoch; + + return hwrm_req_send_silent(bp, req); +} + +void bnxt_dbr_recovery_done(struct bnxt *bp, u32 epoch, int ulp_type) +{ + struct bnxt_dbr_debug *debug; + u32 l2_epoch, roce_epoch; + struct bnxt_dbr *dbr; + int rc = 0; + + dbr = &bp->dbr; + debug = &dbr->debug; + + if (debug->recover_enable) + return; + + mutex_lock(&dbr->lock); + + if (ulp_type == BNXT_ROCE_ULP) { + roce_epoch = epoch; + dbr->last_roce_epoch = roce_epoch; + l2_epoch = dbr->last_l2_epoch; + } else { + l2_epoch = epoch; + dbr->last_l2_epoch = l2_epoch; + roce_epoch = dbr->last_roce_epoch; + } + + /* if RoCE is active, its EPOCH needs 
to match */ + if (bnxt_ulp_registered(bp->edev) && + l2_epoch != roce_epoch) + goto exit; + + /* nothing to be done if EPOCH is already up-to-date */ + if (l2_epoch == dbr->last_completed_epoch) + goto exit; + + rc = bnxt_hwrm_dbr_recovery_completed(bp, epoch); + if (rc && rc != -EBUSY) { + netdev_warn(bp->dev, + "hwrm_dbr_recovery_completed failure: %x\n", rc); + goto exit; + } + dbr->last_completed_epoch = epoch; + dev_info_ratelimited(&bp->dev->dev, + "DBR recovery completed! epoch: 0x%x\n", epoch); +exit: + mutex_unlock(&dbr->lock); +} + +static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n); + +static void bnxt_dbr_task(struct work_struct *work) +{ + struct bnxt_dbr_sw_stats *stats; + struct bnxt_dbr_debug *debug; + struct delayed_work *dwork; + ktime_t start_ts, end_ts; + struct bnxt_dbr *dbr; + struct bnxt *bp; + u32 i, epoch; + u64 delta_t; + + dwork = to_delayed_work(work); + dbr = container_of(dwork, struct bnxt_dbr, dwork); + bp = container_of(dbr, struct bnxt, dbr); + stats = &dbr->sw_stats; + debug = &dbr->debug; + + if (!dbr->enable) { + netdev_info(bp->dev, "DBR recovery is currently disabled\n"); + goto queue_recovery_work; + } + + start_ts = ktime_get(); + + mutex_lock(&dbr->lock); + + epoch = dbr->curr_epoch; + /* + * If it's under test mode, always perform recovery. 
Otherwise, only + * perform recovery if epoch is new + */ + if (!debug->recover_enable && epoch == dbr->last_l2_epoch) { + mutex_unlock(&dbr->lock); + goto recovery_done; + } + + mutex_unlock(&dbr->lock); + + rtnl_lock(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + rtnl_unlock(); + atomic_dec(&dbr->event_cnt); + return; + } + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i), j; + struct bnxt_cp_ring_info *cpr, *cpr2; + struct netdev_queue *txq = NULL; + struct bnxt_tx_ring_info *txr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_napi *bnapi; + + bnapi = bp->bnapi[i]; + if (!bnapi) + continue; + + rxr = bnapi->rx_ring; + cpr = &bnapi->cp_ring; + + disable_irq(bp->irq_tbl[map_idx].vector); + + bnxt_for_each_napi_tx(j, bnapi, txr) { + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); + synchronize_net(); + + txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + if (txq) { + __netif_tx_lock_bh(txq); + netif_tx_stop_queue(txq); + __netif_tx_unlock_bh(txq); + } + } + + napi_disable(&bnapi->napi); + + /* replay the last CP cons idx with ARMALL */ + for (j = 0; j < cpr->cp_ring_count; j++) { + cpr2 = &cpr->cp_ring_arr[j]; + bnxt_do_pacing_default(bp, &cpr2->cp_ring_struct.seed); + bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); + } + + /* replay the last TX prod idx */ + bnxt_for_each_napi_tx(j, bnapi, txr) { + bnxt_do_pacing_default(bp, &txr->tx_ring_struct.seed); + bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); + } + + /* replay the last RX/AGG prod index */ + if (rxr) { + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + bnxt_do_pacing_default(bp, &rxr->rx_agg_ring_struct.seed); + bnxt_db_write(bp, &rxr->rx_agg_db, + rxr->rx_agg_prod); + } + + bnxt_do_pacing_default(bp, &rxr->rx_ring_struct.seed); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } + + /* replay the last NQ cons idx with ARMALL */ + bnxt_do_pacing_default(bp, &cpr->cp_ring_struct.seed); + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + + 
napi_enable(&bnapi->napi); + + bnxt_for_each_napi_tx(j, bnapi, txr) { + WRITE_ONCE(txr->dev_state, 0); + synchronize_net(); + + txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + if (txq) + netif_tx_start_queue(txq); + } + + enable_irq(bp->irq_tbl[map_idx].vector); + } + + rtnl_unlock(); + +recovery_done: + bnxt_dbr_recovery_done(bp, epoch, -1); + + end_ts = ktime_get(); + + stats->nr_dbr++; + + delta_t = ktime_to_us(ktime_sub(end_ts, start_ts)); + stats->min_dbr_us = stats->min_dbr_us ? + min(delta_t, stats->min_dbr_us) : delta_t; + stats->max_dbr_us = max(delta_t, stats->max_dbr_us); + stats->total_dbr_us += delta_t; + stats->avg_dbr_us = stats->total_dbr_us / stats->nr_dbr; + + atomic_dec(&dbr->event_cnt); + +queue_recovery_work: + /* queue recovery work periodically if recovery test is enabled */ + if (dbr->wq && debug->recover_enable) { + if (queue_delayed_work(dbr->wq, &dbr->dwork, + msecs_to_jiffies(debug->recover_interval_ms))) + atomic_inc(&dbr->event_cnt); + } +} + +int bnxt_dbr_init(struct bnxt *bp) +{ + struct bnxt_dbr *dbr = &bp->dbr; + + if (!dbr->enable) + return 0; + + if (dbr->wq) + return 0; + + mutex_init(&dbr->lock); + atomic_set(&dbr->event_cnt, 0); + + /* + * Use high-priority worker pool to achieve better DB recovery + * performance in a congested system + */ + dbr->wq = alloc_ordered_workqueue("bnxt_dbr_wq", + WQ_HIGHPRI | WQ_MEM_RECLAIM); + if (!dbr->wq) { + netdev_err(bp->dev, "Unable to create DBR workqueue.\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&dbr->dwork, bnxt_dbr_task); + + return 0; +} + +void bnxt_dbr_exit(struct bnxt *bp) +{ + struct bnxt_dbr *dbr = &bp->dbr; + struct workqueue_struct *old_wq; + + old_wq = dbr->wq; + + if (!old_wq) + return; + + cancel_delayed_work_sync(&dbr->dwork); + atomic_set(&dbr->event_cnt, 0); + dbr->wq = NULL; + destroy_workqueue(old_wq); +} + +static void bnxt_dbr_cancel(struct bnxt *bp) +{ + struct bnxt_dbr *dbr = &bp->dbr; + + if (!dbr->wq) + return; + + /* + * No need to wait for the wq to 
finish. DBR task will see that the + * BNXT_STATE_OPEN flag is cleared and will abort. + */ + if (cancel_delayed_work(&dbr->dwork)) + atomic_dec(&dbr->event_cnt); +} + +static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) +{ + int i; + + for (i = 0; i < bp->rx_nr_rings; i++) { + u16 grp_idx = bp->rx_ring[i].bnapi->index; + struct bnxt_ring_grp_info *grp_info; + + grp_info = &bp->grp_info[grp_idx]; + if (grp_info->agg_fw_ring_id == ring_id) + return grp_idx; + } + return INVALID_HW_RING_ID; +} + +static void +bnxt_process_vf_flr(struct bnxt *bp, u32 data1) +{ + u16 pfid, vfid; + int rc; + + if (!BNXT_TRUFLOW_EN(bp) || !(bp->flags & BNXT_FLAG_CHIP_P7)) + return; + + pfid = (data1 & ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK) >> + ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT; + vfid = (data1 & ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK) >> + ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT; + + netdev_dbg(bp->dev, "VF FLR async event received pfid: %u, vfid: %u\n", + pfid, vfid); + + rc = tfc_tbl_scope_func_reset(bp->tfp, vfid); + if (!rc) + netdev_dbg(bp->dev, "Failed to reset vf %d\n", vfid); +} + +static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) + return link_info->force_link_speed2; + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4) + return link_info->force_pam4_link_speed; + return link_info->force_link_speed; +} + +static void bnxt_set_force_speed(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + link_info->req_link_speed = link_info->force_link_speed2; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + switch (link_info->req_link_speed) { + case BNXT_LINK_SPEED_50GB_PAM4: + case BNXT_LINK_SPEED_100GB_PAM4: + case BNXT_LINK_SPEED_200GB_PAM4: + case BNXT_LINK_SPEED_400GB_PAM4: + 
link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + break; + case BNXT_LINK_SPEED_100GB_PAM4_112: + case BNXT_LINK_SPEED_200GB_PAM4_112: + case BNXT_LINK_SPEED_400GB_PAM4_112: + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4_112; + break; + default: + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + } + return; + } + link_info->req_link_speed = link_info->force_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_NRZ; + if (link_info->force_pam4_link_speed) { + link_info->req_link_speed = link_info->force_pam4_link_speed; + link_info->req_signal_mode = BNXT_SIG_MODE_PAM4; + } +} + +static void bnxt_set_auto_speed(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + link_info->advertising = link_info->auto_link_speeds2; + return; + } + link_info->advertising = link_info->auto_link_speeds; + link_info->advertising_pam4 = link_info->auto_pam4_link_speeds; +} + +static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + if (link_info->req_link_speed != link_info->force_link_speed2) + return true; + return false; + } + if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ && + link_info->req_link_speed != link_info->force_link_speed) + return true; + if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 && + link_info->req_link_speed != link_info->force_pam4_link_speed) + return true; + return false; +} + +static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + if (link_info->advertising != link_info->auto_link_speeds2) + return true; + return false; + } + if (link_info->advertising != link_info->auto_link_speeds || + link_info->advertising_pam4 != link_info->auto_pam4_link_speeds) + return true; + return 
false; +} + +int bnxt_queue_udcc_work(struct bnxt *bp, u32 session_id, u32 session_opcode, + bool suspend) +{ + struct bnxt_udcc_work *udcc_work; + + /* Store the data1 and data2 in a work_struct */ + udcc_work = kzalloc(sizeof(*udcc_work), GFP_ATOMIC); + if (!udcc_work) + return -ENOMEM; + + udcc_work->bp = bp; + udcc_work->session_id = session_id; + udcc_work->session_opcode = session_opcode; + udcc_work->session_suspend = suspend; + INIT_WORK(&udcc_work->work, bnxt_udcc_task); + queue_work(bnxt_pf_wq, &udcc_work->work); + + return 0; +} + +static void bnxt_bs_trace_init(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, u16 trace_type) +{ + struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[trace_type]; + u32 mem_size, pages, rem_bytes, magic_byte_offset; + struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; + struct bnxt_ring_mem_info *rmem, *rmem_pg_tbl; + int last_pg, n = 1, size = sizeof(u8); + + mem_size = ctxm->max_entries * ctxm->entry_size; + rem_bytes = mem_size % BNXT_PAGE_SIZE; + pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + + last_pg = (pages - 1) & (MAX_CTX_PAGES - 1); + magic_byte_offset = (rem_bytes ? 
rem_bytes : BNXT_PAGE_SIZE) - size; + + if (ctxm->instance_bmap) { + if (ctxm->instance_bmap > 1) + return; + n = hweight32(ctxm->instance_bmap); + } + + rmem = &ctx_pg[n - 1].ring_mem; + if (pages > MAX_CTX_PAGES) { + int last_pg_directory = rmem->nr_pages - 1; + + rmem_pg_tbl = &ctx_pg[n - 1].ctx_pg_tbl[last_pg_directory]->ring_mem; + bs_trace->magic_byte = rmem_pg_tbl->pg_arr[last_pg]; + } else { + bs_trace->magic_byte = rmem->pg_arr[last_pg]; + } + bs_trace->magic_byte += magic_byte_offset; + *bs_trace->magic_byte = BNXT_TRACE_BUF_MAGIC_BYTE; +} + +#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \ + ((data2) & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK) + +#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >> \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT) + +#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \ + ((data1) & ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK) + +#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) == \ + ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING) + +#define BNXT_EVENT_DBR_EPOCH(data) \ + (((data) & ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK) >> \ + ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT) + +/* Return true if the workqueue has to be scheduled */ +static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) +{ + u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1); + struct bnxt_dbr *dbr; + + switch (err_type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: + netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", + BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM: + netdev_warn(bp->dev, "Pause Storm detected!\n"); + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD: + dev_warn_ratelimited(&bp->dev->dev, "One or more MMIO doorbells dropped by the device! epoch: 0x%lx\n", + BNXT_EVENT_DBR_EPOCH(data1)); + dbr = &bp->dbr; + + if (dbr->enable) { + dbr->curr_epoch = BNXT_EVENT_DBR_EPOCH(data1); + if (queue_delayed_work(dbr->wq, &dbr->dwork, 0)) + atomic_inc(&dbr->event_cnt); + } + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: { + const char *nvm_err_str; + + if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1)) + nvm_err_str = "nvm write error"; + else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1)) + nvm_err_str = "nvm erase error"; + else + nvm_err_str = "unrecognized nvm error"; + + netdev_warn(bp->dev, "%s reported at address 0x%x\n", nvm_err_str, + (u32)EVENT_DATA2_NVM_ERR_ADDR(data2)); + break; + } + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: { + u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1); + char *threshold_type; + bool notify = false; + char *dir_str; + + switch (type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: + threshold_type = "warning"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: + threshold_type = "critical"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: + threshold_type = "fatal"; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: + threshold_type = "shutdown"; + break; + default: + netdev_err(bp->dev, "Unknown Thermal threshold type event\n"); + return false; + } + if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) { + dir_str = "above"; + notify = true; + } else 
{ + dir_str = "below"; + } + netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n", + dir_str, threshold_type); + netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n", + BNXT_EVENT_THERMAL_CURRENT_TEMP(data2), + BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2)); + if (notify) { + bp->thermal_threshold_type = type; + set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event); + return true; + } + break; + } + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED: + netdev_warn(bp->dev, + "Speed change not supported with dual rate transceivers on this board\n" + ); + break; + default: + netdev_err(bp->dev, "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n", + err_type, data1, data2); + break; + } + return false; +} + +#define BNXT_GET_EVENT_PORT(data) \ + (data & ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) + +#define BNXT_EVENT_RING_TYPE(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) + +#define BNXT_EVENT_RING_TYPE_RX(data2) \ + (BNXT_EVENT_RING_TYPE(data2) == \ + ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX) + +#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \ + (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\ + ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT) + +#define BNXT_EVENT_PHC_MASTER_FID(data2) \ + (((data2) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK) >>\ + ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT) + +#define BNXT_EVENT_PHC_SECONDARY_FID(data2) \ + (((data2) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK) >>\ + ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT) + +#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \ + (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\ + ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT) + +#define BNXT_PHC_BITS 48 + +#define 
BNXT_EVENT_HDBR_READ_ERROR_GROUP(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_MASK) >>\ + ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_SFT) + +#define BNXT_EVENT_UDCC_SESSION_ID(data1) \ + (((data1) & ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_MASK) >>\ + ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_SFT) + +#define BNXT_EVENT_UDCC_SESSION_OPCODE(data2) \ + (((data2) & ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_MASK) >>\ + ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_SFT) + +#define BNXT_EVENT_BUF_PRODUCER_TYPE(data1) \ + (((data1) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK) >>\ + ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT) + +#define BNXT_EVENT_BUF_PRODUCER_OFFSET(data2) \ + (((data2) & ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_MASK) >>\ + ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_SFT) + +static int bnxt_async_event_process(struct bnxt *bp, + struct hwrm_async_event_cmpl *cmpl) +{ + u16 event_id = le16_to_cpu(cmpl->event_id); + u32 data1 = le32_to_cpu(cmpl->event_data1); + u32 data2 = le32_to_cpu(cmpl->event_data2); + + netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n", + event_id, data1, data2); + + /* TODO CHIMP_FW: Define event id's for link change, error etc */ + switch (event_id) { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + goto async_event_process_exit; + + /* print unsupported speed warning in forced speed mode only */ + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) && + (data1 & 0x20000)) { + u16 fw_speed = bnxt_get_force_speed(link_info); + u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); + + if (speed != SPEED_UNKNOWN) + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); + } + 
set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); + } + fallthrough; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: + set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event); + fallthrough; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: + set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { + u16 port_id = BNXT_GET_EVENT_PORT(data1); + + if (BNXT_VF(bp)) + break; + + if (bp->pf.port_id != port_id) + break; + + set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); + break; + } + case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: + if (BNXT_PF(bp)) { + u16 vf_id = EVENT_DATA2_VF_CFG_CHNG_VF_ID(data2); + + if (!bnxt_vf_cfg_change(bp, vf_id, data1)) + goto async_event_process_exit; + set_bit(BNXT_VF_CFG_CHNG_SP_EVENT, &bp->sp_event); + break; + } + set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: { + char *type_str = "Solicited"; + + if (!bp->fw_health) + goto async_event_process_exit; + + bp->fw_reset_timestamp = jiffies; + bp->fw_reset_min_dsecs = cmpl->timestamp_lo; + if (!bp->fw_reset_min_dsecs) + bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS; + bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi); + if (!bp->fw_reset_max_dsecs) + bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS; + if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) { + set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state); + } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) { + type_str = "Fatal"; + bp->fw_health->fatalities++; + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + } else if (data2 && BNXT_FW_STATUS_HEALTHY != + EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) { + type_str = "Non-fatal"; + bp->fw_health->survivals++; + set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); + } + 
netif_warn(bp, hw, bp->dev, + "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n", + type_str, data1, data2, + bp->fw_reset_min_dsecs * 100, + bp->fw_reset_max_dsecs * 100); + set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event); + break; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: { + struct bnxt_fw_health *fw_health = bp->fw_health; + char *status_desc = "healthy"; + u32 status; + + if (!fw_health) + goto async_event_process_exit; + + if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) { + fw_health->enabled = false; + netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n"); + break; + } + fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1); + fw_health->tmr_multiplier = + DIV_ROUND_UP(fw_health->polling_dsecs * HZ, + bp->current_interval * 10); + fw_health->tmr_counter = fw_health->tmr_multiplier; + if (!fw_health->enabled) + fw_health->last_fw_heartbeat = + bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (status != BNXT_FW_STATUS_HEALTHY) + status_desc = "unhealthy"; + netif_info(bp, drv, bp->dev, + "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n", + fw_health->primary ? 
"primary" : "backup", status, + status_desc, fw_health->last_fw_reset_cnt); + if (!fw_health->enabled) { + /* Make sure tmr_counter is set and seen by + * bnxt_health_check() before setting enabled + */ + smp_mb(); + fw_health->enabled = true; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE: { + struct bnxt_pf_info *pf = &bp->pf; + u32 pf_id, vf_idx, vf_state; + + pf_id = EVENT_DATA1_VNIC_CHNG_PF_ID(data1); + vf_idx = EVENT_DATA1_VNIC_CHNG_VF_ID(data1) - pf->first_vf_id; + vf_state = EVENT_DATA1_VNIC_CHNG_VNIC_STATE(data1); + if (BNXT_PF(bp) && pf->active_vfs && pf_id == pf->fw_fid && + vf_idx < pf->active_vfs) { + bnxt_update_vf_vnic(bp, vf_idx, vf_state); + set_bit(BNXT_VF_VNIC_CHANGE_SP_EVENT, &bp->sp_event); + break; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION: + netif_notice(bp, hw, bp->dev, + "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n", + data1, data2); + goto async_event_process_exit; + case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { + u16 seq_id = data2 & 0xffff; + + hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: { + struct bnxt_rx_ring_info *rxr; + u16 grp_idx; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + goto async_event_process_exit; + + netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n", + BNXT_EVENT_RING_TYPE(data2), data1); + if (!BNXT_EVENT_RING_TYPE_RX(data2)) + goto async_event_process_exit; + + grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1); + if (grp_idx == INVALID_HW_RING_ID) { + netif_warn(bp, rx_err, bp->dev, "Unknown RX agg ring id 0x%x\n", + data1); + goto async_event_process_exit; + } + rxr = bp->bnapi[grp_idx]->rx_ring; + bnxt_sched_reset_rxr(bp, rxr); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { + struct bnxt_fw_health *fw_health = bp->fw_health; + + netif_notice(bp, 
hw, bp->dev, + "Received firmware echo request, data1: 0x%x, data2: 0x%x\n", + data1, data2); + if (fw_health) { + fw_health->echo_req_data1 = data1; + fw_health->echo_req_data2 = data2; + set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event); + break; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { + bnxt_ptp_pps_event(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { + if (bnxt_event_error_report(bp, data1, data2)) + break; + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: { + switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) { + case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER: + if (BNXT_EVENT_PHC_SECONDARY_FID(data2) == INVALID_HW_RING_ID) + netif_notice(bp, hw, bp->dev, "PTP Hardware Clock, state: not synchronized\n"); + else + netif_notice(bp, hw, bp->dev, "PTP Hardware Clock, state: Primary (Failed over from Secondary)\n"); + break; + case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER: + netif_notice(bp, hw, bp->dev, "PTP Hardware Clock, state: Primary\n"); + break; + case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY: + netif_notice(bp, hw, bp->dev, "PTP Hardware Clock, state: Secondary\n"); + break; + case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE: +#ifdef HAVE_IEEE1588_SUPPORT + if (BNXT_PTP_USE_RTC(bp)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 ns; + + if (!ptp) + goto async_event_process_exit; + + spin_lock_bh(&ptp->ptp_lock); + bnxt_ptp_update_current_time(bp); + ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) << + BNXT_PHC_BITS) | ptp->current_time); + bnxt_ptp_rtc_timecounter_init(ptp, ns); + spin_unlock_bh(&ptp->ptp_lock); + } +#endif + break; + default: + netif_notice(bp, hw, bp->dev, "PTP: Unknown PHC event received\n"); + break; + } + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR: { + netif_notice(bp, hw, 
bp->dev, + "HW DB recovery read error group 0x%X (1:SQ, 2:RQ, 4:SRQ, 8:CQ)\n", + (u8)BNXT_EVENT_HDBR_READ_ERROR_GROUP(data1)); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR: + bnxt_process_vf_flr(bp, data1); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE: { + + netif_notice(bp, hw, bp->dev, + "UDCC event session_id: %d, session opcode: 0x%x\n", + data1, data2); + bnxt_queue_udcc_work(bp, BNXT_EVENT_UDCC_SESSION_ID(data1), + BNXT_EVENT_UDCC_SESSION_OPCODE(data2), false); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER: { + u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1); + u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2); + + bnxt_bs_trace_check_wrapping(&bp->bs_trace[type], offset); + goto async_event_process_exit; + } + default: + goto async_event_process_exit; + } + __bnxt_queue_sp_work(bp); +async_event_process_exit: + bnxt_ulp_async_events(bp, cmpl); + return 0; +} + +static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) +{ + u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id; + struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp; + struct hwrm_fwd_req_cmpl *fwd_req_cmpl = + (struct hwrm_fwd_req_cmpl *)txcmp; + + switch (cmpl_type) { + case CMPL_BASE_TYPE_HWRM_DONE: + seq_id = le16_to_cpu(h_cmpl->sequence_id); + hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); + break; + + case CMPL_BASE_TYPE_HWRM_FWD_REQ: + vf_id = le16_to_cpu(fwd_req_cmpl->source_id); + + if ((vf_id < bp->pf.first_vf_id) || + (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) { + netdev_err(bp->dev, "Msg contains invalid VF id %x\n", + vf_id); + return -EINVAL; + } + + set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); + bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); + break; + + case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: + bnxt_async_event_process(bp, + (struct hwrm_async_event_cmpl *)txcmp); + break; + + default: + break; + } + + return 0; +} + +static irqreturn_t 
bnxt_msix(int irq, void *dev_instance) +{ + struct bnxt_napi *bnapi = dev_instance; + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u32 cons = RING_CMP(cpr->cp_raw_cons); + + cpr->event_ctr++; + prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); + napi_schedule(&bnapi->napi); + return IRQ_HANDLED; +} + +static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + u32 raw_cons = cpr->cp_raw_cons; + u16 cons = RING_CMP(raw_cons); + struct tx_cmp *txcmp; + + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + return TX_CMP_VALID(txcmp, raw_cons); +} + +static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + u32 raw_cons = cpr->cp_raw_cons; + u32 cons; + int rx_pkts = 0; + u8 event = 0; + struct tx_cmp *txcmp; + + cpr->has_more_work = 0; + cpr->had_work_done = 1; + while (1) { + u8 cmp_type; + int rc; + + cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + cmp_type = TX_CMP_TYPE(txcmp); + if (cmp_type == CMP_TYPE_TX_L2_CMP || + cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { + u32 opaque = txcmp->tx_cmp_opaque; + struct bnxt_tx_ring_info *txr; + u16 tx_freed; + + txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; +#ifdef DEV_NETMAP + if (BNXT_CHIP_P5_PLUS(bp) && netmap_tx_irq(bp->dev, txr->txq_index) != + NM_IRQ_PASS) + break; +#endif /* DEV_NETMAP */ + event |= BNXT_TX_CMP_EVENT; + if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) + txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); + else + txr->tx_hw_cons = TX_OPAQUE_PROD(bp, opaque); + cpr->sw_stats->tx.tx_push_cmpl += TX_CMP_PUSH(txcmp); + cpr->sw_stats->txtime.txtime_cmpl_err += TX_CMP_TXTM_ERR(txcmp); + tx_freed = (txr->tx_hw_cons - txr->tx_cons) & + bp->tx_ring_mask; + if (txr->xsk_pool && tx_freed >= budget) { + rx_pkts = budget; + raw_cons = NEXT_RAW_CMP(raw_cons); + if (budget) + cpr->has_more_work = 1; + break; + } + /* return full budget so NAPI will complete. */ + if (unlikely(tx_freed >= bp->tx_wake_thresh)) { + rx_pkts = budget; + raw_cons = NEXT_RAW_CMP(raw_cons); + if (budget) + cpr->has_more_work = 1; + break; + } + } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { + bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); + } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && + cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { +#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE) + int dummy; + + if (BNXT_CHIP_P5_PLUS(bp) && + netmap_rx_irq(bp->dev, bnapi->rx_ring->netmap_idx, &dummy) != + NM_IRQ_PASS) + break; +#endif + if (likely(budget)) + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); + else + rc = bnxt_force_rx_discard(bp, cpr, &raw_cons, + &event); + if (likely(rc >= 0)) + rx_pkts += rc; + /* Increment rx_pkts when rc is -ENOMEM to count towards + * the NAPI budget. Otherwise, we may potentially loop + * here forever if we consistently cannot allocate + * buffers. 
+ */ + else if (rc == -ENOMEM && budget) + rx_pkts++; + else if (rc == -EBUSY) /* partial completion */ + break; + } else if ((cmp_type == CMP_TYPE_MPC_CMP_SHORT) || + (cmp_type == CMP_TYPE_MPC_CMP_LONG)) { + if (bnxt_mpc_cmp(bp, cpr, &raw_cons)) + break; + } else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE || + cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ || + cmp_type == + CMPL_BASE_TYPE_HWRM_ASYNC_EVENT)) { + bnxt_hwrm_handler(bp, txcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts && rx_pkts == budget) { + cpr->has_more_work = 1; + break; + } + } + + if (event & BNXT_REDIRECT_EVENT) { + xdp_do_flush(); + event &= ~BNXT_REDIRECT_EVENT; + } + + if (event & BNXT_TX_EVENT) { + struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; + u16 prod = txr->tx_prod; + + /* Sync BD data before updating doorbell */ + wmb(); + + bnxt_db_write_relaxed(bp, &txr->tx_db, prod); + event &= ~BNXT_TX_EVENT; + } +#ifdef DEV_NETMAP + if (!cpr->netmapped) +#endif + cpr->cp_raw_cons = raw_cons; + bnapi->events |= event; + return rx_pkts; +} + +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, + int budget) +{ + if ((bnapi->events & BNXT_TX_CMP_EVENT) && !bnapi->tx_fault) + bnapi->tx_int(bp, bnapi, budget); + + if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bnapi->events &= ~BNXT_RX_EVENT; + } + if (bnapi->events & BNXT_AGG_EVENT) { + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnapi->events &= ~BNXT_AGG_EVENT; + } +} + +static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int budget) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + int rx_pkts; + + rx_pkts = __bnxt_poll_work(bp, cpr, budget); + + /* ACK completion ring before freeing tx ring and producing new + * buffers in rx/agg rings to prevent overflowing the completion + * ring. 
+ */ + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); + + __bnxt_poll_work_done(bp, bnapi, budget); + return rx_pkts; +} + +static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct tx_cmp *txcmp; + struct rx_cmp_ext *rxcmp1; + u32 cp_cons, tmp_raw_cons; + u32 raw_cons = cpr->cp_raw_cons; + bool flush_xdp = false; + u32 rx_pkts = 0; + u8 event = 0; + + while (1) { + int rc; + + cp_cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); + if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + tmp_raw_cons = NEXT_RAW_CMP(raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + break; + + /* force an error to recycle the buffer */ + rxcmp1->rx_cmp_cfa_code_errors_v2 |= + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); + + rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event); + if (likely(rc == -EIO) && budget) + rx_pkts++; + else if (rc == -EBUSY) /* partial completion */ + break; + if (event & BNXT_REDIRECT_EVENT) + flush_xdp = true; + } else if (unlikely(TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_DONE)) { + bnxt_hwrm_handler(bp, txcmp); + } else { + netdev_err(bp->dev, + "Invalid completion received on special ring\n"); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts == budget) + break; + } + + cpr->cp_raw_cons = raw_cons; + BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + + if (event & BNXT_AGG_EVENT) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + if (flush_xdp) + xdp_do_flush(); + + if 
(!bnxt_has_work(bp, cpr) && rx_pkts < budget) { +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE + napi_complete_done(napi, rx_pkts); +#else + napi_complete(napi); +#endif + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); + } + return rx_pkts; +} + +static int bnxt_poll(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int work_done = 0; + + if (!bnxt_lock_napi(bnapi)) + return budget; + + if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { + napi_complete(napi); + bnxt_unlock_napi(bnapi); + return 0; + } + while (1) { + work_done += bnxt_poll_work(bp, cpr, budget - work_done); + + if (work_done >= budget) { + if (!budget) + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); + break; + } + + if (!bnxt_has_work(bp, cpr)) { +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE + if (napi_complete_done(napi, work_done)) + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); +#else + napi_complete(napi); + BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons); +#endif + break; + } + } + if (bp->flags & BNXT_FLAG_DIM) { + struct dim_sample dim_sample = {}; + + dim_update_sample(cpr->event_ctr, + cpr->rx_packets, + cpr->rx_bytes, + &dim_sample); + net_dim(&cpr->dim, dim_sample); + } + mmiowb(); + bnxt_unlock_napi(bnapi); + return work_done; +} + +static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i, work_done = 0; + + for (i = 0; i < cpr->cp_ring_count; i++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; + + if (cpr2->had_nqe_notify) { + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work |= cpr2->has_more_work; + } + } + return work_done; +} + +static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, + u64 dbr_type, int budget) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + int i; + + for (i = 0; i < 
cpr->cp_ring_count; i++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[i]; + struct bnxt_db_info *db; + +#ifdef DEV_NETMAP + if (cpr2->had_work_done && !cpr2->netmapped) { +#else + if (cpr2->had_work_done) { +#endif + u32 tgl = 0; + u64 db_val; + + if (dbr_type == DBR_TYPE_CQ_ARMALL) { + cpr2->had_nqe_notify = 0; + tgl = cpr2->toggle; + } + db = &cpr2->cp_db; + db_val = db->db_key64 | dbr_type | DB_TOGGLE(tgl) | + DB_RING_IDX(db, cpr2->cp_raw_cons); + bnxt_hdbr_cp_db(db->db_cp, db_val, false, + dbr_type == DBR_TYPE_CQ_ARMALL ? 1 : 0); + bnxt_writeq(bp, db_val, db->doorbell); + cpr2->had_work_done = 0; + } + } + __bnxt_poll_work_done(bp, bnapi, budget); +} + +static int bnxt_poll_p5(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_cp_ring_info *cpr_rx; + u32 raw_cons = cpr->cp_raw_cons; + struct bnxt *bp = bnapi->bp; + struct nqe_cn *nqcmp; + int work_done = 0; + u32 cons; + + if (!bnxt_lock_napi(bnapi)) + return budget; + + if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) { + napi_complete(napi); + bnxt_unlock_napi(bnapi); + return 0; + } + if (cpr->has_more_work) { + cpr->has_more_work = 0; + work_done = __bnxt_poll_cqs(bp, bnapi, budget); + } + while (1) { + u16 type; + + cons = RING_CMP(raw_cons); + nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!NQ_CMP_VALID(nqcmp, raw_cons)) { + if (cpr->has_more_work) + break; + + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, budget); + cpr->cp_raw_cons = raw_cons; +#ifdef HAVE_NEW_NAPI_COMPLETE_DONE + if (napi_complete_done(napi, work_done)) + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, + cpr->cp_raw_cons); +#else + napi_complete(napi); + BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons); +#endif + goto poll_done; + } + + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + + type = le16_to_cpu(nqcmp->type); + if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) { + u32 idx = le32_to_cpu(nqcmp->cq_handle_low); + u32 cq_type = BNXT_NQ_HDL_TYPE(idx); + struct bnxt_cp_ring_info *cpr2; + + /* No more budget for RX work */ + if (budget && work_done >= budget && + cq_type == BNXT_NQ_HDL_TYPE_RX) + break; + + idx = BNXT_NQ_HDL_IDX(idx); + cpr2 = &cpr->cp_ring_arr[idx]; + cpr2->had_nqe_notify = 1; + cpr2->toggle = NQE_CN_TOGGLE(type); + work_done += __bnxt_poll_work(bp, cpr2, + budget - work_done); + cpr->has_more_work |= cpr2->has_more_work; + } else { + bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + } + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); + if (raw_cons != cpr->cp_raw_cons) { + cpr->cp_raw_cons = raw_cons; + BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); + } +poll_done: + cpr_rx = &cpr->cp_ring_arr[0]; + if (cpr_rx->cp_ring_type == BNXT_NQ_HDL_TYPE_RX && + (bp->flags & BNXT_FLAG_DIM)) { + struct dim_sample dim_sample = {}; + + dim_update_sample(cpr->event_ctr, + cpr_rx->rx_packets, + cpr_rx->rx_bytes, + &dim_sample); + net_dim(&cpr->dim, dim_sample); + } + +#ifdef HAVE_XSK_SUPPORT + if ((bnapi->flags & BNXT_NAPI_FLAG_XDP) && bnapi->tx_ring[0]->xsk_pool) + bnxt_xsk_xmit(bp, bnapi, budget); +#endif + bnxt_unlock_napi(bnapi); + return work_done; +} + +#ifdef BNXT_PRIV_RX_BUSY_POLL +static int bnxt_busy_poll(struct napi_struct *napi) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt *bp = bnapi->bp; + int rx_work = 0, budget = 4; + + if (atomic_read(&bp->intr_sem) != 0) + return LL_FLUSH_FAILED; + + if (!BNXT_LINK_IS_UP(bp)) + return LL_FLUSH_FAILED; + + if (!bnxt_lock_poll(bnapi)) + return LL_FLUSH_BUSY; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + struct bnxt_cp_ring_info *cpr2; + int i; + + for (i = 0; i < cpr->cp_ring_count; i++) { + cpr2 = &cpr->cp_ring_arr[i]; + rx_work += 
bnxt_poll_work(bp, cpr2, budget - rx_work); + } + } else { + rx_work = bnxt_poll_work(bp, cpr, budget); + } + + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + + bnxt_unlock_poll(bnapi); + return rx_work; +} +#endif + +static void bnxt_free_tx_skbs(struct bnxt *bp) +{ + int i, max_idx; + struct pci_dev *pdev = bp->pdev; + + if (!bp->tx_ring) + return; + + max_idx = bp->tx_nr_pages * TX_DESC_CNT; + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + int j; + + if (!txr->tx_buf_ring) + continue; + + for (j = 0; j < max_idx;) { + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; + struct sk_buff *skb; + int k, last; + + if (i < bp->tx_nr_rings_xdp && + tx_buf->action == XDP_REDIRECT) { + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); +#ifdef HAVE_XDP_FRAME + xdp_return_frame(tx_buf->xdpf); +#endif + tx_buf->action = 0; + tx_buf->xdpf = NULL; + j++; + continue; + } + + skb = tx_buf->skb; + if (!skb) { + j++; + continue; + } + + tx_buf->skb = NULL; + + if (tx_buf->is_push) { + dev_kfree_skb(skb); + j += 2; + continue; + } + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + DMA_TO_DEVICE); + + last = tx_buf->nr_frags; + j += 2; + for (k = 0; k < last; k++, j++) { + int ring_idx = j & bp->tx_ring_mask; + skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; + + tx_buf = &txr->tx_buf_ring[ring_idx]; + dma_unmap_page( + &pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(frag), DMA_TO_DEVICE); + } + dev_kfree_skb(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); + } +} + +void bnxt_free_one_rx_buf_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + struct pci_dev *pdev = bp->pdev; + int i, max_idx; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + dma_addr_t mapping = rx_buf->mapping; +#ifdef HAVE_BUILD_SKB + 
void *data = rx_buf->data; +#else + struct sk_buff *data = rx_buf->data; +#endif + + if (!data) + continue; + +#ifdef HAVE_BUILD_SKB +#ifdef HAVE_XSK_SUPPORT + if (BNXT_RING_RX_ZC_MODE(rxr) && rxr->xsk_pool) { + if (data) + xsk_buff_free(data); + rx_buf->data = NULL; + } else if (BNXT_RX_PAGE_MODE(bp)) { +#else + if (BNXT_RX_PAGE_MODE(bp)) { +#endif +#ifndef HAVE_PAGE_POOL_GET_DMA_ADDR + mapping -= bp->rx_dma_offset; + dma_unmap_page_attrs(&pdev->dev, mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); +#endif +#ifndef CONFIG_PAGE_POOL + __free_page(data); +#else + page_pool_recycle_direct(rxr->page_pool, data); +#endif + } else { + dma_unmap_single_attrs(&pdev->dev, mapping, + bp->rx_buf_use_size, + bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + skb_free_frag(data); + } +#else + dma_unmap_single_attrs(&pdev->dev, mapping, bp->rx_buf_use_size, + bp->rx_dir, DMA_ATTR_WEAK_ORDERING); + dev_kfree_skb_any(data); +#endif + rx_buf->data = NULL; + } +} + +static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct pci_dev *pdev = bp->pdev; + struct bnxt_tpa_idx_map *map; + int i, max_agg_idx; + + max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + if (!rxr->rx_tpa) + goto skip_rx_tpa_free; + + for (i = 0; i < bp->max_tpa; i++) { + struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i]; +#ifdef HAVE_BUILD_SKB + u8 *data = tpa_info->data; +#else + struct sk_buff *data = tpa_info->data; +#endif + + if (!data) + continue; + + dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + tpa_info->data = NULL; + +#ifdef HAVE_BUILD_SKB + skb_free_frag(data); +#else + dev_kfree_skb_any(data); +#endif + } + +skip_rx_tpa_free: + if (!rxr->rx_buf_ring) + goto skip_rx_buf_free; + + bnxt_free_one_rx_buf_ring(bp, rxr); + +skip_rx_buf_free: + if (!rxr->rx_agg_ring) + goto skip_rx_agg_free; + + for (i = 0; i < max_agg_idx; i++) { + struct 
bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + +#ifndef HAVE_PAGE_POOL_GET_DMA_ADDR + dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); +#endif + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); + if (PAGE_SIZE <= BNXT_RX_PAGE_SIZE) { +#ifdef CONFIG_PAGE_POOL + page_pool_recycle_direct(rxr->page_pool, page); +#else + __free_page(page); +#endif + } else { +#ifdef HAVE_PAGE_POOL_PAGE_FRAG + page_pool_recycle_direct(rxr->page_pool, page); +#else + __free_page(page); +#endif + } + } + +skip_rx_agg_free: + if (rxr->rx_page) { + __free_page(rxr->rx_page); + rxr->rx_page = NULL; + } + map = rxr->rx_tpa_idx_map; + if (map) + memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); +} + +static void bnxt_free_rx_skbs(struct bnxt *bp) +{ + int i; + + if (!bp->rx_ring) + return; + + for (i = 0; i < bp->rx_nr_rings; i++) + bnxt_free_one_rx_ring_skbs(bp, i); +} + +static void bnxt_free_skbs(struct bnxt *bp) +{ + bnxt_free_tx_skbs(bp); + bnxt_free_rx_skbs(bp); +} + +static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len) +{ + u8 init_val = ctxm->init_value; + u16 offset = ctxm->init_offset; + u8 *p2 = p; + int i; + + if (!init_val) + return; + if (offset == BNXT_CTX_INIT_INVALID_OFFSET) { + memset(p, init_val, len); + return; + } + for (i = 0; i < len; i += ctxm->entry_size) + *(p2 + i + offset) = init_val; +} + +int bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, void *buf, size_t offset) +{ + size_t total_len = 0; + int i; + + for (i = 0; i < rmem->nr_pages; i++) { + if (!rmem->pg_arr[i]) + continue; + + if (buf) + memcpy(buf + offset, rmem->pg_arr[i], rmem->page_size); + offset += rmem->page_size; + total_len += rmem->page_size; + } + + return total_len; +} + +void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) +{ + struct pci_dev *pdev = bp->pdev; + int i; + + if 
(!rmem->pg_arr) + goto skip_pages; + + for (i = 0; i < rmem->nr_pages; i++) { + if (!rmem->pg_arr[i]) + continue; + + dma_free_coherent(&pdev->dev, rmem->page_size, + rmem->pg_arr[i], rmem->dma_arr[i]); + + rmem->pg_arr[i] = NULL; + } +skip_pages: + if (rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + dma_free_coherent(&pdev->dev, pg_tbl_size, + rmem->pg_tbl, rmem->pg_tbl_map); + rmem->pg_tbl = NULL; + } + if (rmem->vmem_size && *rmem->vmem) { + vfree(*rmem->vmem); + *rmem->vmem = NULL; + } +} + +int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) +{ + struct pci_dev *pdev = bp->pdev; + u64 valid_bit = 0; + int i; + + if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) + valid_bit = PTU_PTE_VALID; + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, + &rmem->pg_tbl_map, + GFP_KERNEL); + if (!rmem->pg_tbl) + return -ENOMEM; + } + + for (i = 0; i < rmem->nr_pages; i++) { + u64 extra_bits = valid_bit; + + rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev, + rmem->page_size, + &rmem->dma_arr[i], + GFP_KERNEL); + if (!rmem->pg_arr[i]) + return -ENOMEM; + + if (rmem->ctx_mem) + bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i], + rmem->page_size); + if (rmem->nr_pages > 1 || rmem->depth > 0) { + if (i == rmem->nr_pages - 2 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_NEXT_TO_LAST; + else if (i == rmem->nr_pages - 1 && + (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) + extra_bits |= PTU_PTE_LAST; + rmem->pg_tbl[i] = + cpu_to_le64(rmem->dma_arr[i] | extra_bits); + } + } + + if (rmem->vmem_size) { + *rmem->vmem = vzalloc(rmem->vmem_size); + if (!(*rmem->vmem)) + return -ENOMEM; + } + return 0; +} + +static void 
bnxt_free_tpa_info(struct bnxt *bp) +{ + int i, j; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + kfree(rxr->rx_tpa_idx_map); + rxr->rx_tpa_idx_map = NULL; + if (rxr->rx_tpa) { + for (j = 0; j < bp->max_tpa; j++) { + kfree(rxr->rx_tpa[j].agg_arr); + rxr->rx_tpa[j].agg_arr = NULL; + } + } + kfree(rxr->rx_tpa); + rxr->rx_tpa = NULL; + } +} + +static int bnxt_alloc_tpa_info(struct bnxt *bp) +{ + int i, j; + + bp->max_tpa = MAX_TPA; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + if (!bp->max_tpa_v2) + return 0; + bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct rx_agg_cmp *agg; + + rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info), + GFP_KERNEL); + if (!rxr->rx_tpa) + return -ENOMEM; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + continue; + for (j = 0; j < bp->max_tpa; j++) { + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + return -ENOMEM; + rxr->rx_tpa[j].agg_arr = agg; + } + rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), + GFP_KERNEL); + if (!rxr->rx_tpa_idx_map) + return -ENOMEM; + } + return 0; +} + +static void bnxt_free_rx_rings(struct bnxt *bp) +{ + int i; + + if (!bp->rx_ring) + return; + + bnxt_free_tpa_info(bp); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring; + +#ifdef HAVE_NDO_XDP + if (rxr->xdp_prog) + bpf_prog_put(rxr->xdp_prog); +#endif + +#ifdef HAVE_XDP_RXQ_INFO + if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) + xdp_rxq_info_unreg(&rxr->xdp_rxq); +#endif +#ifdef CONFIG_PAGE_POOL + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; +#endif + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; + + ring = &rxr->rx_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + + ring = &rxr->rx_agg_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + } +} + +#ifdef 
CONFIG_PAGE_POOL +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, int numa_node) +{ + struct page_pool_params pp = { 0 }; + + pp.pool_size = bp->rx_agg_ring_size; + if (BNXT_RX_PAGE_MODE(bp)) + pp.pool_size += bp->rx_ring_size; + + pp.nid = numa_node; +#ifdef HAVE_PAGE_POOL_NAPI_MAPPING + pp.napi = &rxr->bnapi->napi; +#endif + pp.dev = &bp->pdev->dev; + pp.dma_dir = bp->rx_dir; +#if (PP_FLAG_DMA_SYNC_DEV) + pp.max_len = BNXT_RX_PAGE_SIZE; +#endif +#ifdef HAVE_PAGE_POOL_GET_DMA_ADDR + pp.flags = PP_FLAG_DMA_MAP; +#endif + pp.flags |= PP_FLAG_DMA_SYNC_DEV; + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) + pp.flags |= PP_FLAG_PAGE_FRAG; + rxr->page_pool = page_pool_create(&pp); + if (IS_ERR(rxr->page_pool)) { + int err = PTR_ERR(rxr->page_pool); + rxr->page_pool = NULL; + return err; + } + return 0; +} +#else +static int bnxt_alloc_rx_page_pool(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, int numa_node) +{ + return 0; +} +#endif + +static int bnxt_alloc_rx_rings(struct bnxt *bp) +{ + int numa_node = dev_to_node(&bp->pdev->dev); + int i, rc = 0, agg_rings = 0, cpu; + + if (!bp->rx_ring) + return -ENOMEM; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + agg_rings = 1; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring; + int cpu_node; + + ring = &rxr->rx_ring_struct; + + cpu = cpumask_local_spread(i, numa_node); + cpu_node = cpu_to_node(cpu); + netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n", + i, cpu_node); + rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node); + if (rc) + return rc; + +#ifdef HAVE_XDP_RXQ_INFO + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0); + if (rc < 0) + return rc; + +#ifdef HAVE_XSK_SUPPORT + rxr->xsk_pool = xsk_get_pool_from_qid(bp->dev, i); + if (BNXT_CHIP_P5_PLUS(bp) && test_bit(i, bp->af_xdp_zc_qs) && + rxr->xsk_pool && bp->xdp_prog && + xsk_buff_can_alloc(rxr->xsk_pool, bp->rx_ring_size)) { + rc = 
xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + rxr->flags |= BNXT_RING_FLAG_AF_XDP_ZC; + xsk_pool_set_rxq_info(rxr->xsk_pool, &rxr->xdp_rxq); + netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag set for rxring:%d\n", + __func__, i); + } else { + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, +#ifndef CONFIG_PAGE_POOL + MEM_TYPE_PAGE_SHARED, NULL); +#else + MEM_TYPE_PAGE_POOL, rxr->page_pool); +#endif + rxr->flags &= ~BNXT_RING_FLAG_AF_XDP_ZC; + netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag RESET for rxring:%d\n", + __func__, i); + } +#else /* HAVE_XSK_SUPPORT */ + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, +#ifndef CONFIG_PAGE_POOL + MEM_TYPE_PAGE_SHARED, NULL); +#else + MEM_TYPE_PAGE_POOL, rxr->page_pool); +#endif +#endif /* HAVE_XSK_SUPPORT */ + if (rc) { + xdp_rxq_info_unreg(&rxr->xdp_rxq); + return rc; + } +#endif /* HAVE_XDP_RXQ_INFO */ + + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + return rc; + + ring->grp_idx = i; + if (agg_rings) { + u16 mem_size; + + ring = &rxr->rx_agg_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + return rc; + + ring->grp_idx = i; + rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + } + } + if (bp->flags & BNXT_FLAG_TPA) + rc = bnxt_alloc_tpa_info(bp); + return rc; +} + +static void bnxt_free_tx_rings(struct bnxt *bp) +{ + int i; + struct pci_dev *pdev = bp->pdev; + + if (!bp->tx_ring) + return; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring; + + if (txr->tx_push) { + dma_free_coherent(&pdev->dev, bp->tx_push_size, + txr->tx_push, txr->tx_push_mapping); + txr->tx_push = NULL; + } + + ring = &txr->tx_ring_struct; + + bnxt_free_ring(bp, &ring->ring_mem); + } +} + +#define BNXT_TC_TO_RING_BASE(bp, tc) \ + ((tc) * (bp)->tx_nr_rings_per_tc) + +#define BNXT_RING_TO_TC_OFF(bp, 
tx) \ + ((tx) % (bp)->tx_nr_rings_per_tc) + +#define BNXT_RING_TO_TC(bp, tx) \ + ((tx) / (bp)->tx_nr_rings_per_tc) + +static int bnxt_alloc_tx_rings(struct bnxt *bp) +{ + int i, j, rc; + struct pci_dev *pdev = bp->pdev; + + bp->tx_push_size = 0; + if (bp->tx_push_mode == BNXT_PUSH_MODE_LEGACY) { + int push_size; + + push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + + bp->tx_push_thresh); + + if (push_size > 256) { + push_size = 0; + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + } + + bp->tx_push_size = push_size; + } + + for (i = 0, j = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring; + u8 qidx; + + ring = &txr->tx_ring_struct; + + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + return rc; + + ring->grp_idx = txr->bnapi->index; + if (bp->tx_push_size) { + dma_addr_t mapping; + + /* One pre-allocated DMA buffer to backup + * TX push operation + */ + txr->tx_push = dma_alloc_coherent(&pdev->dev, + bp->tx_push_size, + &txr->tx_push_mapping, + GFP_KERNEL); + + if (!txr->tx_push) + return -ENOMEM; + + mapping = txr->tx_push_mapping + + sizeof(struct tx_push_bd); + txr->data_mapping = cpu_to_le64(mapping); + + memset(txr->tx_push, 0, sizeof(struct tx_push_bd)); + } + qidx = bp->tc_to_qidx[j]; + ring->queue_id = bp->tx_q_info[qidx].queue_id; + txr->bd_base_cnt = BNXT_TX_BD_LONG_CNT; + spin_lock_init(&txr->tx_lock); + if (i < bp->tx_nr_rings_xdp) + continue; + if (BNXT_RING_TO_TC_OFF(bp, i) == (bp->tx_nr_rings_per_tc - 1)) + j++; + } + return 0; +} + +static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + kfree(cpr->cp_desc_ring); + cpr->cp_desc_ring = NULL; + ring->ring_mem.pg_arr = NULL; + kfree(cpr->cp_desc_mapping); + cpr->cp_desc_mapping = NULL; + ring->ring_mem.dma_arr = NULL; +} + +static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) +{ + cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), 
GFP_KERNEL); + if (!cpr->cp_desc_ring) + return -ENOMEM; + cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), + GFP_KERNEL); + if (!cpr->cp_desc_mapping) + return -ENOMEM; + return 0; +} + +static void bnxt_free_all_cp_arrays(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + if (!bnapi) + continue; + bnxt_free_cp_arrays(&bnapi->cp_ring); + } +} + +static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) +{ + int i, n = bp->cp_nr_pages; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + int rc; + + if (!bnapi) + continue; + rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); + if (rc) + return rc; + } + return 0; +} + +static void bnxt_free_cp_rings(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring_struct *ring; + int j; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + + bnxt_free_ring(bp, &ring->ring_mem); + + if (!cpr->cp_ring_arr) + continue; + + for (j = 0; j < cpr->cp_ring_count; j++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; + + ring = &cpr2->cp_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + bnxt_free_cp_arrays(cpr2); + } + kfree(cpr->cp_ring_arr); + cpr->cp_ring_arr = NULL; + cpr->cp_ring_count = 0; + } +} + +static int bnxt_alloc_cp_sub_ring(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + int rc; + + rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); + if (rc) { + bnxt_free_cp_arrays(cpr); + return -ENOMEM; + } + ring = &cpr->cp_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->flags = 
BNXT_RMEM_RING_PTE_FLAG; + rc = bnxt_alloc_ring(bp, rmem); + if (rc) { + bnxt_free_ring(bp, rmem); + bnxt_free_cp_arrays(cpr); + } + return rc; +} + +static int bnxt_alloc_cp_rings(struct bnxt *bp, bool irq_re_init) +{ + bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); + int i, j, rc, ulp_msix; + int tcs = bp->num_tc; + + if (!tcs) + tcs = 1; + ulp_msix = bnxt_get_ulp_msix_num(bp); + for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr, *cpr2; + struct bnxt_ring_struct *ring; + int cp_count = 0, k; + int rx = 0, tx = 0; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + cpr->bnapi = bnapi; + ring = &cpr->cp_ring_struct; + + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + return rc; + + if (irq_re_init) + ring->map_idx = ulp_msix + i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + continue; + + if (i < bp->rx_nr_rings) { + cp_count++; + rx = 1; + } + if (i < bp->tx_nr_rings_xdp) { + cp_count++; + tx = 1; + } else if ((sh && i < bp->tx_nr_rings) || + (!sh && i >= bp->rx_nr_rings)) { + cp_count += tcs; + tx = 1; + if (bnxt_napi_has_mpc(bp, i)) + cp_count++; + } + + cpr->cp_ring_arr = kcalloc(cp_count, sizeof(*cpr), + GFP_KERNEL); + if (!cpr->cp_ring_arr) + return -ENOMEM; + cpr->cp_ring_count = cp_count; + + for (k = 0; k < cp_count; k++) { + cpr2 = &cpr->cp_ring_arr[k]; + rc = bnxt_alloc_cp_sub_ring(bp, cpr2); + if (rc) + return rc; + cpr2->bnapi = bnapi; + cpr2->sw_stats = cpr->sw_stats; + cpr2->cp_idx = k; + if (!k && rx) { + bp->rx_ring[i].rx_cpr = cpr2; + cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_RX; + } else { + int n, tc = k - rx; + + if (tc >= tcs) { + bnxt_set_mpc_cp_ring(bp, i, cpr2); + continue; + } + n = BNXT_TC_TO_RING_BASE(bp, tc) + j; + bp->tx_ring[n].tx_cpr = cpr2; + cpr2->cp_ring_type = BNXT_NQ_HDL_TYPE_TX; + } + } + if (tx) + j++; + } + return 0; +} + +static void bnxt_init_ring_struct(struct bnxt *bp) +{ + int i, j; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct 
bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_ring_mem_info *rmem; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring_struct *ring; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + ring = &cpr->cp_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->cp_nr_pages; + rmem->page_size = HW_CMPD_RING_SIZE; + rmem->pg_arr = (void **)cpr->cp_desc_ring; + rmem->dma_arr = cpr->cp_desc_mapping; + rmem->vmem_size = 0; + + rxr = bnapi->rx_ring; + if (!rxr) + goto skip_rx; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_ring; + +skip_rx: + bnxt_for_each_napi_tx(j, bnapi, txr) { + ring = &txr->tx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->tx_nr_pages; + rmem->page_size = HW_TXBD_RING_SIZE; + rmem->pg_arr = (void **)txr->tx_desc_ring; + rmem->dma_arr = txr->tx_desc_mapping; + rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages; + rmem->vmem = (void **)&txr->tx_buf_ring; + } + } + bnxt_init_mpc_ring_struct(bp); +} + +static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) +{ + int i; + u32 prod; + struct rx_bd **rx_buf_ring; + + rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr; + for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) { + int j; + struct rx_bd *rxbd; + + rxbd = rx_buf_ring[i]; + if (!rxbd) + continue; + + for (j = 0; j < RX_DESC_CNT; j++, 
rxbd++, prod++) { + rxbd->rx_bd_len_flags_type = cpu_to_le32(type); + rxbd->rx_bd_opaque = prod; + } + } +} + +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct net_device *dev = bp->dev; + u32 prod; + int i; + + prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX(prod); + } + rxr->rx_prod = prod; + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + prod = rxr->rx_agg_prod; + for (i = 0; i < bp->rx_agg_ring_size; i++) { + if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + ring_nr, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX_AGG(prod); + } + rxr->rx_agg_prod = prod; + + if (rxr->rx_tpa) { + dma_addr_t mapping; +#ifdef HAVE_BUILD_SKB + u8 *data; +#else + struct sk_buff *data; +#endif + + for (i = 0; i < bp->max_tpa; i++) { + data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rxr->rx_tpa[i].data = data; +#ifdef HAVE_BUILD_SKB + rxr->rx_tpa[i].data_ptr = data + bp->rx_offset; +#else + rxr->rx_tpa[i].data_ptr = data->data + bp->rx_offset; +#endif + rxr->rx_tpa[i].mapping = mapping; + } + } + return 0; +} + +static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring_struct *ring; + u32 type; + + type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP; + + if (NET_IP_ALIGN == 2) + type |= RX_BD_FLAGS_SOP; + + rxr = &bp->rx_ring[ring_nr]; + ring = &rxr->rx_ring_struct; + bnxt_init_rxbd_pages(ring, type); + +#ifdef HAVE_NDO_XDP + if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) { +#ifdef HAVE_VOID_BPF_PROG_ADD + bpf_prog_add(bp->xdp_prog, 1); + rxr->xdp_prog = bp->xdp_prog; +#else + 
rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1); + if (IS_ERR(rxr->xdp_prog)) { + int rc = PTR_ERR(rxr->xdp_prog); + + rxr->xdp_prog = NULL; + return rc; + } +#endif + } +#endif + + ring->fw_ring_id = INVALID_HW_RING_ID; + + ring = &rxr->rx_agg_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + + if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; + + bnxt_init_rxbd_pages(ring, type); + } + + return bnxt_alloc_one_rx_ring(bp, ring_nr); +} + +static void bnxt_init_cp_rings(struct bnxt *bp) +{ + int i, j; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + ring->fw_ring_id = INVALID_HW_RING_ID; + cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + if (!cpr->cp_ring_arr) + continue; + for (j = 0; j < cpr->cp_ring_count; j++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; + + ring = &cpr2->cp_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + } + } +} + +static int bnxt_init_rx_rings(struct bnxt *bp) +{ + int i, rc = 0; + + if (BNXT_RX_PAGE_MODE(bp)) { + bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM; + bp->rx_dma_offset = XDP_PACKET_HEADROOM; + } else { + bp->rx_offset = BNXT_RX_OFFSET; + bp->rx_dma_offset = BNXT_RX_DMA_OFFSET; + } + + for (i = 0; i < bp->rx_nr_rings; i++) { + rc = bnxt_init_one_rx_ring(bp, i); + if (rc) + break; + } + + return rc; +} + +static int bnxt_init_tx_rings(struct bnxt *bp) +{ + u16 i; + + bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2, + BNXT_MIN_TX_DESC_CNT); + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + + ring->fw_ring_id = 
INVALID_HW_RING_ID;
	}

	return 0;
}

/* Release the per-completion-ring group-info array allocated by
 * bnxt_init_ring_grps().
 */
static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

/* Allocate (only when IRQs are being re-initialized) and reset the ring
 * group bookkeeping: every firmware ring/context id is marked invalid so
 * later HWRM allocation can tell which ids have been assigned.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)
/* Return true if any vnic still has a non-zero reference count, i.e. a
 * TC queue-action flow is still holding it.
 */
static bool bnxt_is_tc_q_action_active(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++) {
		if (bp->vnic_info[i].ref_cnt)
			return true;
	}
	return false;
}
#endif

/* Free the vnic_info array (and the custom-flower vnic_meta array when
 * configured), warning if queue-action flows still reference a vnic.
 */
static void bnxt_free_vnics(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)
	if (bnxt_is_tc_q_action_active(bp))
		netdev_warn(bp->dev, "Freeing vnics while queue action flows are active\n");
	kfree(bp->vnic_meta);
	bp->vnic_meta = NULL;
#endif
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

/* Size and allocate the vnic_info array: one default vnic, plus extras
 * for RFS (one per RX ring on pre-P5 chips, or a single ntuple vnic),
 * custom flower offload, and the Nitro A0 workaround vnic.
 */
static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS) {
		if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
			num_vnics++;
		else if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
			num_vnics += bp->rx_nr_rings;
	}
#endif

#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)
	if (!(bp->flags & BNXT_FLAG_RFS))
		num_vnics += bp->rx_nr_rings;
#endif

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		num_vnics++;

	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
				GFP_KERNEL);
	if (!bp->vnic_info)
		return -ENOMEM;

#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)
	if (!(bp->flags & BNXT_FLAG_RFS)) {
		bp->vnic_meta 
= kcalloc(num_vnics, sizeof(struct vnic_info_meta), GFP_KERNEL); + if (!bp->vnic_meta) + return -ENOMEM; + } +#endif + + bp->nr_vnics = num_vnics; + return 0; +} + +static void bnxt_init_vnics(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + int j; + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_id = i; + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) + vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID; + + vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; + /* HW do not need hkey for XOR and may ignore for toeplitz_cksum */ + if (bp->rss_hfunc == ETH_RSS_HASH_XOR || bp->rss_hfunc == ETH_RSS_HASH_CRC32) + continue; + if (bp->vnic_info[i].rss_hash_key) { + if (!i) { + u8 *key = (void *)vnic->rss_hash_key; + int k; + + if (!bp->rss_hash_key_valid && + !bp->rss_hash_key_updated) { + get_random_bytes(bp->rss_hash_key, + HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } + + memcpy(vnic->rss_hash_key, bp->rss_hash_key, + HW_HASH_KEY_SIZE); + + if (!bp->rss_hash_key_updated) + continue; + + bp->rss_hash_key_updated = false; + bp->rss_hash_key_valid = true; + + bp->toeplitz_prefix = 0; + for (k = 0; k < 8; k++) { + bp->toeplitz_prefix <<= 8; + bp->toeplitz_prefix |= key[k]; + } + } else { + memcpy(vnic->rss_hash_key, + bp->vnic_info[BNXT_VNIC_DEFAULT].rss_hash_key, + HW_HASH_KEY_SIZE); + } + } + } +#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + if (bp->vnic_meta) { + for (i = 0; i < bp->nr_vnics; i++) { + bp->vnic_meta[i].fw_vnic_id = INVALID_HW_RING_ID; + bp->vnic_info[i].q_index = INVALID_HW_RING_ID; + } + } +#endif +} + +static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg) +{ + int pages; + + pages = ring_size / desc_per_pg; + + if (!pages) + return 1; + + pages++; + + while (pages & (pages - 1)) + pages++; + + return pages; +} + +void bnxt_set_tpa_flags(struct bnxt *bp) +{ + bp->flags &= ~BNXT_FLAG_TPA; + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + return; + if (bp->dev->features 
& NETIF_F_LRO) + bp->flags |= BNXT_FLAG_LRO; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) +#ifdef HAVE_NETIF_F_GRO_HW + else if (bp->dev->features & NETIF_F_GRO_HW) +#else + if ((bp->dev->features & NETIF_F_GRO) && BNXT_SUPPORTS_TPA(bp) && + BNXT_TPA_MTU_OK(bp)) +#endif + bp->flags |= BNXT_FLAG_GRO; +#endif +} + +/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must + * be set on entry. + */ +void bnxt_set_ring_params(struct bnxt *bp) +{ + u32 ring_size, rx_size, rx_space, max_rx_cmpl; + u32 agg_factor = 0, agg_ring_size = 0; + + /* 8 for CRC and VLAN */ + rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8 + + BNXT_RX_METADATA_SIZE(bp)); + + rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + bp->rx_copy_thresh = BNXT_RX_COPY_THRESH; + ring_size = bp->rx_ring_size; + bp->rx_agg_ring_size = 0; + bp->rx_agg_nr_pages = 0; + + if (bp->flags & BNXT_FLAG_TPA) + agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); + + bp->flags &= ~BNXT_FLAG_JUMBO; + if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) { + u32 jumbo_factor; + + bp->flags |= BNXT_FLAG_JUMBO; + jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT; + if (jumbo_factor > agg_factor) + agg_factor = jumbo_factor; + } + if (agg_factor) { + if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { + ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", + bp->rx_ring_size, ring_size); + bp->rx_ring_size = ring_size; + } +#ifdef DEV_NETMAP + agg_factor = AGG_NM_RINGS; +#endif /* DEV_NETMAP */ + agg_ring_size = ring_size * agg_factor; + + bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, + RX_DESC_CNT); + if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { + u32 tmp = agg_ring_size; + + bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES; + agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1; + 
netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n", + tmp, agg_ring_size); + } + bp->rx_agg_ring_size = agg_ring_size; + bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1; + + if (BNXT_RX_PAGE_MODE(bp)) { + rx_space = PAGE_SIZE; + rx_size = PAGE_SIZE - + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } else { + rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN + + BNXT_RX_METADATA_SIZE(bp)); + rx_space = rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + } + } + + bp->rx_buf_use_size = rx_size; + bp->rx_buf_size = rx_space; + + bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT); + bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1; + + ring_size = bp->tx_ring_size; + bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT); + bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1; + + max_rx_cmpl = bp->rx_ring_size; + /* MAX TPA needs to be added because TPA_START completions are + * immediately recycled, so the TPA completions are not bound by + * the RX ring size. + */ + if (bp->flags & BNXT_FLAG_TPA) + max_rx_cmpl += bp->max_tpa; + /* RX and TPA completions are 32-byte, all others are 16-byte */ + ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size; + bp->cp_ring_size = ring_size; + + bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT); + bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT; + bp->cp_ring_mask = bp->cp_bit - 1; +} + +/* Changing allocation mode of RX rings. + * TODO: Update when extending xdp_rxq_info to support allocation modes. 
+ */ +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) +{ + struct net_device *dev = bp->dev; + + if (page_mode) { +#ifdef HAVE_XDP_MULTI_BUFF + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_RX_PAGE_MODE; + + if (bp->xdp_prog->aux->xdp_has_frags) + dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); + else + dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU(bp)); + if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) { + bp->flags |= BNXT_FLAG_JUMBO; + bp->rx_skb_func = bnxt_rx_multi_page_skb; + } else { + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->rx_skb_func = bnxt_rx_page_skb; + } + bp->rx_dir = DMA_BIDIRECTIONAL; + /* Disable LRO or GRO_HW */ + netdev_update_features(dev); +#else +#ifdef BNXT_RX_PAGE_MODE_SUPPORT + if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) + return -EOPNOTSUPP; +#ifdef HAVE_MIN_MTU + dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU(bp)); +#endif /* HAVE_MIN_MTU */ + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; + bp->rx_dir = DMA_BIDIRECTIONAL; + bp->rx_skb_func = bnxt_rx_page_skb; + /* Disable LRO or GRO_HW */ + netdev_update_features(dev); +#else + return -EOPNOTSUPP; +#endif /* BNXT_RX_PAGE_MODE_SUPPORT */ +#endif /* HAVE_XDP_MULTI_BUFF */ + } else { +#ifdef HAVE_MIN_MTU + dev->max_mtu = bp->max_mtu; +#endif + bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; + bp->rx_dir = DMA_FROM_DEVICE; + bp->rx_skb_func = bnxt_rx_skb; + } + return 0; +} + +static void bnxt_free_vnic_attributes(struct bnxt *bp) +{ + int i; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + + if (!bp->vnic_info) + return; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + kfree(vnic->fw_grp_ids); + vnic->fw_grp_ids = NULL; + + kfree(vnic->uc_list); + vnic->uc_list = NULL; + + if (vnic->mc_list) { + dma_free_coherent(&pdev->dev, vnic->mc_list_size, + vnic->mc_list, vnic->mc_list_mapping); + vnic->mc_list = NULL; + } + + if (vnic->rss_table) { + 
dma_free_coherent(&pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + vnic->rss_table = NULL; + } + vnic->rss_hash_key = NULL; + vnic->flags = 0; + } +} + +static int bnxt_alloc_vnic_attributes(struct bnxt *bp) +{ + int i, rc = 0, size; + struct bnxt_vnic_info *vnic; + struct pci_dev *pdev = bp->pdev; + int max_rings; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + + if (vnic->flags & BNXT_VNIC_UCAST_FLAG) { + int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN; + + if (mem_size > 0) { + vnic->uc_list = kmalloc(mem_size, GFP_KERNEL); + if (!vnic->uc_list) { + rc = -ENOMEM; + goto out; + } + } + } + + if (vnic->flags & BNXT_VNIC_MCAST_FLAG) { + vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN; + vnic->mc_list = + dma_alloc_coherent(&pdev->dev, + vnic->mc_list_size, + &vnic->mc_list_mapping, + GFP_KERNEL); + if (!vnic->mc_list) { + rc = -ENOMEM; + goto out; + } + } + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + goto vnic_skip_grps; + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + max_rings = bp->rx_nr_rings; + else + max_rings = 1; + + vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL); + if (!vnic->fw_grp_ids) { + rc = -ENOMEM; + goto out; + } +vnic_skip_grps: + if ((bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && + !(vnic->flags & BNXT_VNIC_RSS_FLAG)) + continue; + + /* Allocate rss table and hash key */ + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&pdev->dev, + vnic->rss_table_size, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) { + rc = -ENOMEM; + goto out; + } + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + } + return 0; + +out: + return rc; +} + +static void bnxt_free_hwrm_resources(struct bnxt *bp) +{ + struct 
hlist_node __maybe_unused *dummy; + struct bnxt_hwrm_wait_token *token; + + dma_pool_destroy(bp->hwrm_dma_pool); + bp->hwrm_dma_pool = NULL; + + rcu_read_lock(); + __hlist_for_each_entry_rcu(token, dummy, &bp->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); + rcu_read_unlock(); +} + +static int bnxt_alloc_hwrm_resources(struct bnxt *bp) +{ + bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, + BNXT_HWRM_DMA_SIZE, + BNXT_HWRM_DMA_ALIGN, 0); + if (!bp->hwrm_dma_pool) + return -ENOMEM; + + INIT_HLIST_HEAD(&bp->hwrm_pending_list); + + return 0; +} + +void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) +{ + kfree(stats->hw_masks); + stats->hw_masks = NULL; + kfree(stats->sw_stats); + stats->sw_stats = NULL; + if (stats->hw_stats) { + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; + } +} + +static void bnxt_free_stats_cosqnames_mem(struct bnxt *bp) +{ + kfree(bp->tx_cosq_names); + bp->tx_cosq_names = NULL; + kfree(bp->rx_cosq_names); + bp->rx_cosq_names = NULL; +} + +int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, + bool alloc_masks) +{ + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) + return -ENOMEM; + + memset(stats->hw_stats, 0, stats->len); + + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); + if (!stats->sw_stats) + goto stats_mem_err; + + if (alloc_masks) { + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); + if (!stats->hw_masks) + goto stats_mem_err; + } + return 0; + +stats_mem_err: + bnxt_free_stats_mem(bp, stats); + return -ENOMEM; +} + +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = mask; +} + +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = 
le64_to_cpu(hw_mask_arr[i]);
}

/* Query per-function extended statistics counter masks from firmware
 * (HWRM_FUNC_QSTATS_EXT with the COUNTER_MASK flag) and copy them into
 * stats->hw_masks.  Requires extended-HW-stats FW capability and a
 * P5-plus chip; returns -EOPNOTSUPP otherwise, or the HWRM status.
 */
static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
				    struct bnxt_stats_mem *stats)
{
	struct hwrm_func_qstats_ext_output *resp;
	struct hwrm_func_qstats_ext_input *req;
	__le64 *hw_masks;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
	    !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		return -EOPNOTSUPP;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
	if (rc)
		return rc;

	/* fid 0xffff: presumably "this function" per the HSI spec — confirm */
	req->fid = cpu_to_le16(0xffff);
	req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;

	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc) {
		/* The mask block starts at rx_ucast_pkts in the response;
		 * each counter mask is 8 bytes, hence len / 8 entries.
		 */
		hw_masks = &resp->rx_ucast_pkts;
		bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Fill stats->hw_masks with firmware-reported counter masks, falling
 * back to a synthetic mask when the query is unsupported or fails:
 * 48-bit counters on P5-plus chips, full 64-bit otherwise.
 */
void bnxt_get_func_stats_ext_mask(struct bnxt *bp,
				  struct bnxt_stats_mem *stats)
{
	u64 mask;
	int rc;

	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
	if (rc) {
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			mask = (1ULL << 48) - 1;
		else
			mask = -1ULL;

		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
	}
}

/* Ask firmware to DMA generic statistics (or their counter masks, per
 * the flags argument) into the pre-allocated bp->generic_stats buffer.
 * No-op returning 0 when the FW lacks the generic-stats capability.
 */
static int bnxt_hwrm_generic_qstats(struct bnxt *bp, u8 flags)
{
	struct hwrm_stat_generic_qstats_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_STAT_GENERIC_QSTATS);
	if (rc)
		return rc;

	req->flags = flags;
	/* NOTE(review): the lpbk variant below wraps the size in
	 * cpu_to_le16(); this assigns bp->generic_stats.len raw — confirm
	 * generic_stat_size is not a __le16 field in the HSI definition.
	 */
	req->generic_stat_size = bp->generic_stats.len;
	req->generic_stat_host_addr = cpu_to_le64(bp->generic_stats.hw_stats_map);

	return hwrm_req_send(bp, req);
}

/* Ask firmware to DMA loopback port statistics (or their counter masks,
 * per the flags argument) into the bp->lpbk_stats buffer.  No-op
 * returning 0 when the FW lacks the loopback-stats capability.
 */
static int bnxt_hwrm_lpbk_qstats(struct bnxt *bp, u8 flags)
{
	struct hwrm_port_lpbk_qstats_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_LPBK_STATS))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_PORT_LPBK_QSTATS);
	if (rc)
		return rc;

	req->flags = flags;
	req->lpbk_stat_size = cpu_to_le16((u16)bp->lpbk_stats.len);
	req->lpbk_stat_host_addr = cpu_to_le64(bp->lpbk_stats.hw_stats_map);

	return hwrm_req_send(bp, req);
}

+static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_ecn_qstats(struct bnxt *bp, u8 flags); + +static void bnxt_init_stats(struct bnxt *bp) +{ + int rc, rx_count, tx_count, stats_count; + struct bnxt_napi *bnapi = bp->bnapi[0]; + __le64 *rx_stats, *tx_stats, *hw_stats; + u64 *rx_masks, *tx_masks, *hw_masks; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + u64 mask; + u8 flags; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + bnxt_get_func_stats_ext_mask(bp, stats); + + if (bp->flags & BNXT_FLAG_PORT_STATS) { + stats = &bp->port_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats) / 8; + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_count = sizeof(struct tx_port_stats) / 8; + + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); + bnxt_hwrm_port_qstats(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + stats = &bp->rx_port_stats_ext; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats_ext) / 8; + stats = &bp->tx_port_stats_ext; + tx_stats = stats->hw_stats; + tx_masks = stats->hw_masks; + tx_count = sizeof(struct tx_port_stats_ext) / 8; + + flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats_ext(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + if (tx_stats) + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + if (tx_stats) + 
bnxt_copy_hw_masks(tx_masks, tx_stats, + tx_count); + bnxt_hwrm_port_qstats_ext(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_ECN_STATS) { + stats = &bp->ecn_marked_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct port_stats_ecn) / 8; + + flags = PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_ecn_qstats(bp, flags); + if (rc) { + mask = (1ULL << 32) - 1; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_hwrm_port_ecn_qstats(bp, 0); + } + } + if (bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS) { + stats = &bp->generic_stats; + hw_stats = stats->hw_stats; + hw_masks = stats->hw_masks; + stats_count = sizeof(struct generic_sw_hw_stats) / 8; + + flags = STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_generic_qstats(bp, flags); + if (rc) { + mask = (1ULL << 32) - 1; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } else { + bnxt_copy_hw_masks(hw_masks, hw_stats, stats_count); + bnxt_hwrm_generic_qstats(bp, 0); + } + } + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + stats = &bp->lpbk_stats; + hw_stats = stats->hw_stats; + hw_masks = stats->hw_masks; + stats_count = sizeof(struct port_lpbk_stats) / 8; + + flags = PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_lpbk_qstats(bp, flags); + if (rc) { + mask = -1ULL; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } else { + bnxt_copy_hw_masks(hw_masks, hw_stats, stats_count); + bnxt_hwrm_lpbk_qstats(bp, 0); + } + } +} + +static void bnxt_free_port_stats(struct bnxt *bp) +{ + bp->flags &= ~(BNXT_FLAG_PORT_STATS | BNXT_FLAG_PORT_STATS_EXT | + BNXT_FLAG_ECN_STATS); + bp->fw_cap &= ~BNXT_FW_CAP_GENERIC_STATS; + + bnxt_free_stats_mem(bp, &bp->port_stats); + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->ecn_marked_stats); + bnxt_free_stats_mem(bp, &bp->generic_stats); + 
bnxt_free_stats_mem(bp, &bp->lpbk_stats); +} + +static void bnxt_free_ring_stats(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + bnxt_free_stats_mem(bp, &cpr->stats); + + kfree(cpr->sw_stats); + cpr->sw_stats = NULL; + } +} + +static int bnxt_hwrm_port_ecn_qcfg(struct bnxt *bp) +{ + struct hwrm_fw_ecn_qcfg_output *resp; + struct hwrm_fw_ecn_qcfg_input *req; + int rc = -EOPNOTSUPP; + + if (bp->fw_cap & BNXT_FW_CAP_ECN_STATS) { + rc = hwrm_req_init(bp, req, HWRM_FW_ECN_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + u16 flags = le16_to_cpu(resp->flags); + + if (!(flags & FW_ECN_QCFG_RESP_FLAGS_ENABLE_ECN)) + rc = -EOPNOTSUPP; + } + hwrm_req_drop(bp, req); + } + + return rc; +} + +static int bnxt_hwrm_port_ecn_qstats(struct bnxt *bp, u8 flags) +{ + struct hwrm_port_ecn_qstats_input *req; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_ECN_STATS)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_ECN_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->ecn_stat_buf_size = cpu_to_le16(sizeof(struct port_stats_ecn)); + req->ecn_stat_host_addr = cpu_to_le64(bp->ecn_marked_stats.hw_stats_map); + + return hwrm_req_send(bp, req); +} + +static int bnxt_alloc_stats(struct bnxt *bp) +{ + u32 size, i; + int rc; + + size = bp->hw_ring_stats_size; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL); + if (!cpr->sw_stats) + return -ENOMEM; + + cpr->stats.len = size; + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); + if (rc) + return rc; + + cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + + if (BNXT_VF(bp) || bp->chip_num == 
CHIP_NUM_58700) + return 0; + + if (!BNXT_ASIC(bp) && !(bp->flags & BNXT_FLAG_CHIP_P7)) + return 0; + + if (!bnxt_hwrm_port_ecn_qcfg(bp)) { + if (bp->ecn_marked_stats.hw_stats) + goto alloc_port_stats; + + bp->ecn_marked_stats.len = sizeof(struct port_stats_ecn); + rc = bnxt_alloc_stats_mem(bp, &bp->ecn_marked_stats, true); + if (!rc) + bp->flags |= BNXT_FLAG_ECN_STATS; + } + +alloc_port_stats: + if (bp->port_stats.hw_stats) + goto alloc_ext_stats; + + bp->port_stats.len = BNXT_PORT_STATS_SIZE; + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); + if (rc) + return rc; + + bp->flags |= BNXT_FLAG_PORT_STATS; + +alloc_ext_stats: + /* Display extended statistics only if FW supports it */ + if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) + goto alloc_generic_stats; + + if (bp->rx_port_stats_ext.hw_stats) + goto alloc_tx_ext_stats; + + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + goto alloc_generic_stats; + +alloc_tx_ext_stats: + if (bp->tx_port_stats_ext.hw_stats) + goto alloc_generic_stats; + + if (bp->hwrm_spec_code >= 0x10902 || + (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); + /* Extended stats are optional */ + if (rc) + goto alloc_generic_stats; + } + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; + +alloc_generic_stats: + if (bp->generic_stats.hw_stats) + goto alloc_lpbk_stats; + + if (bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS) { + bp->generic_stats.len = sizeof(struct generic_sw_hw_stats); + rc = bnxt_alloc_stats_mem(bp, &bp->generic_stats, true); + /* Generic stats are optional */ + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_GENERIC_STATS; + } + +alloc_lpbk_stats: + /* Allow lpbk stats only for ROCE or SRIOV cap enabled */ + if (!(bp->flags & 
BNXT_FLAG_ROCE_CAP) && BNXT_SINGLE_PF(bp) && + !BNXT_SUPPORTS_SRIOV(bp->pdev)) + bp->fw_cap &= ~BNXT_FW_CAP_LPBK_STATS; + + /* Allocate space for port loopback stats */ + if (bp->lpbk_stats.hw_stats) + goto ret; + + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + bp->lpbk_stats.len = sizeof(struct port_lpbk_stats); + rc = bnxt_alloc_stats_mem(bp, &bp->lpbk_stats, true); + /* lpbk stats are optional */ + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_LPBK_STATS; + } + +ret: + return 0; +} + +static void bnxt_clear_ring_indices(struct bnxt *bp) +{ + int i, j; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + cpr->cp_raw_cons = 0; + + bnxt_for_each_napi_tx(j, bnapi, txr) { + txr->tx_prod = 0; + txr->tx_cons = 0; + txr->tx_hw_cons = 0; + txr->xdp_tx_pending = 0; + } + + rxr = bnapi->rx_ring; + if (rxr) { + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + rxr->rx_next_cons = 0; + } + } +} + +void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + INIT_LIST_HEAD(&fltr->list); + if ((fltr->type == BNXT_FLTR_TYPE_L2 && fltr->flags & BNXT_ACT_RING_DST) || + (fltr->type == BNXT_FLTR_TYPE_NTUPLE && fltr->flags & BNXT_ACT_NO_AGING)) + list_add_tail(&fltr->list, &bp->usr_fltr_list); +} + +void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + if (!list_empty(&fltr->list)) + list_del_init(&fltr->list); +} + +void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all) +{ + struct bnxt_filter_base *usr_fltr, *tmp; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if (!all && usr_fltr->type == BNXT_FLTR_TYPE_L2) + continue; + bnxt_del_one_usr_fltr(bp, usr_fltr); + } +} + +static void bnxt_del_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + hlist_del(&fltr->hash); + 
bnxt_del_one_usr_fltr(bp, fltr); + if (fltr->flags) { + clear_bit(fltr->sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; + } + kfree(fltr); +} + +static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) +{ + int i; + + /* Under rtnl_lock and all our NAPIs have been disabled. It's + * safe to delete the hash table. + */ + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp, __maybe_unused *nxt; + struct bnxt_ntuple_filter *fltr; + + head = &bp->ntp_fltr_hash_tbl[i]; + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { + bnxt_del_l2_filter(bp, fltr->l2_fltr); + if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || + !list_empty(&fltr->base.list))) + continue; + bnxt_del_fltr(bp, &fltr->base); + } + } + if (!all) + return; + + bitmap_free(bp->ntp_fltr_bmap); + bp->ntp_fltr_bmap = NULL; + bp->ntp_fltr_count = 0; +} + +static int bnxt_alloc_ntp_fltrs(struct bnxt *bp) +{ + int i, rc = 0; + + if (!(bp->flags & BNXT_FLAG_RFS) || bp->ntp_fltr_bmap) + return 0; + + for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) + INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]); + + bp->ntp_fltr_count = 0; + bp->ntp_fltr_bmap = bitmap_zalloc(bp->max_fltr, GFP_KERNEL); + + if (!bp->ntp_fltr_bmap) + rc = -ENOMEM; + + return rc; +} + +static void bnxt_free_l2_filters(struct bnxt *bp, bool all) +{ + int i; + + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp, __maybe_unused *nxt; + struct bnxt_l2_filter *fltr; + + head = &bp->l2_fltr_hash_tbl[i]; + __hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) { + if (!all && ((fltr->base.flags & BNXT_ACT_FUNC_DST) || + !list_empty(&fltr->base.list))) + continue; + bnxt_del_fltr(bp, &fltr->base); + } + } +} + +static void bnxt_init_l2_fltr_tbl(struct bnxt *bp) +{ + int i; + + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) + INIT_HLIST_HEAD(&bp->l2_fltr_hash_tbl[i]); + prandom_bytes(&bp->hash_seed, sizeof(bp->hash_seed)); +} + +static void 
bnxt_free_mem(struct bnxt *bp, bool irq_re_init) +{ + bnxt_free_vnic_attributes(bp); + bnxt_free_mpc_rings(bp); + bnxt_free_tx_rings(bp); + bnxt_free_rx_rings(bp); + bnxt_free_cp_rings(bp); + bnxt_free_all_cp_arrays(bp); + bnxt_free_ntp_fltrs(bp, false); + bnxt_free_l2_filters(bp, false); + if (irq_re_init) { + bnxt_free_ring_stats(bp); + if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_free_port_stats(bp); + bnxt_free_ring_grps(bp); + bnxt_free_vnics(bp); + bnxt_free_mpcs(bp); + kfree(bp->tx_ring_map); + bp->tx_ring_map = NULL; + kfree(bp->tx_ring); + bp->tx_ring = NULL; + kfree(bp->rx_ring); + bp->rx_ring = NULL; + kfree(bp->bnapi); + bp->bnapi = NULL; + } else { + bnxt_clear_ring_indices(bp); + } +} + +static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) +{ + int i, j, rc, size, arr_size; + void *bnapi; + + if (irq_re_init) { + /* Allocate bnapi mem pointer array and mem block for + * all queues + */ + arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) * + bp->cp_nr_rings); + size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi)); + bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL); + if (!bnapi) + return -ENOMEM; + + bp->bnapi = bnapi; + bnapi += arr_size; + for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) { + bp->bnapi[i] = bnapi; + bp->bnapi[i]->index = i; + bp->bnapi[i]->bp = bp; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + struct bnxt_cp_ring_info *cpr = + &bp->bnapi[i]->cp_ring; + + cpr->cp_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } + } + + bp->rx_ring = kcalloc(bp->rx_nr_rings, + sizeof(struct bnxt_rx_ring_info), + GFP_KERNEL); + if (!bp->rx_ring) + return -ENOMEM; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + rxr->rx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + rxr->rx_agg_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + } else { + 
rxr->rx_cpr = &bp->bnapi[i]->cp_ring; + } + rxr->bnapi = bp->bnapi[i]; + bp->bnapi[i]->rx_ring = &bp->rx_ring[i]; + } + + bp->tx_ring = kcalloc(bp->tx_nr_rings, + sizeof(struct bnxt_tx_ring_info), + GFP_KERNEL); + if (!bp->tx_ring) + return -ENOMEM; + + bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16), + GFP_KERNEL); + + if (!bp->tx_ring_map) + return -ENOMEM; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + j = 0; + else + j = bp->rx_nr_rings; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + struct bnxt_napi *bnapi2; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + txr->tx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i; + if (i >= bp->tx_nr_rings_xdp) { + int k = j + BNXT_RING_TO_TC_OFF(bp, i); + + bnapi2 = bp->bnapi[k]; + txr->txq_index = i - bp->tx_nr_rings_xdp; + txr->tx_napi_idx = + BNXT_RING_TO_TC(bp, txr->txq_index); + bnapi2->tx_ring[txr->tx_napi_idx] = txr; + bnapi2->tx_int = bnxt_tx_int; + } else { + bnapi2 = bp->bnapi[j]; + bnapi2->flags |= BNXT_NAPI_FLAG_XDP; + bnapi2->tx_ring[0] = txr; + bnapi2->tx_int = bnxt_tx_int_xdp; +#ifdef HAVE_XSK_SUPPORT + txr->xsk_pool = xsk_get_pool_from_qid(bp->dev, i); +#endif + j++; + } + txr->bnapi = bnapi2; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + txr->tx_cpr = &bnapi2->cp_ring; + } + + rc = bnxt_alloc_mpcs(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_stats(bp); + if (rc) + goto alloc_mem_err; + bnxt_init_stats(bp); + + rc = bnxt_alloc_ntp_fltrs(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnics(bp); + if (rc) + goto alloc_mem_err; + + } + + rc = bnxt_alloc_all_cp_arrays(bp); + if (rc) + goto alloc_mem_err; + + bnxt_init_ring_struct(bp); + + rc = bnxt_alloc_rx_rings(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_tx_rings(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_mpc_rings(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_cp_rings(bp, irq_re_init); + if (rc) 
+ goto alloc_mem_err; + + bp->vnic_info[BNXT_VNIC_DEFAULT].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | + BNXT_VNIC_UCAST_FLAG; + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && (bp->flags & BNXT_FLAG_RFS)) + bp->vnic_info[BNXT_VNIC_NTUPLE].flags |= (BNXT_VNIC_RSS_FLAG | + BNXT_VNIC_NTUPLE_FLAG); + + rc = bnxt_alloc_vnic_attributes(bp); + if (rc) + goto alloc_mem_err; + return 0; + +alloc_mem_err: + bnxt_free_mem(bp, true); + return rc; +} + +static void bnxt_disable_int(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + } +} + +static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) +{ + struct bnxt_napi *bnapi = bp->bnapi[n]; + struct bnxt_cp_ring_info *cpr; + + cpr = &bnapi->cp_ring; + return cpr->cp_ring_struct.map_idx; +} + +static void bnxt_disable_int_sync(struct bnxt *bp) +{ + int i; + + if (!bp->irq_tbl || !bp->bnapi) + return; + + atomic_inc(&bp->intr_sem); + + bnxt_disable_int(bp); + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + synchronize_irq(bp->irq_tbl[map_idx].vector); + } +} + +static void bnxt_enable_int(struct bnxt *bp) +{ + int i; + + atomic_set(&bp->intr_sem, 0); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + } +} + +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, + bool async_only) +{ + DECLARE_BITMAP(async_events_bmap, 256); + u32 *events = (u32 *)async_events_bmap; + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; + u32 flags = 0; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; + + 
req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | + FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT; + flags |= FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT; + req->flags = cpu_to_le32(flags); + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + + if (BNXT_PF(bp)) { + u32 data[8]; + int i; + + memset(data, 0, sizeof(data)); + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { + u16 cmd = bnxt_vf_req_snif[i]; + unsigned int bit, idx; + + if ((bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) && + (cmd == HWRM_PORT_PHY_QCFG)) + continue; + + idx = cmd / 32; + bit = cmd % 32; + data[idx] |= 1 << bit; + } + + for (i = 0; i < 8; i++) + req->vf_req_fwd[i] = cpu_to_le32(data[i]); + + req->enables |= + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); + } + + /* Enable TF NIC Flow mode only if also UDCC capable and a PF */ + if (BNXT_PF(bp) && BNXT_TF_RX_NIC_FLOW_CAP(bp) && BNXT_UDCC_CAP(bp)) { + req->flags |= cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE); + netdev_info(bp->dev, "Enabling TF ingress NIC flow mode\n"); + } + + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->flags |= cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); + + memset(async_events_bmap, 0, sizeof(async_events_bmap)); + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) { + u16 event_id = bnxt_async_events_arr[i]; + + if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + continue; + if (event_id == 
ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE && + !bp->ptp_cfg) + continue; + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + } + if (bmap && bmap_size) { + for (i = 0; i < bmap_size; i++) { + if (test_bit(i, bmap)) + __set_bit(i, async_events_bmap); + } + } + for (i = 0; i < 8; i++) + req->async_event_fwd[i] |= cpu_to_le32(events[i]); + + if (async_only) + req->enables = + cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); + if (resp->flags & + cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; + } + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) +{ + struct hwrm_func_drv_unrgtr_input *req; + int rc; + + if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return hwrm_req_send(bp, req); +} + +static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa); + +static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) +{ + struct hwrm_tunnel_dst_port_free_input *req; + u32 rc; + + if (BNXT_NO_FW_ACCESS(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); + if (rc) + return rc; + + req->tunnel_type = tunnel_type; + + switch (tunnel_type) { + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: + req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + bp->vxlan_port = 0; + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; + break; + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: + req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + bp->nge_port = 0; + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; + break; + case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE: + req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_gpe_fw_dst_port_id); + bp->vxlan_gpe_port = 0; + bp->vxlan_gpe_fw_dst_port_id = 
INVALID_HW_RING_ID; + break; + default: + break; + } + + rc = hwrm_req_send(bp, req); + if (rc) + netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", + rc); + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, true); + return rc; +} + +static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, + u8 tunnel_type) +{ + struct hwrm_tunnel_dst_port_alloc_output *resp; + struct hwrm_tunnel_dst_port_alloc_input *req; + u32 rc; + + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); + if (rc) + return rc; + + req->tunnel_type = tunnel_type; + req->tunnel_dst_port_val = port; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n", + rc); + goto err_out; + } + + switch (tunnel_type) { + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: + bp->vxlan_port = port; + bp->vxlan_fw_dst_port_id = + le16_to_cpu(resp->tunnel_dst_port_id); + break; + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: + bp->nge_port = port; + bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); + break; + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE: + bp->vxlan_gpe_port = port; + bp->vxlan_gpe_fw_dst_port_id = + le16_to_cpu(resp->tunnel_dst_port_id); + break; + default: + break; + } + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, true); + +err_out: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) +{ + struct hwrm_cfa_l2_set_rx_mask_input *req; + struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + } + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); +} 
+ +void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + if (!atomic_dec_and_test(&fltr->refcnt)) + return; + spin_lock_bh(&bp->ntp_fltr_lock); + if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return; + } + hlist_del_rcu(&fltr->base.hash); + bnxt_del_one_usr_fltr(bp, &fltr->base); + if (fltr->base.flags) { + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + bp->ntp_fltr_count--; + } + spin_unlock_bh(&bp->ntp_fltr_lock); + kfree_rcu(fltr, base.rcu); +} + +static struct bnxt_l2_filter *__bnxt_lookup_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u32 idx) +{ + struct hlist_head *head = &bp->l2_fltr_hash_tbl[idx]; + struct hlist_node __maybe_unused *node; + struct bnxt_l2_filter *fltr; + + __hlist_for_each_entry_rcu(fltr, node, head, base.hash) { + struct bnxt_l2_key *l2_key = &fltr->l2_key; + + if (ether_addr_equal(l2_key->dst_mac_addr, key->dst_mac_addr) && + l2_key->vlan == key->vlan) + return fltr; + } + return NULL; +} + +static struct bnxt_l2_filter *bnxt_lookup_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u32 idx) +{ + struct bnxt_l2_filter *fltr = NULL; + + rcu_read_lock(); + fltr = __bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) + atomic_inc(&fltr->refcnt); + rcu_read_unlock(); + return fltr; +} + +#define BNXT_IPV4_4TUPLE(bp, fkeys) \ + (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ + (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) || \ + ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ + (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)) + +#define BNXT_IPV6_4TUPLE(bp, fkeys) \ + (((fkeys)->basic.ip_proto == IPPROTO_TCP && \ + (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) || \ + ((fkeys)->basic.ip_proto == IPPROTO_UDP && \ + (bp)->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)) + +static u32 bnxt_get_rss_flow_tuple_len(struct bnxt *bp, struct flow_keys *fkeys) +{ + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + if 
(BNXT_IPV4_4TUPLE(bp, fkeys)) + return sizeof(fkeys->addrs.v4addrs) + + sizeof(fkeys->ports); + + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) + return sizeof(fkeys->addrs.v4addrs); + } + + if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { + if (BNXT_IPV6_4TUPLE(bp, fkeys)) + return sizeof(fkeys->addrs.v6addrs) + + sizeof(fkeys->ports); + + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) + return sizeof(fkeys->addrs.v6addrs); + } + + return 0; +} + +static u32 bnxt_toeplitz(struct bnxt *bp, struct flow_keys *fkeys, const unsigned char *key) +{ + u64 prefix = bp->toeplitz_prefix, hash = 0; + struct bnxt_ipv4_tuple tuple4; + struct bnxt_ipv6_tuple tuple6; + int i, j, len = 0; + u8 *four_tuple; + + len = bnxt_get_rss_flow_tuple_len(bp, fkeys); + if (!len) + return 0; + + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + tuple4.v4addrs = fkeys->addrs.v4addrs; + tuple4.ports = fkeys->ports; + four_tuple = (unsigned char *)&tuple4; + } else { + tuple6.v6addrs = fkeys->addrs.v6addrs; + tuple6.ports = fkeys->ports; + four_tuple = (unsigned char *)&tuple6; + } + + for (i = 0, j = 8; i < len; i++, j++) { + u8 byte = four_tuple[i]; + int bit; + + for (bit = 0; bit < 8; bit++, prefix <<= 1, byte <<= 1) { + if (byte & 0x80) + hash ^= prefix; + } + prefix |= (j < HW_HASH_KEY_SIZE) ? key[j] : 0; + } + + /* The valid part of the hash is in the upper 32 bits. 
*/ + return (hash >> 32) & BNXT_NTP_FLTR_HASH_MASK; +} + +#ifdef CONFIG_RFS_ACCEL +static struct bnxt_l2_filter *bnxt_lookup_l2_filter_from_key(struct bnxt *bp, + struct bnxt_l2_key *key) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + fltr = bnxt_lookup_l2_filter(bp, key, idx); + return fltr; +} +#endif + +static int bnxt_init_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr, + struct bnxt_l2_key *key, u32 idx) +{ + struct hlist_head *head; + + ether_addr_copy(fltr->l2_key.dst_mac_addr, key->dst_mac_addr); + fltr->l2_key.vlan = key->vlan; + fltr->base.type = BNXT_FLTR_TYPE_L2; + if (fltr->base.flags) { + int bit_id; + + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, + bp->max_fltr, 0); + if (bit_id < 0) + return -ENOMEM; + fltr->base.sw_id = (u16)bit_id; + bp->ntp_fltr_count++; + } + head = &bp->l2_fltr_hash_tbl[idx]; + hlist_add_head_rcu(&fltr->base.hash, head); + bnxt_insert_usr_fltr(bp, &fltr->base); + set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); + atomic_set(&fltr->refcnt, 1); + return 0; +} + +static struct bnxt_l2_filter *bnxt_alloc_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + gfp_t gfp) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + int rc; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + BNXT_L2_FLTR_HASH_MASK; + fltr = bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) + return fltr; + + fltr = kzalloc(sizeof(*fltr), gfp); + if (!fltr) + return ERR_PTR(-ENOMEM); + spin_lock_bh(&bp->ntp_fltr_lock); + rc = bnxt_init_l2_filter(bp, fltr, key, idx); + spin_unlock_bh(&bp->ntp_fltr_lock); + if (rc) { + bnxt_del_l2_filter(bp, fltr); + fltr = ERR_PTR(rc); + } + return fltr; +} + +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags) +{ + struct bnxt_l2_filter *fltr; + u32 idx; + int rc; + + idx = jhash2(&key->filter_key, BNXT_L2_KEY_SIZE, bp->hash_seed) & + 
BNXT_L2_FLTR_HASH_MASK; + spin_lock_bh(&bp->ntp_fltr_lock); + fltr = __bnxt_lookup_l2_filter(bp, key, idx); + if (fltr) { + fltr = ERR_PTR(-EEXIST); + goto l2_filter_exit; + } + fltr = kzalloc(sizeof(*fltr), GFP_ATOMIC); + if (!fltr) { + fltr = ERR_PTR(-ENOMEM); + goto l2_filter_exit; + } + fltr->base.flags = flags; + rc = bnxt_init_l2_filter(bp, fltr, key, idx); + if (rc) { + spin_unlock_bh(&bp->ntp_fltr_lock); + bnxt_del_l2_filter(bp, fltr); + return ERR_PTR(rc); + } + +l2_filter_exit: + spin_unlock_bh(&bp->ntp_fltr_lock); + return fltr; +} + +u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx) +{ + u16 fid = INVALID_HW_RING_ID; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) + fid = vf[vf_idx].fw_fid; + rcu_read_unlock(); + return fid; +} + +int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + struct hwrm_cfa_l2_filter_free_input *req; + u16 target_id = 0xffff; + int rc; + + if (fltr->base.flags & BNXT_ACT_FUNC_DST) { + struct bnxt_pf_info *pf = &bp->pf; + + if (fltr->base.vf_idx >= pf->active_vfs) + return -EINVAL; + + target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); + if (target_id == INVALID_HW_RING_ID) + return -EINVAL; + } + + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + + req->target_id = cpu_to_le16(target_id); + req->l2_filter_id = fltr->base.filter_id; + return hwrm_req_send(bp, req); +} + +int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr) +{ + struct hwrm_cfa_l2_filter_alloc_output *resp; + struct hwrm_cfa_l2_filter_alloc_input *req; + u16 target_id = 0xffff; + u32 flags; + int rc; + + if (fltr->base.flags & BNXT_ACT_FUNC_DST) { + struct bnxt_pf_info *pf = &bp->pf; + + if (fltr->base.vf_idx >= pf->active_vfs) + return -EINVAL; + + target_id = bnxt_vf_target_id(pf, fltr->base.vf_idx); + } + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); + if (rc) + return rc; + + req->target_id = cpu_to_le16(target_id); + 
flags = CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | + CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2; + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST; + if (!bp->xdp_prog) + flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE; + if (bp->flags & BNXT_FLAG_ROCE_CAP) { + flags &= ~CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK; + flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE; + } + + req->flags = cpu_to_le32(flags); + req->dst_id = cpu_to_le16(fltr->base.fw_vnic_id); + req->enables = + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); + ether_addr_copy(req->l2_addr, fltr->l2_key.dst_mac_addr); + eth_broadcast_addr(req->l2_addr_mask); + if (fltr->l2_key.vlan) { + req->enables |= + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK | + CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS); + req->num_vlans = 1; + req->l2_ivlan = fltr->l2_key.vlan; + req->l2_ivlan_mask = 0xfff; + } + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + fltr->base.filter_id = resp->l2_filter_id; + set_bit(BNXT_FLTR_VALID, &fltr->base.state); + } + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + struct hwrm_cfa_ntuple_filter_free_input *req; + int rc; + + set_bit(BNXT_FLTR_FW_DELETED, &fltr->base.state); + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); + if (rc) + return rc; + + req->ntuple_filter_id = fltr->base.filter_id; + return hwrm_req_send(bp, req); +} + +#define BNXT_NTP_FLTR_FLAGS \ + (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \ + 
CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID) + +#define BNXT_NTP_TUNNEL_FLTR_FLAG \ + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE + +void bnxt_fill_ipv6_mask(__be32 mask[4]) +{ + int i; + + for (i = 0; i < 4; i++) + mask[i] = cpu_to_be32(~0); +} + +static void bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, + struct hwrm_cfa_ntuple_filter_alloc_input *req, + struct bnxt_ntuple_filter *fltr) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + u16 rxq = fltr->base.rxq; + + if (fltr->base.flags & BNXT_ACT_RSS_CTX) { + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + if (rss_ctx->index == fltr->base.fw_vnic_id) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + break; + } + } + return; + } + req->dst_id = cpu_to_le16(bp->vnic_info[BNXT_VNIC_NTUPLE].fw_vnic_id); + req->enables |= + CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX; + if (fltr->base.flags & BNXT_ACT_NUMA_DIRECT) + rxq -= 1; + req->rfs_ring_tbl_idx = cpu_to_le16(rxq); +} + +int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr) +{ + bool cap_ring_dst = bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; + struct hwrm_cfa_ntuple_filter_alloc_output *resp; + struct hwrm_cfa_ntuple_filter_alloc_input *req; + struct bnxt_flow_masks *masks = &fltr->fmasks; + struct flow_keys *keys = &fltr->fkeys; + struct bnxt_l2_filter *l2_fltr; + struct bnxt_vnic_info *vnic; + u32 flags = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); + if (rc) + return rc; + + l2_fltr = fltr->l2_fltr; + req->l2_filter_id = l2_fltr->base.filter_id; 
+ + if (fltr->base.flags & BNXT_ACT_DROP) { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP; + } else if (fltr->base.flags & BNXT_ACT_NUMA_DIRECT) { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT; + req->dst_id = cpu_to_le16(bp->vnic_info[BNXT_VNIC_DEFAULT].fw_vnic_id); + if (cap_ring_dst && fltr->base.rxq) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); + } else { + flags |= CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req->dst_id = cpu_to_le16(fltr->base.rxq - 1); + } + } + } else if (cap_ring_dst) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) { + bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr); + } else { + flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; + req->dst_id = cpu_to_le16(fltr->base.rxq); + } + } else { + vnic = &bp->vnic_info[fltr->base.rxq + 1]; + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); + } + req->flags = cpu_to_le32(flags); + req->enables |= cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + + req->ethertype = htons(ETH_P_IP); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->ip_protocol = keys->basic.ip_proto; + + if (keys->basic.n_proto == htons(ETH_P_IPV6)) { + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; + *(struct in6_addr *)&req->src_ipaddr_mask[0] = masks->addrs.v6addrs.src; + *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; + *(struct in6_addr *)&req->dst_ipaddr_mask[0] = masks->addrs.v6addrs.dst; + } else { + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = masks->addrs.v4addrs.src; + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = masks->addrs.v4addrs.dst; + } + if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { + req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; + } + + req->src_port = 
keys->ports.src; + req->src_port_mask = masks->ports.src; + req->dst_port = keys->ports.dst; + req->dst_port_mask = masks->ports.dst; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + fltr->base.filter_id = resp->ntuple_filter_id; + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, + const u8 *mac_addr) +{ + struct bnxt_l2_filter *fltr; + struct bnxt_l2_key key; + int rc; + + ether_addr_copy(key.dst_mac_addr, mac_addr); + key.vlan = 0; + fltr = bnxt_alloc_l2_filter(bp, &key, GFP_KERNEL); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + fltr->base.fw_vnic_id = bp->vnic_info[vnic_id].fw_vnic_id; + rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); + if (rc) { + bnxt_del_l2_filter(bp, fltr); + } else { + bp->vnic_info[vnic_id].l2_filters[idx] = fltr; + bnxt_nic_flows_filter_add(bp, fltr->base.filter_id, mac_addr); + } + return rc; +} + +static void bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) +{ + u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ + + /* Any associated ntuple filters will also be cleared by firmware. 
*/ + for (i = 0; i < num_of_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + for (j = 0; j < vnic->uc_filter_count; j++) { + struct bnxt_l2_filter *fltr = vnic->l2_filters[j]; + bnxt_nic_flows_roce_rem(bp, fltr->base.filter_id); + bnxt_hwrm_l2_filter_free(bp, fltr); + bnxt_del_l2_filter(bp, fltr); + } + vnic->uc_filter_count = 0; + } +} + +#define BNXT_DFLT_TUNL_TPA_BMAP \ + (VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE | \ + VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 | \ + VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6) + +static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp, + struct hwrm_vnic_tpa_cfg_input *req) +{ + u32 tunl_tpa_bmap = BNXT_DFLT_TUNL_TPA_BMAP; + + if (!(bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA)) + return; + + if (bp->vxlan_port) + tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN; + if (bp->vxlan_gpe_port) + tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE; + if (bp->nge_port) + tunl_tpa_bmap |= VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE; + + req->enables |= cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN); + req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap); +} + +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u32 tpa_flags) +{ + u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; + struct hwrm_vnic_tpa_cfg_input *req; + int rc; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); + if (rc) + return rc; + + if (tpa_flags) { + u16 mss = bp->dev->mtu - 40; + u32 nsegs, n, segs = 0, flags; + + flags = VNIC_TPA_CFG_REQ_FLAGS_TPA | + VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA | + VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN | + VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ; + if (tpa_flags & BNXT_FLAG_GRO) + flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; + + req->flags = cpu_to_le32(flags); + + req->enables = + cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | + VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | + 
VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); + + /* Number of segs are log2 units, and first packet is not + * included as part of this units. + */ + if (mss <= BNXT_RX_PAGE_SIZE) { + n = BNXT_RX_PAGE_SIZE / mss; + nsegs = (MAX_SKB_FRAGS - 1) * n; + } else { + n = mss / BNXT_RX_PAGE_SIZE; + if (mss & (BNXT_RX_PAGE_SIZE - 1)) + n++; + nsegs = (MAX_SKB_FRAGS - n) / n; + } + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + segs = MAX_TPA_SEGS_P5; + max_aggs = bp->max_tpa; + } else { + segs = ilog2(nsegs); + } + req->max_agg_segs = cpu_to_le16(segs); + req->max_aggs = cpu_to_le16(max_aggs); + + req->min_agg_len = cpu_to_le32(512); + bnxt_hwrm_vnic_update_tunl_tpa(bp, req); + } + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + + return hwrm_req_send(bp, req); +} + +static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) +{ + struct bnxt_ring_grp_info *grp_info; + + grp_info = &bp->grp_info[ring->grp_idx]; + return grp_info->cp_fw_ring_id; +} + +static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return rxr->rx_cpr->cp_ring_struct.fw_ring_id; + else + return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct); +} + +static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return txr->tx_cpr->cp_ring_struct.fw_ring_id; + else + return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); +} + +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +{ + int entries; + u16 *tbl; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; + else + entries = HW_HASH_INDEX_SIZE; + + bp->rss_indir_tbl_entries = entries; + tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), + GFP_KERNEL); + if (!tbl) + return -ENOMEM; + + if (rss_ctx) + rss_ctx->rss_indir_tbl = tbl; + else + bp->rss_indir_tbl = tbl; + + return 0; +} + +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct 
bnxt_rss_ctx *rss_ctx) +{ + u16 max_rings, max_entries, pad, i; + u16 *rss_indir_tbl; + + if (!bp->rx_nr_rings) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + + max_entries = bnxt_get_rxfh_indir_size(bp->dev); + if (rss_ctx) + rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + else + rss_indir_tbl = &bp->rss_indir_tbl[0]; + + for (i = 0; i < max_entries; i++) + rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + + pad = bp->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); +} + +static u16 bnxt_get_max_rss_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + + if (!bp->rss_indir_tbl) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, bp->rss_indir_tbl[i]); + return max_ring; +} + +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); + u16 i, j; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { + if (!no_rss) + j = bp->rss_indir_tbl[i]; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } +} + +static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ +#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + void *vnic_meta = bp->vnic_meta; + u16 q_index = vnic->q_index; +#else + void *vnic_meta = NULL; + u16 q_index = INVALID_HW_RING_ID; +#endif + __le16 *ring_tbl = vnic->rss_table; + struct bnxt_rx_ring_info *rxr; + u16 tbl_size, i; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + if (vnic_meta) { + j = (q_index != INVALID_HW_RING_ID) ? 
q_index : bp->rss_indir_tbl[i]; + } else { + if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) + j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); + else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) + j = vnic->rss_ctx->rss_indir_tbl[i]; + else + j = bp->rss_indir_tbl[i]; + } + rxr = &bp->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + if (!rx_rings) + return 0; + return bnxt_calc_nr_ring_pages(rx_rings - 1, + BNXT_RSS_TABLE_ENTRIES_P5); + } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 2; + return 1; +} + +#if defined(HAVE_ETF_QOPT_OFFLOAD) +static int bnxt_alloc_tc_etf_bitmap(struct bnxt *bp) +{ + bp->etf_tx_ring_map = bitmap_zalloc(bp->hw_resc.max_tx_rings, + GFP_KERNEL); + if (!bp->etf_tx_ring_map) + return -ENOMEM; + + return 0; +} + +static void bnxt_free_tc_etf_bitmap(struct bnxt *bp) +{ + bitmap_free(bp->etf_tx_ring_map); + bp->etf_tx_ring_map = NULL; +} + +static void bnxt_set_txr_etf_bmap(struct bnxt *bp) +{ + int i; + struct bnxt_tx_ring_info *txr; + + if (!bp->etf_tx_ring_map) + return; + + if (bp->tx_ring) { + for (i = 0; i < bp->tx_nr_rings; i++) { + txr = &bp->tx_ring[bp->tx_ring_map[i]]; + txr->etf_enabled = test_bit(i, bp->etf_tx_ring_map); + if (txr->etf_enabled) + txr->bd_base_cnt = BNXT_TX_BD_LONG_CNT + 1; + } + } +} + +#endif /* HAVE_ETF_QOPT_OFFLOAD */ + +/* map hfunc to NIC native type */ +static u8 bnxt_get_ring_sel_mode(struct bnxt *bp) +{ + u8 ring_select_mode; + + switch (bp->rss_hfunc) { + case ETH_RSS_HASH_XOR: + ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR; + break; + case ETH_RSS_HASH_CRC32: + ring_select_mode = VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM; + break; + case ETH_RSS_HASH_TOP: + default: + /* set default as toeplitz if bp->rss_hfunc yet not set */ + ring_select_mode = 
VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ; + bp->rss_hfunc = ETH_RSS_HASH_TOP; + break; + } + return ring_select_mode; +} + +static void +__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, + struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + bnxt_fill_hw_rss_tbl_p5(bp, vnic); + if (bp->flags & BNXT_FLAG_CHIP_P7) + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT; + } else { + bnxt_fill_hw_rss_tbl(bp, vnic); + } + + if (bp->rss_hash_delta) { + req->hash_type = cpu_to_le32(bp->rss_hash_delta); + if (bp->rss_hash_cfg & bp->rss_hash_delta) + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; + else + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; + } else { + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + } + /* map hfunc to NIC native type */ + req->ring_select_mode = bnxt_get_ring_sel_mode(bp); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); +} + +static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic, bool set_rss) +{ + struct hwrm_vnic_rss_cfg_input *req; + int rc; + + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || + (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + + if (set_rss) + __bnxt_hwrm_vnic_set_rss(bp, req, vnic); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + return hwrm_req_send(bp, req); +} + +int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, bool set_rss) +{ + struct hwrm_vnic_rss_cfg_input *req; + dma_addr_t ring_tbl_map; + u32 i, nr_ctxs; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) + return hwrm_req_send(bp, req); + + __bnxt_hwrm_vnic_set_rss(bp, req, 
vnic); + ring_tbl_map = vnic->rss_table_dma_addr; + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + + hwrm_req_hold(bp, req); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req->ring_table_pair_index = i; + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + } + +exit: + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + struct hwrm_vnic_rss_qcfg_output *resp; + struct hwrm_vnic_rss_qcfg_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) + return; + + /* all contexts configured to same hash_type, zero always exists */ + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + resp = hwrm_req_hold(bp, req); + if (!hwrm_req_send(bp, req)) { + bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; + bp->rss_hash_delta = 0; + } + hwrm_req_drop(bp, req); +} + +int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct hwrm_vnic_plcmodes_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID); + + if (BNXT_RX_PAGE_MODE(bp)) { + req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size); + } else { + req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables |= + cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + } + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_req_send(bp, req); +} + +void 
bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 ctx_idx) +{ + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; + + req->rss_cos_lb_ctx_id = + cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]); + + hwrm_req_send(bp, req); + vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) +{ + int i, j; + + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { + if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j); + } + } + bp->rsscos_nr_ctxs = 0; +} + +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 ctx_idx) +{ + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vnic->fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); + hwrm_req_drop(bp, req); + + return rc; +} + +static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) + return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; + return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; +} + +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 q_index) +{ + struct hwrm_vnic_cfg_input *req; + unsigned int ring = 0, grp_idx; + u16 def_vlan = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[q_index]; + + req->default_rx_ring_id = + cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); + req->default_cmpl_ring_id = + cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); + req->enables = + 
cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | + VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); + goto vnic_mru; + } + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); + /* Only RSS support for now TBD: COS & LB */ + if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { + req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { + req->rss_rule = + cpu_to_le16(bp->vnic_info[BNXT_VNIC_DEFAULT].fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); + } else { + req->rss_rule = cpu_to_le16(0xffff); + } + + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && + (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { + req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + } else { + req->cos_rule = cpu_to_le16(0xffff); + } + + if (vnic->flags & BNXT_VNIC_RSS_FLAG) + ring = 0; + else if (vnic->flags & BNXT_VNIC_RFS_FLAG) + ring = vnic->vnic_id - 1; + else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = bp->rx_nr_rings - 1; + + grp_idx = bp->rx_ring[ring].bnapi->index; + req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + req->lb_rule = cpu_to_le16(0xffff); +vnic_mru: + req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) + def_vlan = bp->vf.vlan; +#endif + if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + if (!vnic->vnic_id && bnxt_ulp_registered(bp->edev)) + req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); + + return hwrm_req_send(bp, req); +} + +void bnxt_hwrm_vnic_free_one(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + 
if (vnic->fw_vnic_id != INVALID_HW_RING_ID) { + struct hwrm_vnic_free_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = + cpu_to_le32(vnic->fw_vnic_id); + + hwrm_req_send(bp, req); + vnic->fw_vnic_id = INVALID_HW_RING_ID; +#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + if (vnic->vnic_meta) { + vnic->vnic_meta->fw_vnic_id = INVALID_HW_RING_ID; + vnic->vnic_meta->meta_valid = false; + vnic->q_index = INVALID_HW_RING_ID; + vnic->vnic_meta = NULL; + } +#endif + } +} + +static void bnxt_hwrm_vnic_free(struct bnxt *bp) +{ + u16 i; + + for (i = 0; i < bp->nr_vnics; i++) + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]); +} + +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, + unsigned int nr_rings) +{ + unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + goto vnic_no_ring_grps; + + /* map ring groups to this vnic */ + for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) { + grp_idx = bp->rx_ring[i].bnapi->index; + if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n", + j, nr_rings); + break; + } + vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id; + } + +vnic_no_ring_grps: + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) + vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; + + if (vnic->vnic_id == BNXT_VNIC_DEFAULT) + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) +{ + struct hwrm_vnic_qcaps_output *resp; + struct hwrm_vnic_qcaps_input *req; + int rc; + + 
bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); + bp->flags &= ~BNXT_FLAG_ROCE_MIRROR_CAP; + bp->rss_cap &= ~BNXT_RSS_CAP_NEW_RSS_CAP; + if (bp->hwrm_spec_code < 0x10600) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + u32 flags = le32_to_cpu(resp->flags); + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + bp->rss_cap |= BNXT_RSS_CAP_NEW_RSS_CAP; + if (flags & + VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) + bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + + /* Older P5 fw before EXT_HW_STATS support did not set + * VLAN_STRIP_CAP properly. + */ + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || + (BNXT_CHIP_P5(bp) && + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) + bp->rss_cap |= BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED) + bp->rss_cap |= BNXT_RSS_CAP_RSS_TCAM; + bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); + if (bp->max_tpa_v2) { + if (BNXT_CHIP_P5(bp)) + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5; + else + bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P7; + } + if (flags & VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP) + bp->fw_cap |= BNXT_FW_CAP_VNIC_TUNNEL_TPA; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP) + bp->rss_cap |= BNXT_RSS_CAP_AH_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP) + bp->rss_cap |= BNXT_RSS_CAP_AH_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP) + bp->rss_cap |= BNXT_RSS_CAP_ESP_V4_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP) + bp->rss_cap |= BNXT_RSS_CAP_ESP_V6_RSS_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP) + bp->rss_cap |= BNXT_RSS_CAP_TOEPLITZ_CAP; + if 
(flags & VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP) + bp->rss_cap |= BNXT_RSS_CAP_XOR_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP) + bp->rss_cap |= BNXT_RSS_CAP_IPV6_FLOW_LABEL_CAP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP) + bp->rss_cap |= BNXT_RSS_CAP_TOEPLITZ_CHKSM_CAP; + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) +{ + struct hwrm_ring_grp_alloc_output *resp; + struct hwrm_ring_grp_alloc_input *req; + int rc; + u16 i; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + for (i = 0; i < bp->rx_nr_rings; i++) { + unsigned int grp_idx = bp->rx_ring[i].bnapi->index; + + req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); + + rc = hwrm_req_send(bp, req); + + if (rc) + break; + + bp->grp_info[grp_idx].fw_grp_id = + le32_to_cpu(resp->ring_group_id); + } + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) +{ + struct hwrm_ring_grp_free_input *req; + u16 i; + + if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return; + + if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) + return; + + hwrm_req_hold(bp, req); + for (i = 0; i < bp->cp_nr_rings; i++) { + if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) + continue; + req->ring_group_id = + cpu_to_le32(bp->grp_info[i].fw_grp_id); + + hwrm_req_send(bp, req); + bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; + } + hwrm_req_drop(bp, req); +} + +static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, + struct hwrm_ring_alloc_input *req, + struct bnxt_ring_struct *ring) +{ + struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; 
+ u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID | + RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; + + if (ring_type == HWRM_RING_ALLOC_AGG) { + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID; + } else { + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + if (NET_IP_ALIGN == 2) + req->flags = + cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD); + } + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->enables |= cpu_to_le32(enables); +} + +static int hwrm_ring_alloc_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, u32 map_index) +{ + struct bnxt_ring_mem_info *rmem = &ring->ring_mem; + struct bnxt_ring_grp_info *grp_info; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; + int rc, err = 0; + u16 ring_id; + u8 push_idx; + + rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); + if (rc) + goto exit; + + req->enables = 0; + if (rmem->nr_pages > 1) { + req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); + /* Page size is in log2 units */ + req->page_size = BNXT_PAGE_SHIFT; + req->page_tbl_depth = 1; + } else { + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + } + req->fbo = 0; + /* Association of ring index with doorbell index and MSIX number */ + req->logical_id = cpu_to_le16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_TX: { + struct bnxt_tx_ring_info *txr; + u16 flags = 0; + + txr = container_of(ring, struct bnxt_tx_ring_info, + tx_ring_struct); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + /* Association of transmit ring with completion ring */ + grp_info = &bp->grp_info[ring->grp_idx]; + req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); + req->length = cpu_to_le32(bp->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + 
if (ring->queue_id == BNXT_MPC_QUEUE_ID) { + req->mpc_chnls_type = ring->mpc_chnl_type; + req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE); + } else { + req->queue_id = cpu_to_le16(ring->queue_id); + if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) + req->cmpl_coal_cnt = + RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; + if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) + flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; + } + req->flags = cpu_to_le16(flags); + break; + } + case HWRM_RING_ALLOC_RX: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); + break; + case HWRM_RING_ALLOC_AGG: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); + break; + case HWRM_RING_ALLOC_CMPL: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + /* Association of cp ring with nq */ + grp_info = &bp->grp_info[map_index]; + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= cpu_to_le32(RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); + } else { + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + } + /* CQ always sized big enough for the worst case */ + if (bp->fw_cap & BNXT_FW_CAP_CQ_OVERFLOW_DETECT_DISABLE) + req->flags = + cpu_to_le16(RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION); + break; + case HWRM_RING_ALLOC_NQ: + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + break; + default: + netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", + ring_type); + return -EINVAL; + } + + resp = hwrm_req_hold(bp, req); + rc = 
hwrm_req_send(bp, req); + err = le16_to_cpu(resp->error_code); + ring_id = le16_to_cpu(resp->ring_id); + push_idx = resp->push_buffer_index; + hwrm_req_drop(bp, req); +exit: + if (rc || err) { + netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", + ring_type, rc, err); + return -EIO; + } + ring->fw_ring_id = ring_id; + ring->seed = ring_id + 1; + ring->push_idx = push_idx; + return rc; +} + +static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) +{ + int rc; + + if (BNXT_PF(bp)) { + struct hwrm_func_cfg_input *req; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); + } else { + struct hwrm_func_vf_cfg_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + return rc; + + req->enables = + cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); + } +} + +static void bnxt_set_db_mask(struct bnxt *bp, struct bnxt_db_info *db, + u32 ring_type) +{ + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_ring_mask = bp->tx_ring_mask; + break; + case HWRM_RING_ALLOC_RX: + db->db_ring_mask = bp->rx_ring_mask; + break; + case HWRM_RING_ALLOC_AGG: + db->db_ring_mask = bp->rx_agg_ring_mask; + break; + case HWRM_RING_ALLOC_CMPL: + case HWRM_RING_ALLOC_NQ: + db->db_ring_mask = bp->cp_ring_mask; + break; + } + if (bp->flags & BNXT_FLAG_CHIP_P7) { + db->db_epoch_mask = db->db_ring_mask + 1; + db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask); + } +} + +static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, + u32 map_idx, u32 xid) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ; + break; + case HWRM_RING_ALLOC_RX: + case 
HWRM_RING_ALLOC_AGG: + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key64 = DBR_PATH_L2; + break; + case HWRM_RING_ALLOC_NQ: + db->db_key64 = DBR_PATH_L2; + break; + } + db->db_key64 |= (u64)xid << DBR_XID_SFT; + + if (bp->flags & BNXT_FLAG_CHIP_P7) + db->db_key64 |= DBR_VALID; + + db->doorbell = bp->bar1 + bp->db_offset; + } else { + db->doorbell = bp->bar1 + map_idx * 0x80; + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + db->db_key32 = DB_KEY_TX; + break; + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + db->db_key32 = DB_KEY_RX; + break; + case HWRM_RING_ALLOC_CMPL: + db->db_key32 = DB_KEY_CP; + break; + } + } + bnxt_set_db_mask(bp, db, ring_type); + + /* Init the doorbell copy region for HW based db drop recovery */ + if (bp->hdbr_info.hdbr_enabled) { + db->db_cp = bnxt_hdbr_reg_db(bp, bnxt_hdbr_r2g(ring_type)); + if (ring_type == HWRM_RING_ALLOC_TX && bp->hdbr_info.debug_trace) + db->db_cp_debug_trace = true; + else + db->db_cp_debug_trace = false; + } +} + +static void bnxt_set_push_db(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u32 map_idx, struct bnxt_ring_struct *ring) +{ + struct bnxt_db_info *db; + u32 offset; + u64 dpi; + + db = &txr->tx_push_db; + db->doorbell = NULL; + db->db_key64 = 0; + if (bp->hdbr_info.hdbr_enabled) { + db->db_cp = NULL; + db->db_cp_debug_trace = false; + } + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || !bp->db_base_wc) + return; + + switch (bp->tx_push_mode) { + case BNXT_PUSH_MODE_WCB: + dpi = (map_idx / DB_WCB_PER_PAGE) + 1; + offset = map_idx % DB_WCB_PER_PAGE; + if ((dpi * DB_WCB_PAGE_SIZE) > (bp->db_size - bp->db_size_nc)) + return; + + db->doorbell = bp->bar1 + DB_WCB_FIRST_OFFSET + (offset * 8); + db->db_key64 = (((dpi & 0xff) << DBR_PI_LO_SFT) | + ((dpi & 0xf00) >> 8) << DBR_PI_HI_SFT); + txr->tx_push_wcb = bp->db_base_wc + + ((dpi - 1) * DB_WCB_PAGE_SIZE) + + ((offset + 1) * DB_WCB_BUFFER_SIZE); + break; + case BNXT_PUSH_MODE_PPP: + /* two buffers per idx for 
ping pong page mode */ + offset = map_idx * 2 * DB_PPP_SIZE; + if ((offset + 2 * DB_PPP_SIZE) > (bp->db_size - bp->db_size_nc)) + return; + + offset += ring->push_idx * DB_PPP_SIZE; + db->doorbell = bp->db_base_wc + offset; + db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID; + bnxt_set_db_mask(bp, db, HWRM_RING_ALLOC_TX); + txr->tx_push_wcb = db->doorbell + DB_PPP_BD_OFFSET; + break; + default: + return; + } + + db->db_key64 |= (u64)ring->fw_ring_id << DBR_XID_SFT; + + /* Init the doorbell copy region for HW based db drop recovery */ + if (bp->hdbr_info.hdbr_enabled) { + /* Push DB is sharing normal DB's backup slot */ + db->db_cp = txr->tx_db.db_cp; + db->db_cp_debug_trace = bp->hdbr_info.debug_trace ? true : false; + } +} + +int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + u32 type = HWRM_RING_ALLOC_CMPL; + struct bnxt_ring_struct *ring; + u32 map_idx = bnapi->index; + int rc; + + ring = &cpr->cp_ring_struct; + ring->handle = BNXT_SET_NQ_HDL(cpr); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); + return 0; +} + +int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u32 tx_idx) +{ + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 type = HWRM_RING_ALLOC_TX; + int rc; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); + if (rc) + return rc; + bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); + return 0; +} + +int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u32 rx_idx) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + struct bnxt_napi *bnapi = rxr->bnapi; + u32 type = HWRM_RING_ALLOC_RX; + u32 map_idx = bnapi->index; + int rc; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + bnxt_set_db(bp, &rxr->rx_db, type, 
map_idx, ring->fw_ring_id); + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + + return 0; +} + +static int bnxt_hwrm_ring_alloc(struct bnxt *bp) +{ + bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); + int i, rc = 0; + u32 type; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + type = HWRM_RING_ALLOC_NQ; + else + type = HWRM_RING_ALLOC_CMPL; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + u32 map_idx = ring->map_idx; + unsigned int vector; + + vector = bp->irq_tbl[map_idx].vector; + disable_irq_nosync(vector); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) { + enable_irq(vector); + goto err_out; + } + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + enable_irq(vector); + bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; + + if (!i) { + rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id); + if (rc) + netdev_warn(bp->dev, "Failed to set async event completion ring.\n"); + } + } + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); + if (rc) + goto err_out; + } + rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); + if (rc) + goto err_out; + bnxt_set_push_db(bp, txr, i, &txr->tx_ring_struct); +#ifdef DEV_NETMAP + bnxt_netmap_configure_tx_ring(bp, txr->txq_index); +#endif /* DEV_NETMAP */ + } + + type = HWRM_RING_ALLOC_RX; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + + rc = bnxt_hwrm_rx_ring_alloc(bp, rxr, i); + if (rc) + goto err_out; + /* If we have agg rings, post agg buffers first. 
*/ + if (!agg_rings) + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); + if (rc) + goto err_out; + } +#ifdef DEV_NETMAP + if (BNXT_CHIP_P5_PLUS(bp) && !agg_rings) { + rxr->netmap_idx = i; + bnxt_netmap_configure_rx_ring(bp, rxr); + } +#endif /* DEV_NETMAP */ + } + + if (agg_rings) { + type = HWRM_RING_ALLOC_AGG; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = + &rxr->rx_agg_ring_struct; + u32 grp_idx = ring->grp_idx; + u32 map_idx = grp_idx + bp->rx_nr_rings; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + goto err_out; + + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; +#ifdef DEV_NETMAP + if (BNXT_CHIP_P5_PLUS(bp)) { + rxr->netmap_idx = i * (2 + AGG_NM_RINGS); + bnxt_netmap_configure_rx_ring(bp, rxr); + } +#endif /* DEV_NETMAP */ + } + } + + rc = bnxt_hwrm_mpc_ring_alloc(bp); + if (rc) + goto err_out; + + if (bnxt_dbr_init(bp)) + netdev_warn(bp->dev, + "Failed to initialize DB recovery. 
Proceed with DBR disabled\n"); +err_out: + return rc; +} + +static int hwrm_ring_free_send_msg(struct bnxt *bp, + struct bnxt_ring_struct *ring, + u32 ring_type, int cmpl_ring_id) +{ + struct hwrm_ring_free_output *resp; + struct hwrm_ring_free_input *req; + u16 error_code = 0; + int rc; + + if (BNXT_NO_FW_ACCESS(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_RING_FREE); + if (rc) + goto exit; + + req->cmpl_ring = cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + error_code = le16_to_cpu(resp->error_code); + hwrm_req_drop(bp, req); +exit: + if (rc || error_code) { + netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", + ring_type, rc, error_code); + return -EIO; + } + return 0; +} + +void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) : + INVALID_HW_RING_ID; +#ifdef DEV_NETMAP + if (txr->tx_cpr->netmapped) + bnxt_netmap_txflush(txr); +#endif + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, + cmpl_ring_id); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +void bnxt_hwrm_rx_ring_free(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); +#ifdef DEV_NETMAP + if (rxr->rx_cpr->netmapped) + cmpl_ring_id = INVALID_HW_RING_ID; +#endif + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_RX, + close_path ? 
cmpl_ring_id : INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) +{ + u32 type; + int i; + + if (!bp->bnapi) + return; + + bnxt_dbr_cancel(bp); + + bnxt_hwrm_mpc_ring_free(bp, close_path); + + for (i = 0; i < bp->tx_nr_rings; i++) + bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); + + for (i = 0; i < bp->rx_nr_rings; i++) + bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + type = RING_FREE_REQ_RING_TYPE_RX_AGG; + else + type = RING_FREE_REQ_RING_TYPE_RX; + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + +#ifdef DEV_NETMAP + if (rxr->rx_cpr->netmapped) + cmpl_ring_id = INVALID_HW_RING_ID; +#endif + hwrm_ring_free_send_msg(bp, ring, type, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].agg_fw_ring_id = + INVALID_HW_RING_ID; + } + } + + /* The completion rings are about to be freed. After that the + * IRQ doorbell will not work anymore. So we need to disable + * IRQ here. 
+ */ + bnxt_disable_int_sync(bp); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + type = RING_FREE_REQ_RING_TYPE_NQ; + else + type = RING_FREE_REQ_RING_TYPE_L2_CMPL; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_ring_struct *ring; + int j; + + for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; + + ring = &cpr2->cp_ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + continue; + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + } + ring = &cpr->cp_ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + hwrm_ring_free_send_msg(bp, ring, type, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + } + } + + if (bp->hdbr_info.hdbr_enabled) + bnxt_hdbr_reset_l2pgs(bp); +} + +int bnxt_total_tx_rings(struct bnxt *bp) +{ + return bp->tx_nr_rings + bnxt_mpc_tx_rings_in_use(bp); +} + +static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared); +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared); + +static int bnxt_hwrm_get_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + u16 flags; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + + flags = le16_to_cpu(resp->flags); + if (!(flags & FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED)) + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + + hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings); + if 
(BNXT_NEW_RM(bp)) { + u16 cp, stats; + + hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings); + hw_resc->resv_hw_ring_grps = + le32_to_cpu(resp->alloc_hw_ring_grps); + hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); + hw_resc->resv_rsscos_ctxs = le16_to_cpu(resp->alloc_rsscos_ctx); + cp = le16_to_cpu(resp->alloc_cmpl_rings); + stats = le16_to_cpu(resp->alloc_stat_ctx); + hw_resc->resv_irqs = cp; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + int rx = hw_resc->resv_rx_rings; + int tx = hw_resc->resv_tx_rings; + int cp_p5; + + if (tx <= bnxt_mpc_tx_rings_in_use(bp) || + cp <= bnxt_mpc_cp_rings_in_use(bp)) { + rc = -ENOMEM; + goto get_rings_exit; + } + tx -= bnxt_mpc_tx_rings_in_use(bp); + cp_p5 = cp - bnxt_mpc_cp_rings_in_use(bp); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx >>= 1; + if (cp_p5 < (rx + tx)) { + rc = __bnxt_trim_rings(bp, &rx, &tx, cp_p5, false); + if (rc) + goto get_rings_exit; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + hw_resc->resv_rx_rings = rx; + tx += bnxt_mpc_tx_rings_in_use(bp); + hw_resc->resv_tx_rings = tx; + } + hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix); + hw_resc->resv_hw_ring_grps = rx; + } + hw_resc->resv_cp_rings = cp; + hw_resc->resv_stat_ctxs = stats; + hw_resc->resv_tx_key_ctxs = le32_to_cpu(resp->num_ktls_tx_key_ctxs); + hw_resc->resv_rx_key_ctxs = le32_to_cpu(resp->num_ktls_rx_key_ctxs); + } +get_rings_exit: + hwrm_req_drop(bp, req); + return rc; +} + +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + + hwrm_req_drop(bp, req); + return rc; +} + +static bool bnxt_rfs_supported(struct bnxt *bp); +#if 
defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp); +#endif + +static struct hwrm_func_cfg_input * +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_cfg_input *req; + u32 enables = 0; + + if (bnxt_hwrm_func_cfg_short_req_init(bp, &req)) + return NULL; + + req->fid = cpu_to_le16(0xffff); + enables |= hwr->tx ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + req->num_tx_rings = cpu_to_le16(hwr->tx); + if (hwr->tx && bp->tx_push_mode == BNXT_PUSH_MODE_PPP) + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE); + if (BNXT_NEW_RM(bp)) { + enables |= hwr->rx ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= hwr->stat ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + enables |= hwr->cp ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0; + enables |= hwr->cp_p5 ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + } else { + enables |= hwr->cp ? + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= hwr->grp ? + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } + enables |= hwr->vnic ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= hwr->rss_ctx ? 
FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : + 0; + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); + req->num_rx_rings = cpu_to_le16(hwr->rx); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); + req->num_msix = cpu_to_le16(hwr->cp); + } else { + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); + } + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); + bnxt_hwrm_reserve_pf_key_ctxs(bp, req); + } + req->enables |= cpu_to_le32(enables); + return req; +} + +static struct hwrm_func_vf_cfg_input * +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_vf_cfg_input *req; + u32 enables = 0; + + if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) + return NULL; + + enables |= hwr->tx ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= hwr->rx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= hwr->stat ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= hwr->rss_ctx ? FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + enables |= hwr->cp_p5 ? + FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + } else { + enables |= hwr->cp ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0; + enables |= hwr->grp ? + FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + } + enables |= hwr->vnic ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS; + + req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->num_tx_rings = cpu_to_le16(hwr->tx); + if (hwr->tx && bp->tx_push_mode == BNXT_PUSH_MODE_PPP) + req->flags |= cpu_to_le32(FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE); + req->num_rx_rings = cpu_to_le16(hwr->rx); + req->num_rsscos_ctxs = cpu_to_le16(hwr->rss_ctx); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + req->num_cmpl_rings = cpu_to_le16(hwr->cp_p5); + } else { + req->num_cmpl_rings = cpu_to_le16(hwr->cp); + req->num_hw_ring_grps = cpu_to_le16(hwr->grp); + } + req->num_stat_ctxs = cpu_to_le16(hwr->stat); + req->num_vnics = cpu_to_le16(hwr->vnic); + + req->enables = cpu_to_le32(enables); + return req; +} + +static int +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_cfg_input *req; + int rc; + + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); + if (!req) + return -ENOMEM; + + if (!req->enables) { + hwrm_req_drop(bp, req); + return 0; + } + + rc = hwrm_req_send(bp, req); + if (rc) + return rc; + + if (bp->hwrm_spec_code < 0x10601) + bp->hw_resc.resv_tx_rings = hwr->tx; + + return bnxt_hwrm_get_rings(bp); +} + +static int +bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_vf_cfg_input *req; + int rc; + + if (!BNXT_NEW_RM(bp)) { + bp->hw_resc.resv_tx_rings = hwr->tx; + return 0; + } + + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); + if (!req) + return -ENOMEM; + + rc = hwrm_req_send(bp, req); + if (rc) + return rc; + + return bnxt_hwrm_get_rings(bp); +} + +static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + if (BNXT_PF(bp)) + return bnxt_hwrm_reserve_pf_rings(bp, hwr); + else + return bnxt_hwrm_reserve_vf_rings(bp, hwr); +} + +int bnxt_nq_rings_in_use(struct bnxt *bp) +{ + return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp); +} + +int bnxt_min_nq_rings_in_use(struct bnxt *bp) +{ + if 
(!bnxt_ulp_registered(bp->edev)) + return bp->cp_nr_rings; + else + return bnxt_nq_rings_in_use(bp); +} + +static int bnxt_cp_rings_in_use(struct bnxt *bp) +{ + int cp; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return bnxt_nq_rings_in_use(bp); + + cp = bp->tx_nr_rings + bp->rx_nr_rings; + return cp + bnxt_mpc_cp_rings_in_use(bp); +} + +static int bnxt_get_func_stat_ctxs(struct bnxt *bp) +{ + return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); +} + +static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + if (!hwr->grp) + return 0; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + int rss_ctx; + +#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + if (BNXT_PF(bp)) { + /* Each ring needs a vnic apart from one default vnic. + * For each vnic, calculate the RSS ctxs number based + * on the number of RX rings. + */ + rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); + return rss_ctx * hwr->grp + rss_ctx; + } +#endif + rss_ctx = bnxt_get_nr_rss_ctxs(bp, hwr->grp); + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + rss_ctx *= hwr->vnic; + return rss_ctx; + } + if (BNXT_VF(bp)) + return BNXT_VF_MAX_RSS_CTX; + if (!(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) && bnxt_rfs_supported(bp)) + return hwr->grp + 1; + return 1; +} + +/* Check if a default RSS map needs to be setup. This function is only + * used on older firmware that does not require reserving RX rings. + */ +static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* The RSS map is valid for RX rings set to resv_rx_rings */ + if (hw_resc->resv_rx_rings != bp->rx_nr_rings) { + hw_resc->resv_rx_rings = bp->rx_nr_rings; + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp, NULL); + } +} + +static u16 +bnxt_get_total_vnics(struct bnxt *bp, int rx_rings) +{ +#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + /* On Thor, Queue redirect action can be supported without allocating + * a vnic for each ring. 
This can be achieved using Ring table index + * feature. Currently, TruFlow library uses VNIC approach + * for Wh+ and Thor in the DPDK implementation. TruFlow library + * is kept in sync between DPDK and TC world and that is why + * Ring table index feature is not used to support Queue redirect. + */ + /* Each ring needs a vnic apart from one default vnic. */ + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp)) { + if (bnxt_get_max_func_vnics(bp) < rx_rings + 1) + return bnxt_get_max_func_vnics(bp); + else + return rx_rings + 1; + } +#endif + if (bp->flags & BNXT_FLAG_RFS) { + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return 2 + bp->num_rss_ctx; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return rx_rings + 1; + } + + return 1; +} + +static bool bnxt_need_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int cp = bnxt_cp_rings_in_use(bp); + int nq = bnxt_nq_rings_in_use(bp); + int rx = bp->rx_nr_rings, stat; + int vnic = 1, grp = rx; + + if (hw_resc->resv_tx_rings != bnxt_total_tx_rings(bp) && + bp->hwrm_spec_code >= 0x10601) + return true; + + /* Old firmware does not need RX ring reservations but we still + * need to setup a default RSS map when needed. With new firmware + * we go through RX ring reservations first and then set up the + * RSS map for the successfully reserved RX rings when needed. 
+ */ + if (!BNXT_NEW_RM(bp)) { + bnxt_check_rss_tbl_no_rmgr(bp); + return false; + } + + vnic = bnxt_get_total_vnics(bp, rx); + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + stat = bnxt_get_func_stat_ctxs(bp); + if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || + hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat || + (hw_resc->resv_hw_ring_grps != grp && + !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))) + return true; + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && BNXT_PF(bp) && + hw_resc->resv_irqs != nq) + return true; + return false; +} + +static void bnxt_copy_reserved_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + hwr->tx = hw_resc->resv_tx_rings; + if (BNXT_NEW_RM(bp)) { + hwr->rx = hw_resc->resv_rx_rings; + hwr->cp = hw_resc->resv_irqs; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hwr->cp_p5 = hw_resc->resv_cp_rings; + hwr->grp = hw_resc->resv_hw_ring_grps; + hwr->vnic = hw_resc->resv_vnics; + hwr->stat = hw_resc->resv_stat_ctxs; + hwr->rss_ctx = hw_resc->resv_rsscos_ctxs; + } +} + +static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + return hwr->tx && hwr->rx && hwr->cp && hwr->grp && hwr->vnic && + hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)); +} + +static int bnxt_get_avail_msix(struct bnxt *bp, int num); + +static int __bnxt_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_rings hwr = {0}; + int cp = bp->cp_nr_rings; + int rx_rings, rc; + int ulp_msix = 0; + bool sh = false; + int tx_cp; + + if (!bnxt_need_reserve_rings(bp)) + return 0; + + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + if (!ulp_msix) + bnxt_set_ulp_stat_ctxs(bp, 0); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + hwr.cp = cp + ulp_msix; + } else { + hwr.cp = bnxt_nq_rings_in_use(bp); + } + + hwr.tx = bp->tx_nr_rings; + hwr.rx = bp->rx_nr_rings; + if 
(bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + hwr.cp_p5 = hwr.rx + hwr.tx + bnxt_mpc_cp_rings_in_use(bp); + hwr.tx += bnxt_mpc_tx_rings_in_use(bp); + } + + hwr.vnic = bnxt_get_total_vnics(bp, hwr.rx); + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + hwr.rx <<= 1; + hwr.grp = bp->rx_nr_rings; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); + hwr.stat = bnxt_get_func_stat_ctxs(bp); + + rc = bnxt_hwrm_reserve_rings(bp, &hwr); + if (rc) + return rc; + + bnxt_copy_reserved_rings(bp, &hwr); + + rx_rings = hwr.rx; + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (hwr.rx >= 2) { + rx_rings = hwr.rx >> 1; + } else { + if (netif_running(bp->dev)) + return -ENOMEM; + + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + bnxt_set_ring_params(bp); + } + } + rx_rings = min_t(int, rx_rings, hwr.grp); + hwr.cp = min_t(int, hwr.cp, bp->cp_nr_rings); + if (hwr.stat > bnxt_get_ulp_stat_ctxs(bp)) + hwr.stat -= bnxt_get_ulp_stat_ctxs(bp); + hwr.cp = min_t(int, hwr.cp, hwr.stat); + hwr.tx -= bnxt_mpc_tx_rings_in_use(bp); + rc = bnxt_trim_rings(bp, &rx_rings, &hwr.tx, hwr.cp, sh); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + hwr.rx = rx_rings << 1; + tx_cp = bnxt_num_tx_to_cp(bp, hwr.tx); + hwr.cp = sh ? 
max_t(int, tx_cp, rx_rings) : tx_cp + rx_rings; + bp->tx_nr_rings = hwr.tx; + + /* If we cannot reserve all the RX rings, reset the RSS map only + * if absolutely necessary + */ + if (rx_rings != bp->rx_nr_rings) { + netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", + rx_rings, bp->rx_nr_rings); + bnxt_clear_usr_fltrs(bp, true); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_free_ntp_fltrs(bp, false); + + if (netif_is_rxfh_configured(bp->dev) && + (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != + bnxt_get_nr_rss_ctxs(bp, rx_rings) || + bnxt_get_max_rss_ring(bp) >= rx_rings)) { + netdev_warn(bp->dev, "RSS table entries reverting to default\n"); + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + } + } + bp->rx_nr_rings = rx_rings; + bp->cp_nr_rings = hwr.cp; + + if (!bnxt_rings_ok(bp, &hwr)) + return -ENOMEM; + + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp, NULL); + + if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) { + int resv_msix, resv_ctx, ulp_ctxs; + struct bnxt_hw_resc *hw_resc; + + hw_resc = &bp->hw_resc; + resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings; + ulp_msix = min_t(int, resv_msix, ulp_msix); + bnxt_set_ulp_msix_num(bp, ulp_msix); + resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings; + ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp)); + bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs); + } + + return rc; +} + +static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_vf_cfg_input *req; + u32 flags; + + if (!BNXT_NEW_RM(bp)) + return 0; + + req = __bnxt_hwrm_reserve_vf_rings(bp, hwr); + if (!req) + return -ENOMEM; + + flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST | + FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + flags |= 
FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; + + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); +} + +static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + struct hwrm_func_cfg_input *req; + u32 flags; + + req = __bnxt_hwrm_reserve_pf_rings(bp, hwr); + if (!req) + return -ENOMEM; + + flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; + if (BNXT_NEW_RM(bp)) { + flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST | + FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST; + } else { + flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; + } + } + + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); +} + +static int bnxt_hwrm_check_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr) +{ + if (bp->hwrm_spec_code < 0x10801) + return 0; + + if (BNXT_PF(bp)) + return bnxt_hwrm_check_pf_rings(bp, hwr); + + return bnxt_hwrm_check_vf_rings(bp, hwr); +} + +static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + struct hwrm_ring_aggint_qcaps_output *resp; + struct hwrm_ring_aggint_qcaps_input *req; + int rc; + + coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; + coal_cap->num_cmpl_dma_aggr_max = 63; + coal_cap->num_cmpl_dma_aggr_during_int_max = 63; + coal_cap->cmpl_aggr_dma_tmr_max = 65535; + coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535; + coal_cap->int_lat_tmr_min_max = 65535; + coal_cap->int_lat_tmr_max_max = 65535; + coal_cap->num_cmpl_aggr_int_max = 65535; + coal_cap->timer_units = 80; + + if (bp->hwrm_spec_code < 0x10902) + return; + + if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) { + coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); + 
coal_cap->nq_params = le32_to_cpu(resp->nq_params); + coal_cap->num_cmpl_dma_aggr_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_max); + coal_cap->num_cmpl_dma_aggr_during_int_max = + le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max); + coal_cap->cmpl_aggr_dma_tmr_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_max); + coal_cap->cmpl_aggr_dma_tmr_during_int_max = + le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max); + coal_cap->int_lat_tmr_min_max = + le16_to_cpu(resp->int_lat_tmr_min_max); + coal_cap->int_lat_tmr_max_max = + le16_to_cpu(resp->int_lat_tmr_max_max); + coal_cap->num_cmpl_aggr_int_max = + le16_to_cpu(resp->num_cmpl_aggr_int_max); + coal_cap->timer_units = le16_to_cpu(resp->timer_units); + } + hwrm_req_drop(bp, req); +} + +static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + + return usec * 1000 / coal_cap->timer_units; +} + +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, + struct bnxt_coal *hw_coal, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u16 val, tmr, max, flags = hw_coal->flags; + u32 cmpl_params = coal_cap->cmpl_params; + + max = hw_coal->bufs_per_record * 128; + if (hw_coal->budget) + max = hw_coal->bufs_per_record * hw_coal->budget; + max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max); + + val = clamp_t(u16, hw_coal->coal_bufs, 1, max); + req->num_cmpl_aggr_int = cpu_to_le16(val); + + val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max); + req->num_cmpl_dma_aggr = cpu_to_le16(val); + + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, + coal_cap->num_cmpl_dma_aggr_during_int_max); + req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); + + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks); + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max); + req->int_lat_tmr_max = cpu_to_le16(tmr); + + /* min timer set to 1/2 of interrupt timer */ + if (cmpl_params & 
RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) { + val = tmr / 2; + val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max); + req->int_lat_tmr_min = cpu_to_le16(val); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + } + + /* buf timer set to 1/4 of interrupt timer */ + val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max); + req->cmpl_aggr_dma_tmr = cpu_to_le16(val); + + if (cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) { + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq); + val = clamp_t(u16, tmr, 1, + coal_cap->cmpl_aggr_dma_tmr_during_int_max); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val); + req->enables |= + cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE); + } + + if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) && + hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; + req->flags = cpu_to_le16(flags); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); +} + +static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, + struct bnxt_coal *hw_coal) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + u32 nq_params = coal_cap->nq_params; + u16 tmr; + int rc; + + if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req->flags = + cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); + + tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; + tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); + req->int_lat_tmr_min = cpu_to_le16(tmr); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return hwrm_req_send(bp, req); +} + +int 
bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal coal; + int rc; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. + */ + memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); + + coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; + coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; + + if (!bnapi->rx_ring) + return -ENODEV; + + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + bnxt_hwrm_set_coal_params(bp, &coal, req_rx); + + req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); + + return hwrm_req_send(bp, req_rx); +} + +static int +bnxt_hwrm_set_rx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + u16 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); + + req->ring_id = cpu_to_le16(ring_id); + return hwrm_req_send(bp, req); +} + +static int +bnxt_hwrm_set_tx_coal(struct bnxt *bp, struct bnxt_napi *bnapi, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + struct bnxt_tx_ring_info *txr; + int i, rc; + + bnxt_for_each_napi_tx(i, bnapi, txr) { + u16 ring_id; + + ring_id = bnxt_cp_ring_for_tx(bp, txr); + req->ring_id = cpu_to_le16(ring_id); + rc = hwrm_req_send(bp, req); + if (rc) + return rc; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return 0; + } + return 0; +} + +int bnxt_hwrm_set_coal(struct bnxt *bp) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx; + int i, rc; + + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) { + hwrm_req_drop(bp, req_rx); + return rc; + } + + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); + + 
hwrm_req_hold(bp, req_rx); + hwrm_req_hold(bp, req_tx); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_coal *hw_coal; + + if (!bnapi->rx_ring) + rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); + else + rc = bnxt_hwrm_set_rx_coal(bp, bnapi, req_rx); + if (rc) + break; + + cpr = &bnapi->cp_ring; + cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + continue; + + if (bnapi->rx_ring && bnapi->tx_ring[0]) { + rc = bnxt_hwrm_set_tx_coal(bp, bnapi, req_tx); + if (rc) + break; + } + if (bnapi->rx_ring) + hw_coal = &bp->rx_coal; + else + hw_coal = &bp->tx_coal; + __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); + } + hwrm_req_drop(bp, req_rx); + hwrm_req_drop(bp, req_tx); + return rc; +} + +static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) +{ + struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; + struct hwrm_stat_ctx_free_input *req; + int i; + + if (!bp->bnapi) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return; + + if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) + return; + if (BNXT_FW_MAJ(bp) <= 20) { + if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { + hwrm_req_drop(bp, req); + return; + } + hwrm_req_hold(bp, req0); + } + hwrm_req_hold(bp, req); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { + req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + if (req0) { + req0->stat_ctx_id = req->stat_ctx_id; + hwrm_req_send(bp, req0); + } + hwrm_req_send(bp, req); + + cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; + } + } + hwrm_req_drop(bp, req); + if (req0) + hwrm_req_drop(bp, req0); +} + +static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) +{ + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + int rc, i; 
+ + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; + + req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); + + resp = hwrm_req_hold(bp, req); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); + + rc = hwrm_req_send(bp, req); + if (rc) + break; + + cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id); + + bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_func_qcfg(struct bnxt *bp) +{ + struct hwrm_func_qcfg_output *resp; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_qcfg_input *req; + u16 flags, dflt_mtu; + u16 svif_info; + int rc; + + bp->func_svif = BNXT_SVIF_INVALID; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto func_qcfg_exit; + + svif_info = le16_to_cpu(resp->svif_info); + if (svif_info & FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID) + bp->func_svif = svif_info & + FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK; +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) { + struct bnxt_vf_info *vf = &bp->vf; + + vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + } else { + bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs); + } +#endif + flags = le16_to_cpu(resp->flags); + if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | + FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { + bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT; + if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) + bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; + } + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; + if (BNXT_PF(bp) && (flags & 
FUNC_QCFG_RESP_FLAGS_MULTI_ROOT)) + bp->flags |= BNXT_FLAG_MULTI_ROOT; + if (flags & FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED) + bp->fw_cap |= BNXT_FW_CAP_SECURE_MODE; + if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) + bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; + if (flags & FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV) + bp->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV; + + switch (resp->port_partition_type) { + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: + bp->port_partition_type = resp->port_partition_type; + break; + } + if (bp->hwrm_spec_code < 0x10707 || + resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) + bp->br_mode = BRIDGE_MODE_VEB; + else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) + bp->br_mode = BRIDGE_MODE_VEPA; + else + bp->br_mode = BRIDGE_MODE_UNDEF; + + bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); + if (!bp->max_mtu) + bp->max_mtu = BNXT_MAX_MTU; + + dflt_mtu = le16_to_cpu(resp->admin_mtu); + if (dflt_mtu >= ETH_ZLEN && dflt_mtu <= bp->max_mtu) { + bp->fw_dflt_mtu = dflt_mtu; + if ((bp->fw_cap & BNXT_FW_CAP_SECURE_MODE) && + !(bp->fw_cap & BNXT_FW_CAP_ADMIN_PF)) + bp->fw_cap |= BNXT_FW_CAP_ADMIN_MTU; + } else { + bp->fw_dflt_mtu = 0; + } + + if (bp->db_size) + goto func_qcfg_exit; + + bp->db_offset = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024; + if (BNXT_CHIP_P5(bp)) { + if (BNXT_PF(bp)) + bp->db_offset = DB_PF_OFFSET_P5; + else + bp->db_offset = DB_VF_OFFSET_P5; + } + bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) * + 1024); + if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) || + bp->db_size <= bp->db_offset) + bp->db_size = pci_resource_len(bp->pdev, 2); + + if (BNXT_PF(bp)) + pf->dflt_vnic_id = le16_to_cpu(resp->dflt_vnic_id); + +func_qcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static bool bnxt_ptp_5745x_supported(struct bnxt *bp) 
+{ + if (BNXT_CHIP_NUM_5745X(bp->chip_num)) { + u32 fw_maj = BNXT_FW_MAJ(bp), fw_min = BNXT_FW_MIN(bp), + fw_bld = BNXT_FW_BLD(bp), fw_rsv = BNXT_FW_RSV(bp); + + if (fw_maj == 219 || fw_maj < 218) + return false; + + switch (fw_maj) { + case 218: + if (fw_min == 1 || (fw_min == 0 && fw_bld < 208)) + return false; + break; + case 220: + if (fw_min == 0 && fw_bld == 0 && fw_rsv < 54) + return false; + break; + } + } + return true; +} + +static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) +{ + struct hwrm_port_mac_ptp_qcfg_output *resp; + struct hwrm_port_mac_ptp_qcfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + bool phc_cfg; + u8 flags; + int rc; + + if (bp->hwrm_spec_code < 0x10801 || !bnxt_ptp_5745x_supported(bp)) { + rc = -EOPNOTSUPP; + goto no_ptp; + } + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); + if (rc) + goto no_ptp; + + req->port_id = cpu_to_le16(bp->pf.port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + + flags = resp->flags; + if (BNXT_CHIP_P5_MINUS(bp) && !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { + rc = -EOPNOTSUPP; + goto exit; + } + + if (!ptp) + ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); + if (!ptp) { + rc = -ENOMEM; + goto exit; + } + + if (flags & (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | + PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { + ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); + ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); + } else if (BNXT_CHIP_P5(bp)) { + ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; + ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; + } + + ptp->bp = bp; + bp->ptp_cfg = ptp; + + phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0; + rc = bnxt_ptp_init(bp, phc_cfg); + if (rc) + netdev_warn(bp->dev, "PTP initialization failed.\n"); +exit: + hwrm_req_drop(bp, req); + if (!rc) + return 0; + +no_ptp: + bnxt_ptp_clear(bp); + kfree(ptp); + bp->ptp_cfg = NULL; + 
return rc; +} + +static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm, + u8 init_val, u8 init_offset, + bool init_mask_set) +{ + ctxm->init_value = init_val; + ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET; + if (init_mask_set) + ctxm->init_offset = init_offset * 4; + else + ctxm->init_value = 0; +} + +static int bnxt_alloc_all_ctx_pg_info(struct bnxt *bp, int ctx_max) +{ + struct bnxt_ctx_mem_info *ctx = bp->ctx; + u16 type; + + for (type = 0; type < ctx_max; type++) { + struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + int n = 1; + + if (!ctxm->max_entries || ctxm->pg_info) + continue; + + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_KERNEL); + if (!ctxm->pg_info) + return -ENOMEM; + } + return 0; +} + +static void bnxt_init_ctx_v2_driver_managed(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm) +{ + switch (ctxm->type) { + case BNXT_CTX_SQDBS: + case BNXT_CTX_RQDBS: + case BNXT_CTX_SRQDBS: + case BNXT_CTX_CQDBS: + if (bp->hdbr_info.hdbr_enabled) { + ctxm->entry_size = PAGE_SIZE_4K; + ctxm->min_entries = 1; + ctxm->max_entries = 1; + } + break; + } +} + +#define BNXT_CTX_INIT_VALID(flags) \ + (!!((flags) & \ + FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT)) + +static int bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_v2_output *resp; + struct hwrm_func_backing_store_qcaps_v2_input *req; + struct bnxt_ctx_mem_info *ctx; + u16 type; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS_V2); + if (rc) + return rc; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + bp->ctx = ctx; + + resp = hwrm_req_hold(bp, req); + + for (type = 0; type < BNXT_CTX_V2_MAX; ) { + struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + u8 init_val, init_off, i; + __le32 *p; + u32 flags; + + req->type = cpu_to_le16(type); + rc = hwrm_req_send(bp, req); + if (rc) + goto ctx_done; + 
flags = le32_to_cpu(resp->flags); + type = le16_to_cpu(resp->next_valid_type); + if (!(flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID)) + continue; + + ctxm->type = le16_to_cpu(resp->type); + ctxm->flags = flags; + if (flags & FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY) { + bnxt_init_ctx_v2_driver_managed(bp, ctxm); + continue; + } + ctxm->entry_size = le16_to_cpu(resp->entry_size); + ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map); + ctxm->entry_multiple = resp->entry_multiple; + ctxm->max_entries = le32_to_cpu(resp->max_num_entries); + ctxm->min_entries = le32_to_cpu(resp->min_num_entries); + init_val = resp->ctx_init_value; + init_off = resp->ctx_init_offset; + bnxt_init_ctx_initializer(ctxm, init_val, init_off, + BNXT_CTX_INIT_VALID(flags)); + ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt, + BNXT_MAX_SPLIT_ENTRY); + for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt; + i++, p++) + ctxm->split[i] = le32_to_cpu(*p); + } + rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_V2_MAX); + +ctx_done: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) +{ + struct hwrm_func_backing_store_qcaps_output *resp; + struct hwrm_func_backing_store_qcaps_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10902 || bp->ctx) + return 0; + + if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) + return bnxt_hwrm_func_backing_store_qcaps_v2(bp); + + if (BNXT_VF(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) { + struct bnxt_ctx_mem_type *ctxm; + struct bnxt_ctx_mem_info *ctx; + u8 init_val, init_idx = 0; + u16 init_mask; + + ctx = bp->ctx; + if (!ctx) { + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } + bp->ctx = ctx; + } + init_val = resp->ctx_kind_initializer; + init_mask = 
le16_to_cpu(resp->ctx_init_mask); + + ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; + ctxm->max_entries = le32_to_cpu(resp->qp_max_entries); + ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries); + ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries); + ctxm->qp_fast_qpmd_entries = le16_to_cpu(resp->fast_qpmd_qp_num_entries); + ctxm->entry_size = le16_to_cpu(resp->qp_entry_size); + bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; + ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries); + ctxm->max_entries = le32_to_cpu(resp->srq_max_entries); + ctxm->entry_size = le16_to_cpu(resp->srq_entry_size); + bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; + ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries); + ctxm->max_entries = le32_to_cpu(resp->cq_max_entries); + ctxm->entry_size = le16_to_cpu(resp->cq_entry_size); + bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; + ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries); + ctxm->max_entries = ctxm->vnic_entries + + le16_to_cpu(resp->vnic_max_ring_table_entries); + ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size); + bnxt_init_ctx_initializer(ctxm, init_val, + resp->vnic_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; + ctxm->max_entries = le32_to_cpu(resp->stat_max_entries); + ctxm->entry_size = le16_to_cpu(resp->stat_entry_size); + bnxt_init_ctx_initializer(ctxm, init_val, + resp->stat_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; + ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size); + ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring); + ctxm->max_entries = 
le32_to_cpu(resp->tqm_max_entries_per_ring); + ctxm->entry_multiple = resp->tqm_entries_multiple; + if (!ctxm->entry_multiple) + ctxm->entry_multiple = 1; + + memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm)); + + ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; + ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries); + ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size); + ctxm->mrav_num_entries_units = + le16_to_cpu(resp->mrav_num_entries_units); + bnxt_init_ctx_initializer(ctxm, init_val, + resp->mrav_init_offset, + (init_mask & (1 << init_idx++)) != 0); + + ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; + ctxm->entry_size = le16_to_cpu(resp->tim_entry_size); + ctxm->max_entries = le32_to_cpu(resp->tim_max_entries); + + ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count; + if (!ctx->tqm_fp_rings_count) + ctx->tqm_fp_rings_count = bp->tx_max_q; + else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS) + ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS; + if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS && + bp->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) { + ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext; + if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS) + ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS; + } + ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; + memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm)); + ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1; + + rc = bnxt_alloc_all_ctx_pg_info(bp, BNXT_CTX_MAX); + } else { + rc = 0; + } +ctx_err: + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, + __le64 *pg_dir) +{ + if (!rmem->nr_pages) + return; + + BNXT_SET_CTX_PAGE_ATTR(*pg_attr); + if (rmem->depth >= 1) { + if (rmem->depth == 2) + *pg_attr |= 2; + else + *pg_attr |= 1; + *pg_dir = cpu_to_le64(rmem->pg_tbl_map); + } else { + *pg_dir = cpu_to_le64(rmem->dma_arr[0]); + } +} + +#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \ + 
(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \ + FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) + +static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) +{ + struct hwrm_func_backing_store_cfg_input *req; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_type *ctxm; + void **__req = (void **)&req; + u32 req_len = sizeof(*req); + __le32 *num_entries; + u32 ena, flags = 0; + __le64 *pg_dir; + u8 *pg_attr; + int i, rc; + + if (!ctx) + return 0; + + if (req_len > bp->hwrm_max_ext_req_len) + req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; + rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); + if (rc) + return rc; + + req->enables = cpu_to_le32(enables); + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { + ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; + ctx_pg = ctxm->pg_info; + req->qp_num_entries = cpu_to_le32(ctx_pg->entries); + req->qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries); + req->qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries); + req->qp_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->qpc_pg_size_qpc_lvl, + &req->qpc_page_dir); + + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD) + req->qp_num_fast_qpmd_entries = cpu_to_le16(ctxm->qp_fast_qpmd_entries); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { + ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; + ctx_pg = ctxm->pg_info; + req->srq_num_entries = cpu_to_le32(ctx_pg->entries); + req->srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries); + req->srq_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->srq_pg_size_srq_lvl, + &req->srq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { + ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; + ctx_pg = ctxm->pg_info; + req->cq_num_entries = 
cpu_to_le32(ctx_pg->entries); + req->cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries); + req->cq_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->cq_pg_size_cq_lvl, + &req->cq_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { + u32 units; + + ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; + ctx_pg = ctxm->pg_info; + req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); + units = ctxm->mrav_num_entries_units; + if (units) { + u32 num_mr, num_ah = ctxm->mrav_av_entries; + + num_mr = ctx_pg->entries - num_ah; + req->mrav_num_entries = ((num_mr / units) << 16) | + (num_ah / units); + flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; + } + req->mrav_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->mrav_pg_size_mrav_lvl, + &req->mrav_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { + ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; + ctx_pg = ctxm->pg_info; + req->tim_num_entries = cpu_to_le32(ctx_pg->entries); + req->tim_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->tim_pg_size_tim_lvl, + &req->tim_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { + ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; + ctx_pg = ctxm->pg_info; + req->vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries); + req->vnic_num_ring_table_entries = + cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries); + req->vnic_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->vnic_pg_size_vnic_lvl, + &req->vnic_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { + ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; + ctx_pg = ctxm->pg_info; + req->stat_num_entries = cpu_to_le32(ctxm->max_entries); + req->stat_entry_size = cpu_to_le16(ctxm->entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->stat_pg_size_stat_lvl, + &req->stat_page_dir); + } + ctxm = 
&ctx->ctx_arr[BNXT_CTX_STQM]; + for (i = 0, num_entries = &req->tqm_sp_num_entries, + pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req->tqm_sp_page_dir, + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP, + ctx_pg = ctxm->pg_info; + i < BNXT_MAX_TQM_LEGACY_RINGS; + ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i], + i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { + if (!(enables & ena)) + continue; + + req->tqm_entry_size = cpu_to_le16(ctxm->entry_size); + *num_entries = cpu_to_le32(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8) { + pg_attr = &req->tqm_ring8_pg_size_tqm_ring_lvl; + pg_dir = &req->tqm_ring8_page_dir; + ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8]; + req->tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); + } + req->flags = cpu_to_le32(flags); + return hwrm_req_send(bp, req); +} + +static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + rmem->page_size = BNXT_PAGE_SIZE; + rmem->pg_arr = ctx_pg->ctx_pg_arr; + rmem->dma_arr = ctx_pg->ctx_dma_arr; + rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + if (rmem->depth >= 1) + rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG; + return bnxt_alloc_ring(bp, rmem); +} + +static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth, struct bnxt_ctx_mem_type *ctxm) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + int rc; + + if (!mem_size) + return -EINVAL; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { + ctx_pg->nr_pages = 0; + return -EINVAL; + } + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { + int nr_tbls, i; + + rmem->depth = 2; + ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg), + GFP_KERNEL); + if 
(!ctx_pg->ctx_pg_tbl) + return -ENOMEM; + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); + rmem->nr_pages = nr_tbls; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + if (rc) + return rc; + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + ctx_pg->ctx_pg_tbl[i] = pg_tbl; + rmem = &pg_tbl->ring_mem; + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; + rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; + rmem->depth = 1; + rmem->nr_pages = MAX_CTX_PAGES; + rmem->ctx_mem = ctxm; + if (i == (nr_tbls - 1)) { + int rem = ctx_pg->nr_pages % MAX_CTX_PAGES; + + if (rem) + rmem->nr_pages = rem; + } + rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); + if (rc) + break; + } + } else { + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (rmem->nr_pages > 1 || depth) + rmem->depth = 1; + rmem->ctx_mem = ctxm; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + } + return rc; +} + +static int bnxt_copy_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, void *buf, size_t offset) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + size_t len = 0, total_len = 0; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem2; + + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + len = bnxt_copy_ring(bp, rmem2, buf, offset); + offset += len; + total_len += len; + } + } else { + len = bnxt_copy_ring(bp, rmem, buf, offset); + offset += len; + total_len += len; + } + + return total_len; +} + +static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; 
i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem2; + + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + bnxt_free_ring(bp, rmem2); + ctx_pg->ctx_pg_arr[i] = NULL; + kfree(pg_tbl); + ctx_pg->ctx_pg_tbl[i] = NULL; + } + kfree(ctx_pg->ctx_pg_tbl); + ctx_pg->ctx_pg_tbl = NULL; + } + bnxt_free_ring(bp, rmem); + ctx_pg->nr_pages = 0; +} + +static int bnxt_setup_ctxm_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_mem_type *ctxm, u32 entries, + u8 pg_lvl) +{ + struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; + int i, rc = 0, n = 1; + u32 mem_size; + + if (!ctxm->entry_size || !ctx_pg) + return -EINVAL; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + if (ctxm->entry_multiple) + entries = roundup(entries, ctxm->entry_multiple); + entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries); + mem_size = entries * ctxm->entry_size; + for (i = 0; i < n && !rc; i++) { + ctx_pg[i].entries = entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, &ctx_pg[i], mem_size, pg_lvl, + ctxm->init_value ? 
ctxm : NULL); + } + if (!rc) + ctxm->mem_valid = 1; + return rc; +} + +static int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt *bp, + struct bnxt_ctx_mem_type *ctxm, + bool last) +{ + struct hwrm_func_backing_store_cfg_v2_input *req; + u32 instance_bmap = ctxm->instance_bmap; + int i, j, rc = 0, n = 1; + __le32 *p; + + if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info) + return 0; + + if (instance_bmap) + n = hweight32(ctxm->instance_bmap); + else + instance_bmap = 1; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); + if (rc) + return rc; + hwrm_req_hold(bp, req); + req->type = cpu_to_le16(ctxm->type); + req->entry_size = cpu_to_le16(ctxm->entry_size); + req->subtype_valid_cnt = ctxm->split_entry_cnt; + for (i = 0, p = &req->split_entry_0; i < ctxm->split_entry_cnt; i++) + p[i] = cpu_to_le32(ctxm->split[i]); + for (i = 0, j = 0; j < n && !rc; i++) { + struct bnxt_ctx_pg_info *ctx_pg; + + if (!(instance_bmap & (1 << i))) + continue; + req->instance = cpu_to_le16(i); + ctx_pg = &ctxm->pg_info[j++]; + if (!ctx_pg->entries) + continue; + req->num_entries = cpu_to_le32(ctx_pg->entries); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->page_size_pbl_level, + &req->page_dir); + if (last && j == n) + req->flags = + cpu_to_le32(FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE); + rc = hwrm_req_send(bp, req); + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_backing_store_cfg_v2(struct bnxt *bp, u32 ena) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_mem_type *ctxm; + struct bnxt_ring_mem_info *rmem; + u16 last_type = BNXT_CTX_INV; + int rc = 0; + u16 type; + + if (BNXT_PF(bp) && ktls) { +#ifdef HAVE_KTLS + ctxm = &ctx->ctx_arr[BNXT_CTX_TCK]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ktls->tck.max_ctx, 1); + if (rc) + return rc; + ctxm = &ctx->ctx_arr[BNXT_CTX_RCK]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, 
ktls->rck.max_ctx, 1); + if (rc) + return rc; + last_type = BNXT_CTX_RCK; +#endif + } + + if (BNXT_PF(bp) && mpc && mpc->mpc_chnls_cap) { + ctxm = &ctx->ctx_arr[BNXT_CTX_MTQM]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, BNXT_MAX_MPC, 1); + if (rc) + return rc; + last_type = BNXT_CTX_MTQM; + } + + if (bp->hdbr_info.hdbr_enabled) { + for (type = BNXT_CTX_SQDBS; type <= BNXT_CTX_CQDBS; type++) { + ctxm = &ctx->ctx_arr[type]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 0); + if (rc) + return rc; + rmem = &ctxm->pg_info[0].ring_mem; + rc = bnxt_hdbr_ktbl_init(bp, type - BNXT_CTX_SQDBS, + rmem->pg_arr[0], rmem->dma_arr[0]); + if (rc) + return rc; + } + last_type = BNXT_CTX_CQDBS; + } + + if (BNXT_PF(bp)) { + for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) { + ctxm = &ctx->ctx_arr[type]; + if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID)) + continue; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); + if (rc) { + netdev_warn(bp->dev, "Unable to setup ctx page for type:0x%x.\n", type); + rc = 0; + continue; + } + bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE); + last_type = type; + } + } + + if (last_type == BNXT_CTX_INV) { + if (!ena) + return 0; + else if (ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) + last_type = BNXT_CTX_MAX - 1; + else + last_type = BNXT_CTX_L2_MAX - 1; + } + ctx->ctx_arr[last_type].last = 1; + + for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) { + ctxm = &ctx->ctx_arr[type]; + + if (!ctxm->mem_valid) + continue; + rc = bnxt_hwrm_func_backing_store_cfg_v2(bp, ctxm, ctxm->last); + if (rc) + return rc; + } + return 0; +} + +int bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, void *buf, size_t offset) +{ + struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; + size_t len = 0, total_len = 0; + int i, n = 1; + + if (!ctx_pg) + return 0; + + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + for (i = 0; i < n; i++) { + len = bnxt_copy_ctx_pg_tbls(bp, &ctx_pg[i], buf, 
offset); + offset += len; + total_len += len; + } + return total_len; +} + +void bnxt_free_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_mem_info *ctx = bp->ctx; + u16 type; + + if (!ctx) + return; + + /* + * Driver owned memory have their own data structure and additional + * pages attached to context page. Need to free first. + */ + if (bp->hdbr_info.hdbr_enabled) + for (type = BNXT_CTX_SQDBS; type <= BNXT_CTX_CQDBS; type++) { + bnxt_hdbr_l2_uninit(bp, type - BNXT_CTX_SQDBS); + bnxt_hdbr_ktbl_uninit(bp, type - BNXT_CTX_SQDBS); + } + + for (type = 0; type < BNXT_CTX_V2_MAX; type++) { + struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type]; + struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info; + int i, n = 1; + + if (!ctx_pg) + continue; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + for (i = 0; i < n; i++) + bnxt_free_ctx_pg_tbls(bp, &ctx_pg[i]); + + kfree(ctx_pg); + ctxm->pg_info = NULL; + } + + ctx->flags &= ~BNXT_CTX_FLAG_INITED; + kfree(ctx); + bp->ctx = NULL; +} + +static int bnxt_alloc_ctx_mem(struct bnxt *bp) +{ + struct bnxt_ctx_mem_type *ctxm; + struct bnxt_ctx_mem_info *ctx; + u32 l2_qps, qp1_qps, max_qps; + u32 ena, entries_sp, entries; + u32 srqs, max_srqs, min; + u32 fast_qpmd_qps = 0; + u32 num_mr, num_ah; + u32 extra_srqs = 0; + u32 extra_qps = 0; + u8 pg_lvl = 1; + int i, rc; + + rc = bnxt_hwrm_func_backing_store_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n", + rc); + return rc; + } + ctx = bp->ctx; + if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) + return 0; + + ena = 0; + if (BNXT_VF(bp)) + goto skip_legacy; + + ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; + l2_qps = ctxm->qp_l2_entries; + qp1_qps = ctxm->qp_qp1_entries; + fast_qpmd_qps = ctxm->qp_fast_qpmd_entries; + max_qps = ctxm->max_entries; + ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; + srqs = ctxm->srq_l2_entries; + max_srqs = ctxm->max_entries; + if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) { + pg_lvl = 2; + if 
(BNXT_SW_RES_LMT(bp)) { + extra_qps = max_qps - l2_qps - qp1_qps; + extra_srqs = max_srqs - srqs; + } else { + extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps); + /* allocate extra qps if fw supports RoCE fast qp destroy feature */ + extra_qps += fast_qpmd_qps; + extra_srqs = min_t(u32, 8192, max_srqs - srqs); + } + if (fast_qpmd_qps) + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD; + } + + ctxm = &ctx->ctx_arr[BNXT_CTX_QP]; + ctxm->qp_fast_qpmd_entries = fast_qpmd_qps; + if (!(ena & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD)) + ctxm->qp_fast_qpmd_entries = 0; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, + pg_lvl); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, srqs + extra_srqs, pg_lvl); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNXT_CTX_CQ]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->cq_l2_entries + + extra_qps * 2, pg_lvl); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNXT_CTX_STAT]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, ctxm->max_entries, 1); + if (rc) + return rc; + + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + goto skip_rdma; + + ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV]; + if (BNXT_SW_RES_LMT(bp) && + ctxm->split_entry_cnt == BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1) { + num_ah = ctxm->mrav_av_entries; + num_mr = ctxm->max_entries - num_ah; + } else { + /* 128K extra is needed to accommodate static AH context + * allocation by f/w. 
+ */ + num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256); + num_ah = min_t(u32, num_mr, 1024 * 128); + ctxm->split_entry_cnt = BNXT_CTX_MRAV_AV_SPLIT_ENTRY + 1; + if (!ctxm->mrav_av_entries || ctxm->mrav_av_entries > num_ah) + ctxm->mrav_av_entries = num_ah; + } + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, num_mr + num_ah, 2); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; + + ctxm = &ctx->ctx_arr[BNXT_CTX_TIM]; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, l2_qps + qp1_qps + extra_qps, 1); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; + +skip_rdma: + ctxm = &ctx->ctx_arr[BNXT_CTX_STQM]; + min = ctxm->min_entries; + entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps + + 2 * (extra_qps + qp1_qps) + min; + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries_sp, 2); + if (rc) + return rc; + + ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM]; + entries = l2_qps + 2 * (extra_qps + qp1_qps); + rc = bnxt_setup_ctxm_pg_tbls(bp, ctxm, entries, 2); + if (rc) + return rc; + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) { + if (i < BNXT_MAX_TQM_LEGACY_RINGS) + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; + else + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8; + } + ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES; + +skip_legacy: + if (bp->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2) + rc = bnxt_backing_store_cfg_v2(bp, ena); + else + rc = bnxt_hwrm_func_backing_store_cfg(bp, ena); + if (rc) { + netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n", + rc); + return rc; + } + ctx->flags |= BNXT_CTX_FLAG_INITED; + return 0; +} + +static int bnxt_hwrm_crash_dump_mem_cfg(struct bnxt *bp) +{ + struct hwrm_dbg_crashdump_medium_cfg_input *req; + u16 page_attr = 0; + int rc; + + if (!(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_MEDIUM_CFG); + if (rc) + return rc; + + BNXT_SET_CRASHDUMP_PAGE_ATTR(page_attr); + req->pg_size_lvl = cpu_to_le16(page_attr | + 
bp->fw_crash_mem->ring_mem.depth); + req->pbl = cpu_to_le64(bp->fw_crash_mem->ring_mem.pg_tbl_map); + req->size = cpu_to_le32(bp->fw_crash_len); + req->output_dest_flags = cpu_to_le16(BNXT_DBG_CR_DUMP_MDM_CFG_DDR); + return hwrm_req_send(bp, req); +} + +static void bnxt_free_crash_dump_mem(struct bnxt *bp) +{ + if (bp->fw_crash_mem) { + bnxt_free_ctx_pg_tbls(bp, bp->fw_crash_mem); + kfree(bp->fw_crash_mem); + bp->fw_crash_len = 0; + bp->fw_crash_mem = NULL; + } +} + +static int bnxt_alloc_crash_dump_mem(struct bnxt *bp) +{ + u32 mem_size = 0; + int rc; + + if (!(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST)) + return 0; + + rc = bnxt_hwrm_get_dump_len(bp, BNXT_DUMP_CRASH, &mem_size); + if (rc) + return rc; + + mem_size = round_up(mem_size, 4); + + if (bp->fw_crash_mem && mem_size == bp->fw_crash_len) + return 0; + + bnxt_free_crash_dump_mem(bp); + + bp->fw_crash_mem = kzalloc(sizeof(*bp->fw_crash_mem), GFP_KERNEL); + if (!bp->fw_crash_mem) + return -ENOMEM; + + rc = bnxt_alloc_ctx_pg_tbls(bp, bp->fw_crash_mem, mem_size, 1, NULL); + if (rc) { + bnxt_free_crash_dump_mem(bp); + return rc; + } + + bp->fw_crash_len = mem_size; + + return 0; +} + +static void bnxt_init_cosq_names(struct bnxt *bp, u32 path_dir) +{ + char **cosq_names = &bp->tx_cosq_names; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return; + + if (path_dir == QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX) + cosq_names = &bp->rx_cosq_names; + + if (!*cosq_names) + *cosq_names = kzalloc(BNXT_COSQ_NAME_ARR_SIZE, GFP_KERNEL); + else + memset(*cosq_names, '\0', BNXT_COSQ_NAME_ARR_SIZE); +} + +static void bnxt_cosq_save_name(struct bnxt *bp, char *queue_name, u8 qid, int offset, + u32 path_dir) +{ + char *cosq_names = bp->tx_cosq_names; + u8 qidx = qid % MAX_COS_PER_PORT; + + if (path_dir == QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX) + cosq_names = bp->rx_cosq_names; + if (cosq_names && queue_name[0] && qidx < BNXT_MAX_QUEUE) + strncpy(&cosq_names[BNXT_MAX_COSQ_NAME_LEN * (offset + qidx)], + queue_name, 
BNXT_MAX_COSQ_NAME_LEN); +} + +static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp, u32 path_dir) +{ + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; + struct bnxt_queue_info *q_info; + char *queue_name_ptr = NULL; + u8 queue_profile, queue_id; + u8 i, j, *qptr, *q_ids; + u8 max_tc, max_lltc; + bool no_rdma; + u8 *max_q; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(path_dir); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto qportcfg_exit; + + if (!resp->max_configurable_queues) { + rc = -EINVAL; + goto qportcfg_exit; + } + if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG) { + bp->is_asym_q = true; + bnxt_init_cosq_names(bp, path_dir); + } else { + bp->is_asym_q = false; + bnxt_free_stats_cosqnames_mem(bp); + } + max_tc = min_t(u8, resp->max_configurable_queues, BNXT_MAX_QUEUE); + max_lltc = resp->max_configurable_lossless_queues; + + no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP); + qptr = &resp->queue_id0; + queue_name_ptr = resp->qid0_name; + + if (path_dir == QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX) { + q_info = bp->tx_q_info; + q_ids = bp->tx_q_ids; + max_q = &bp->tx_max_q; + } else { + q_info = bp->rx_q_info; + q_ids = bp->rx_q_ids; + max_q = &bp->rx_max_q; + } + + for (i = 0, j = 0; i < max_tc; i++) { + bnxt_cosq_save_name(bp, queue_name_ptr, *qptr, 0, path_dir); + queue_name_ptr += BNXT_MAX_COSQ_NAME_LEN; + + queue_id = *qptr; + qptr++; + + queue_profile = *qptr; + qptr++; + + q_info[j].queue_id = queue_id; + q_info[j].queue_profile = queue_profile; + q_ids[i] = queue_id; + + bp->tc_to_qidx[j] = j; + + if (!BNXT_CNPQ(q_info[j].queue_profile) || + (no_rdma && BNXT_PF(bp))) + j++; + } + *max_q = max_tc; + max_tc = max_t(u8, j, 1); + bp->max_tc = bp->max_tc ? min(bp->max_tc, max_tc) : max_tc; + bp->max_lltc = bp->max_lltc ? 
min(bp->max_lltc, max_lltc) : max_lltc; + + if (bp->max_lltc > bp->max_tc) + bp->max_lltc = bp->max_tc; + +qportcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_verify_asym_queues(struct bnxt *bp) +{ + u8 i, lltc = 0; + + if (!bp->max_lltc) + return; + + /* Verify that lossless TX and RX queues are in the same index */ + for (i = 0; i < bp->max_tc; i++) { + if (BNXT_LLQ(bp->tx_q_info[i].queue_profile) && + BNXT_LLQ(bp->rx_q_info[i].queue_profile)) + lltc++; + } + bp->max_lltc = min(bp->max_lltc, lltc); +} + +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) +{ + struct hwrm_func_resource_qcaps_output *resp; + struct hwrm_func_resource_qcaps_input *req; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) + goto hwrm_func_resc_qcaps_exit; + + hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); + if (!all) + goto hwrm_func_resc_qcaps_exit; + + hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); + hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings); + hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings); + hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps); + hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps); + hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs); + hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + hw_resc->min_vnics = le16_to_cpu(resp->min_vnics); + hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); + hw_resc->min_stat_ctxs = 
le16_to_cpu(resp->min_stat_ctx); + hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + + hw_resc->min_tx_key_ctxs = le32_to_cpu(resp->min_ktls_tx_key_ctxs); + hw_resc->max_tx_key_ctxs = le32_to_cpu(resp->max_ktls_tx_key_ctxs); + hw_resc->min_rx_key_ctxs = le32_to_cpu(resp->min_ktls_rx_key_ctxs); + hw_resc->max_rx_key_ctxs = le32_to_cpu(resp->max_ktls_rx_key_ctxs); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + u16 max_msix = le16_to_cpu(resp->max_msix); + + hw_resc->max_nqs = max_msix; + hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; + } + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + pf->vf_resv_strategy = + le16_to_cpu(resp->vf_reservation_strategy); + if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) + pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; + + if (resp->flags & + cpu_to_le16(FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED)) + bp->fw_cap |= BNXT_FW_CAP_VF_RES_MIN_GUARANTEED; + } +hwrm_func_resc_qcaps_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; + u32 flags, flags_ext, flags_ext2; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_func_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED) + bp->flags |= BNXT_FLAG_ROCEV1_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED) + bp->flags |= BNXT_FLAG_ROCEV2_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN; + if (flags & FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_ADMIN_PF; + if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET; + if (flags & 
FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; + if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED; + if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) + bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + if (flags & FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY; + if (flags & FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CRASHDUMP; + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS; + + flags_ext = le32_to_cpu(resp->flags_ext); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_ECN_STATS; + + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PTP_PTM; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_PTP_RTC; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT)) + bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP) + bp->flags |= BNXT_FLAG_TX_COAL_CMPL; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2; + if 
(flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED) + bnxt_alloc_ktls_info(bp, resp); + else + bnxt_free_ktls_info(bp); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED) { +#ifndef BNXT_HDBR_DISABLE + bp->hdbr_info.hdbr_enabled = true; +#else + netdev_info(bp->dev, "HW based doorbell drop recovery disabled\n"); + bp->hdbr_info.hdbr_enabled = false; +#endif + } else { + bp->hdbr_info.hdbr_enabled = false; + } + + flags_ext2 = le32_to_cpu(resp->flags_ext2); + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_DBR_SUPPORTED; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED || + flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED; + if (BNXT_PF(bp) && (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_HW_LAG_SUPPORTED; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) + bp->flags |= BNXT_FLAG_UDP_GSO_CAP; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TIMED_TX_SO_TXTIME; + + bp->tunnel_disable_flag = le16_to_cpu(resp->tunnel_disable_flag); + + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_GENERIC_STATS; + if (BNXT_PF(bp) && + (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_LPBK_STATS; + + if (BNXT_PF(bp) && + (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED; + + if 
(BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; + + /* TODO: enable BNXT_PUSH_MODE_WCB */ + bp->tx_push_mode = BNXT_PUSH_MODE_NONE; + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH; + if (BITS_PER_LONG == 64 && + (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED)) { + bp->tx_push_mode = BNXT_PUSH_MODE_PPP; + bp->tx_push_thresh = BNXT_TX_PUSH_THRESH_PPP; + } else if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && + BNXT_FW_MAJ(bp) > 217) { + bp->tx_push_mode = BNXT_PUSH_MODE_LEGACY; + } + if (BNXT_PF(bp) && + (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_VF_CFG_FOR_PF; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CQ_OVERFLOW_DETECT_DISABLE; + hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); + hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); + hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings); + hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings); + hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps); + if (!hw_resc->max_hw_ring_grps) + hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings; + hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs); + hw_resc->max_vnics = le16_to_cpu(resp->max_vnics); + hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx); + + hw_resc->max_encap_records = le32_to_cpu(resp->max_encap_records); + hw_resc->max_decap_records = le32_to_cpu(resp->max_decap_records); + hw_resc->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows); + hw_resc->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows); + hw_resc->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows); + hw_resc->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows); + + if (BNXT_PF(bp) && + (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_VF_SCALE_SUPPORTED; + + if 
(BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + pf->fw_fid = le16_to_cpu(resp->fid); + pf->port_id = le16_to_cpu(resp->port_id); + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); + pf->first_vf_id = le16_to_cpu(resp->first_vf_id); + pf->max_vfs = le16_to_cpu(resp->max_vfs); + pf->max_msix_vfs = le16_to_cpu(resp->max_msix_vfs); + bp->flags &= ~BNXT_FLAG_WOL_CAP; + if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) { + netif_info(bp, wol, bp->dev, "WOL capable\n"); + bp->flags |= BNXT_FLAG_WOL_CAP; + } else { + netif_notice(bp, wol, bp->dev, "WOL incapable\n"); + } + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED) { + netdev_info(bp->dev, "UDCC supported\n"); + bp->fw_cap |= BNXT_FW_CAP_UDCC_SUPPORTED; + } + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED) { + bp->fw_cap |= BNXT_FW_CAP_TF_RX_NIC_FLOW_SUPPORTED; + netdev_dbg(bp->dev, "PF Rx NIC flow supported\n"); + } + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + + vf->fw_fid = le16_to_cpu(resp->fid); + memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); +#endif + } + + if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { + if (BNXT_CHIP_P5_PLUS(bp) || BNXT_PF(bp)) + bp->fw_cap |= BNXT_FW_CAP_PTP; + } else { + bnxt_ptp_clear(bp); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } + + if (bp->fw_cap & BNXT_FW_CAP_DBR_SUPPORTED) + bp->dbr.enable = 1; + + bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs); + if (!bp->tso_max_segs && BNXT_CHIP_P5(bp)) + bp->tso_max_segs = BNXT_TSO_MAX_SEGS_P5; + + bnxt_alloc_mpc_info(bp, resp->mpc_chnls_cap); + bnxt_alloc_tfc_mpc_info(bp); + +hwrm_func_qcaps_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp) +{ + struct hwrm_dbg_qcaps_output *resp; + struct hwrm_dbg_qcaps_input *req; + u32 flags; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) + return; + + rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS); + if (rc) + return; + + req->fid = 
cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_dbg_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR) + bp->fw_dbg_cap |= BNXT_FW_DBG_CAP_CRASHDUMP_SOC; + if (flags & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR) + bp->fw_dbg_cap |= BNXT_FW_DBG_CAP_CRASHDUMP_HOST; + +hwrm_dbg_qcaps_exit: + hwrm_req_drop(bp, req); +} + +static int bnxt_alloc_fw_health(struct bnxt *bp); +static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp); + +static int bnxt_drv_rgtr(struct bnxt *bp) +{ + int rc; + + /* determine whether we can support error recovery before + * registering with FW + */ + if (bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware error recovery\n"); + } else { + rc = bnxt_hwrm_error_recovery_qcfg(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n", + rc); + } + rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); + if (rc) + return -ENODEV; + return 0; +} + +int bnxt_hwrm_func_qcaps(struct bnxt *bp, bool init) +{ + int tcs = bp->num_tc; + int rc; + + rc = __bnxt_hwrm_func_qcaps(bp); + if (rc) + return rc; + + bnxt_hwrm_dbg_qcaps(bp); + + if (!init) + goto skip_rgtr; + + rc = bnxt_drv_rgtr(bp); + if (rc) + return -ENODEV; + + rc = bnxt_hwrm_queue_qportcfg(bp, + QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", rc); + return rc; + } + + if (bp->is_asym_q) { + rc = bnxt_hwrm_queue_qportcfg(bp, + QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX); + if (rc) { + netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n", rc); + return rc; + } + bnxt_verify_asym_queues(bp); + } else { + bp->rx_max_q = bp->tx_max_q; + memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info)); + memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids)); + } + + if (tcs > bp->max_tc) { + netdev_reset_tc(bp->dev); + bp->num_tc = 0; + netdev_info(bp->dev, "FW cannot support the configured 
traffic classes, resetting to default values\n"); + } + +skip_rgtr: + if (bp->hwrm_spec_code >= 0x10803) { + rc = bnxt_alloc_ctx_mem(bp); + if (rc) + return rc; + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (!rc) + bp->fw_cap |= BNXT_FW_CAP_NEW_RM; + rc = bnxt_hdbr_l2_init(bp); + if (rc) + return rc; + } + return 0; +} + +static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) +{ + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; + u32 flags; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_cfa_adv_qcaps_exit; + + flags = le32_to_cpu(resp->flags); + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; + + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3; + + if (flags & CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE) + bp->fw_cap |= BNXT_FW_CAP_TRUFLOW; + + if (flags & + CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO; + +hwrm_cfa_adv_qcaps_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int __bnxt_alloc_fw_health(struct bnxt *bp) +{ + if (bp->fw_health) + return 0; + + bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL); + if (!bp->fw_health) + return -ENOMEM; + + mutex_init(&bp->fw_health->lock); + return 0; +} + +static int bnxt_alloc_fw_health(struct bnxt *bp) +{ + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) && + !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + rc = __bnxt_alloc_fw_health(bp); + if (rc) { + bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET; + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; + } + + return 0; +} + 
+static inline void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg) +{ + writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 + + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + BNXT_FW_HEALTH_WIN_MAP_OFF); +} + +static void bnxt_inv_fw_health_reg(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_type; + + if (!fw_health) + return; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) + fw_health->status_reliable = false; + + reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) + fw_health->resets_reliable = false; +} + +static void bnxt_try_map_fw_health_reg(struct bnxt *bp) +{ + void __iomem *hs; + u32 status_loc; + u32 reg_type; + u32 sig; + + if (bp->fw_health) + bp->fw_health->status_reliable = false; + + __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC); + hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC); + + sig = readl(hs + offsetof(struct hcomm_status, sig_ver)); + if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) { + if (!bp->chip_num) { + __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE); + bp->chip_num = readl(bp->bar0 + + BNXT_FW_HEALTH_WIN_BASE + + BNXT_GRC_REG_CHIP_NUM); + } + if (!BNXT_CHIP_P5_PLUS(bp)) + return; + + status_loc = BNXT_GRC_REG_STATUS_P5 | + BNXT_FW_HEALTH_REG_TYPE_BAR0; + } else { + status_loc = readl(hs + offsetof(struct hcomm_status, + fw_status_loc)); + } + + if (__bnxt_alloc_fw_health(bp)) { + netdev_warn(bp->dev, "no memory for firmware status checks\n"); + return; + } + + bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc; + reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc); + if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) { + __bnxt_map_fw_health_reg(bp, status_loc); + bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] = + BNXT_FW_HEALTH_WIN_OFF(status_loc); + } + + bp->fw_health->status_reliable = true; +} + +static int 
bnxt_map_fw_health_regs(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg_base = 0xffffffff; + int i; + + bp->fw_health->status_reliable = false; + bp->fw_health->resets_reliable = false; + /* Only pre-map the monitoring GRC registers using window 3 */ + for (i = 0; i < 4; i++) { + u32 reg = fw_health->regs[i]; + + if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC) + continue; + if (reg_base == 0xffffffff) + reg_base = reg & BNXT_GRC_BASE_MASK; + if ((reg & BNXT_GRC_BASE_MASK) != reg_base) + return -ERANGE; + fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg); + } + bp->fw_health->status_reliable = true; + bp->fw_health->resets_reliable = true; + if (reg_base == 0xffffffff) + return 0; + + __bnxt_map_fw_health_reg(bp, reg_base); + return 0; +} + +static void bnxt_remap_fw_health_regs(struct bnxt *bp) +{ + if (!bp->fw_health) + return; + + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) { + bp->fw_health->status_reliable = true; + bp->fw_health->resets_reliable = true; + } else { + bnxt_try_map_fw_health_reg(bp); + } +} + +static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + struct hwrm_error_recovery_qcfg_output *resp; + struct hwrm_error_recovery_qcfg_input *req; + int rc, i; + + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto err_recovery_out; + fw_health->flags = le32_to_cpu(resp->flags); + if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) && + !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) { + rc = -EINVAL; + goto err_recovery_out; + } + fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq); + fw_health->master_func_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period); + fw_health->normal_func_wait_dsecs = + le32_to_cpu(resp->normal_func_wait_period); + 
fw_health->post_reset_wait_dsecs = + le32_to_cpu(resp->master_func_wait_period_after_reset); + fw_health->post_reset_max_wait_dsecs = + le32_to_cpu(resp->max_bailout_time_after_reset); + fw_health->regs[BNXT_FW_HEALTH_REG] = + le32_to_cpu(resp->fw_health_status_reg); + fw_health->regs[BNXT_FW_HEARTBEAT_REG] = + le32_to_cpu(resp->fw_heartbeat_reg); + fw_health->regs[BNXT_FW_RESET_CNT_REG] = + le32_to_cpu(resp->fw_reset_cnt_reg); + fw_health->regs[BNXT_FW_RESET_INPROG_REG] = + le32_to_cpu(resp->reset_inprogress_reg); + fw_health->fw_reset_inprog_reg_mask = + le32_to_cpu(resp->reset_inprogress_reg_mask); + fw_health->fw_reset_seq_cnt = resp->reg_array_cnt; + if (fw_health->fw_reset_seq_cnt >= 16) { + rc = -EINVAL; + goto err_recovery_out; + } + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) { + fw_health->fw_reset_seq_regs[i] = + le32_to_cpu(resp->reset_reg[i]); + fw_health->fw_reset_seq_vals[i] = + le32_to_cpu(resp->reset_reg_val[i]); + fw_health->fw_reset_seq_delay_msec[i] = + le32_to_cpu(resp->delay_after_reset[i]); + } +err_recovery_out: + hwrm_req_drop(bp, req); + if (!rc) + rc = bnxt_map_fw_health_regs(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY; + return rc; +} + +static int bnxt_hwrm_func_reset(struct bnxt *bp) +{ + struct hwrm_func_reset_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); + if (rc) + return rc; + + req->enables = 0; + hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); + return hwrm_req_send(bp, req); +} + +static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) +{ + struct hwrm_nvm_get_dev_info_output nvm_info; + + if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info)) + snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d", + nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min, + nvm_info.nvm_cfg_ver_upd); +} + +static int bnxt_hwrm_poll(struct bnxt *bp) +{ + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + 
req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + if (resp->flags & VER_GET_RESP_FLAGS_DEV_NOT_RDY) + rc = -EAGAIN; +exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) +{ + u32 dev_caps_cfg, hwrm_ver, hwrm_ctx_flags; + u16 fw_maj, fw_min, fw_bld, fw_rsv; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; + int rc, len; + + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + hwrm_ctx_flags = BNXT_HWRM_FULL_WAIT; + if (silent) + hwrm_ctx_flags |= BNXT_HWRM_CTX_SILENT; + + hwrm_req_flags(bp, req, hwrm_ctx_flags); + + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + +#ifdef BNXT_FPGA + hwrm_req_timeout(bp, req, HWRM_FPGA_TIMEOUT); +#endif + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_ver_get_exit; + + if (resp->flags & VER_GET_RESP_FLAGS_DEV_NOT_RDY) { + rc = -EAGAIN; + goto hwrm_ver_get_exit; + } + + memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + + bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 | + resp->hwrm_intf_min_8b << 8 | + resp->hwrm_intf_upd_8b; + if (resp->hwrm_intf_maj_8b < 1) { + netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); + } + + hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 | + HWRM_VERSION_UPDATE; + + if (bp->hwrm_spec_code > hwrm_ver) + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, + 
HWRM_VERSION_UPDATE); + else + snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d", + resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b, + resp->hwrm_intf_upd_8b); + + fw_maj = le16_to_cpu(resp->hwrm_fw_major); + if (bp->hwrm_spec_code > 0x10803 && fw_maj) { + fw_min = le16_to_cpu(resp->hwrm_fw_minor); + fw_bld = le16_to_cpu(resp->hwrm_fw_build); + fw_rsv = le16_to_cpu(resp->hwrm_fw_patch); + len = FW_VER_STR_LEN; + } else { + fw_maj = resp->hwrm_fw_maj_8b; + fw_min = resp->hwrm_fw_min_8b; + fw_bld = resp->hwrm_fw_bld_8b; + fw_rsv = resp->hwrm_fw_rsvd_8b; + len = BC_HWRM_STR_LEN; + } + bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv); + snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld, + fw_rsv); + + if (strlen(resp->active_pkg_name)) { + int fw_ver_len = strlen(bp->fw_ver_str); + + snprintf(bp->fw_ver_str + fw_ver_len, + FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s", + resp->active_pkg_name); + bp->fw_cap |= BNXT_FW_CAP_PKG_VER; + } + + bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); + if (!bp->hwrm_cmd_timeout) + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000; + if (!bp->hwrm_cmd_max_timeout) + bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT; + else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT) + netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n", + bp->hwrm_cmd_max_timeout / 1000); + + if (resp->hwrm_intf_maj_8b >= 1) { + bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len); + } + if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN) + bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN; + + bp->chip_num = le16_to_cpu(resp->chip_num); + bp->chip_rev = resp->chip_rev; + if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && + !resp->chip_metal) + bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; + +#ifdef BNXT_FPGA + bp->chip_platform_type = 
resp->chip_platform_type; + if (BNXT_ZEBU(bp)) + bp->hwrm_cmd_timeout = bp->hwrm_cmd_max_timeout; + if (!BNXT_ASIC(bp) && !BNXT_CHIP_P7(bp) && bp->pdev->devfn > 1) { + dev_err(&bp->pdev->dev, "Skipping over FPGA function %d\n", bp->pdev->devfn); + rc = -ENODEV; + goto hwrm_ver_get_exit; + } +#endif + dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg); + if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) && + (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) + bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; + + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED) { + bp->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN; + } +hwrm_ver_get_exit: + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_fw_set_time(struct bnxt *bp) +{ + struct hwrm_fw_set_time_input *req; +#if defined(HAVE_TIME64) + struct tm tm; + time64_t now = ktime_get_real_seconds(); +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + struct rtc_time tm; + struct timeval tv; +#endif + int rc; + + if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || + bp->hwrm_spec_code < 0x10400) + return -EOPNOTSUPP; + +#if defined(HAVE_TIME64) + time64_to_tm(now, 0, &tm); +#elif defined(CONFIG_RTC_LIB) || defined(CONFIG_RTC_LIB_MODULE) + do_gettimeofday(&tv); + rtc_time_to_tm(tv.tv_sec, &tm); +#else + return -EOPNOTSUPP; +#endif + rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = 
tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return hwrm_req_send(bp, req); +} + +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) +{ + u64 sw_tmp; + + hw &= mask; + sw_tmp = (*sw & ~mask) | hw; + if (hw < (*sw & mask)) + sw_tmp += mask + 1; + WRITE_ONCE(*sw, sw_tmp); +} + +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count, bool ignore_zero) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (ignore_zero && !hw) + continue; + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); + } +} + +/* Read the counters and do not accumulate. Due to a HW bug in Thor, + * sometimes the value returned by FUNC_QSTATS might be < previous + * value. This makes it appear like a counter rollover but it is not. + * We should not accumulate the counter in this case. But since we + * cannot differentiate between an actual rollover and the HW bug, + * we avoid counter accumulation logic altogether. The consequence + * is that the counters (pkt or byte) reported on a given invocation + * of stats might seem incorrect (< prev value). But subsequent + * invocations would show the correct value. The downside of this + * approach is that since we are exposing the 48b hw counter as is + * to the stack without aggregating into a 64b sw counter, the actual + * rollover occurs sooner (depending on data xfer rate etc). + * But note that it only applies to the VF-stats processing by the + * PF while it is in switchdev mode. Also, we don't pass ignore_zero + * flag since we are not really accumulating the counters and we + * want the sw_stat to be cleared when the corresponding hw_stat is + * zero and avoid caching previous non-zero value. 
+ */ +static void __bnxt_read_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + sw_stats[i] = hw & masks[i]; + } +} + +static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) +{ + if (!stats->hw_stats) + return; + + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + stats->hw_masks, stats->len / 8, false); +} + +static void bnxt_accumulate_vf_stats(struct bnxt *bp, bool ignore_zero) +{ + struct bnxt_stats_mem *ring0_stats; + struct bnxt_stats_mem *stats; + struct bnxt_vf_info *vf; + int i; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return; + + mutex_lock(&bp->sriov_lock); + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + return; + } + + ring0_stats = &vf[0].stats; + + for (i = 0; i < bp->pf.active_vfs; i++) { + stats = &vf[i].stats; + if (!stats->hw_stats) { + mutex_unlock(&bp->sriov_lock); + return; + } + if (BNXT_CHIP_P5(bp)) + __bnxt_read_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8); + else + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + mutex_unlock(&bp->sriov_lock); +} + +static void bnxt_accumulate_all_stats(struct bnxt *bp) +{ + struct bnxt_stats_mem *ring0_stats; + bool ignore_zero = false; + int i; + + /* Chip bug. Counter intermittently becomes 0. 
*/ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + ignore_zero = true; + + ring0_stats = &bp->bnapi[0]->cp_ring.stats; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + if (BNXT_PF(bp)) + bnxt_accumulate_vf_stats(bp, ignore_zero); + + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct bnxt_stats_mem *stats = &bp->port_stats; + __le64 *hw_stats = stats->hw_stats; + u64 *sw_stats = stats->sw_stats; + u64 *masks = stats->hw_masks; + int cnt; + + cnt = sizeof(struct rx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + cnt = sizeof(struct tx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + bnxt_accumulate_stats(&bp->rx_port_stats_ext); + bnxt_accumulate_stats(&bp->tx_port_stats_ext); + } + if (bp->flags & BNXT_FLAG_ECN_STATS) + bnxt_accumulate_stats(&bp->ecn_marked_stats); +} + +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) +{ + struct hwrm_port_qstats_input *req; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + return 0; + + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + BNXT_TX_PORT_STATS_BYTE_OFFSET); + req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_req_send(bp, req); +} + +static int 
bnxt_hwrm_pri2cos_idx(struct bnxt *bp, u32 path_dir) +{ + struct hwrm_queue_pri2cos_qcfg_output *resp_qc; + struct hwrm_queue_pri2cos_qcfg_input *req_qc; + u8 *pri2cos_idx, *q_ids, max_q; + int rc, i, j; + u8 *pri2cos; + + rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; + + req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN | + path_dir); + resp_qc = hwrm_req_hold(bp, req_qc); + rc = hwrm_req_send(bp, req_qc); + if (rc) + goto out; + + if (path_dir == QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX) { + pri2cos_idx = bp->tx_pri2cos_idx; + q_ids = bp->tx_q_ids; + max_q = bp->tx_max_q; + } else { + pri2cos_idx = bp->rx_pri2cos_idx; + q_ids = bp->rx_q_ids; + max_q = bp->rx_max_q; + } + + pri2cos = &resp_qc->pri0_cos_queue_id; + for (i = 0; i < BNXT_MAX_QUEUE; i++) { + u8 queue_id = pri2cos[i]; + u8 queue_idx; + + /* Per port queue IDs start from 0, 10, 20, etc */ + queue_idx = queue_id % 10; + if (queue_idx > BNXT_MAX_QUEUE) { + bp->pri2cos_valid = false; + rc = -EINVAL; + goto out; + } + + for (j = 0; j < max_q; j++) { + if (q_ids[j] == queue_id) + pri2cos_idx[i] = queue_idx; + } + } + bp->pri2cos_valid = true; + +out: + hwrm_req_drop(bp, req_qc); + + return rc; +} + +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) +{ + struct hwrm_port_qstats_ext_output *resp_qs; + struct hwrm_port_qstats_ext_input *req_qs; + struct bnxt_pf_info *pf = &bp->pf; + u32 tx_stat_size; + int rc; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return 0; + + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); + if (rc) + return rc; + + req_qs->flags = flags; + req_qs->port_id = cpu_to_le16(pf->port_id); + req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
+ sizeof(struct tx_port_stats_ext) : 0; + req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); + req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + + resp_qs = hwrm_req_hold(bp, req_qs); + rc = hwrm_req_send(bp, req_qs); + if (!rc) { + bp->fw_rx_stats_ext_size = + le16_to_cpu(resp_qs->rx_stat_size) / 8; + if (BNXT_FW_MAJ(bp) < 220 && !BNXT_CHIP_P7(bp) && + bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY) + bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY; + + bp->fw_tx_stats_ext_size = tx_stat_size ? + le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; + } else { + bp->fw_rx_stats_ext_size = 0; + bp->fw_tx_stats_ext_size = 0; + } + hwrm_req_drop(bp, req_qs); + + if (flags) + return rc; + + if (bp->fw_tx_stats_ext_size <= + offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { + bp->pri2cos_valid = false; + return rc; + } + + rc = bnxt_hwrm_pri2cos_idx(bp, QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX); + if (rc) + return rc; + + if (bp->is_asym_q) { + rc = bnxt_hwrm_pri2cos_idx(bp, QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX); + if (rc) + return rc; + } else { + memcpy(bp->rx_pri2cos_idx, bp->tx_pri2cos_idx, sizeof(bp->rx_pri2cos_idx)); + } + + return rc; +} + +int bnxt_hwrm_func_qstats(struct bnxt *bp, struct bnxt_stats_mem *stats, + u16 fid, u8 flags) +{ + struct hwrm_func_qstats_output *resp; + struct hwrm_func_qstats_input *req; + int rc = -EOPNOTSUPP; + struct ctx_hw_stats *hw_stats; + + hw_stats = stats->hw_stats; + if (!hw_stats) + return rc; + + memset(hw_stats, 0, stats->len); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS); + if (rc) + return rc; + + req->fid = fid; + req->flags = flags; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + + hw_stats->rx_ucast_pkts = resp->rx_ucast_pkts; + hw_stats->rx_mcast_pkts = resp->rx_mcast_pkts; + hw_stats->rx_bcast_pkts = resp->rx_bcast_pkts; + hw_stats->rx_discard_pkts = resp->rx_discard_pkts; + 
hw_stats->rx_error_pkts = resp->rx_drop_pkts; + + hw_stats->rx_ucast_bytes = resp->rx_ucast_bytes; + hw_stats->rx_mcast_bytes = resp->rx_mcast_bytes; + hw_stats->rx_bcast_bytes = resp->rx_bcast_bytes; + + hw_stats->tx_ucast_pkts = resp->tx_ucast_pkts; + hw_stats->tx_mcast_pkts = resp->tx_mcast_pkts; + hw_stats->tx_bcast_pkts = resp->tx_bcast_pkts; + hw_stats->tx_discard_pkts = resp->tx_discard_pkts; + hw_stats->tx_error_pkts = resp->tx_drop_pkts; + + hw_stats->tx_ucast_bytes = resp->tx_ucast_bytes; + hw_stats->tx_mcast_bytes = resp->tx_mcast_bytes; + hw_stats->tx_bcast_bytes = resp->tx_bcast_bytes; + + hw_stats->tpa_pkts = resp->rx_agg_pkts; + hw_stats->tpa_bytes = resp->rx_agg_bytes; + hw_stats->tpa_events = resp->rx_agg_events; + hw_stats->tpa_aborts = resp->rx_agg_aborts; + + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_vf_qstats(struct bnxt *bp, u8 flags) +{ + struct bnxt_stats_mem *stats; + struct bnxt_vf_info *vf; + int rc = -EOPNOTSUPP, i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return rc; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return rc; + + mutex_lock(&bp->sriov_lock); + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + return rc; + } + + for (i = 0; i < bp->pf.active_vfs; i++) { + stats = &(vf[i].stats); + rc = bnxt_hwrm_func_qstats(bp, stats, + cpu_to_le16(vf[i].fw_fid), flags); + if (rc) + break; + } + + mutex_unlock(&bp->sriov_lock); + return rc; +} + +static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) +{ + if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); + if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); +#ifndef HAVE_UDP_TUNNEL_NIC + atomic_set(&bp->vxlan_port_cnt, 0); + atomic_set(&bp->nge_port_cnt, 0); +#endif +} + +/* TODO: remove this once min aggregate packet 
size workaround is removed */ +static int bnxt_dbg_hwrm_wr_reg(struct bnxt *, u32, u32); + +static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) +{ + int rc, i; + u32 tpa_flags = 0; + + if (set_tpa) + tpa_flags = bp->flags & BNXT_FLAG_TPA; + else if (BNXT_NO_FW_ACCESS(bp)) + return 0; + for (i = 0; i < bp->nr_vnics; i++) { + rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags); + if (rc) { + netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n", + i, rc); + return rc; + } + } + return 0; +} + +static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->nr_vnics; i++) + bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false); +} + +static void bnxt_clear_vnic(struct bnxt *bp) +{ + if (!bp->vnic_info) + return; + + bnxt_hwrm_clear_vnic_filter(bp); + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { + /* clear all RSS setting before free vnic ctx */ + bnxt_hwrm_clear_vnic_rss(bp); + bnxt_hwrm_vnic_ctx_free(bp); + } + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + bnxt_hwrm_vnic_free(bp); + bnxt_clear_cfa_tls_filters_tbl(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + bnxt_hwrm_vnic_ctx_free(bp); +} + +static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, + bool irq_re_init) +{ + bnxt_clear_vnic(bp); + bnxt_hwrm_ring_free(bp, close_path); + bnxt_hwrm_ring_grp_free(bp); + if (irq_re_init) { + bnxt_hwrm_stat_ctx_free(bp); + bnxt_hwrm_free_tunnel_ports(bp); + } +} + +#ifdef HAVE_NDO_BRIDGE_GETLINK +static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) +{ + struct hwrm_func_cfg_input *req; + u8 evb_mode; + int rc; + + if (br_mode == BRIDGE_MODE_VEB) + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + else if (br_mode == BRIDGE_MODE_VEPA) + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + else + return -EINVAL; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = 
cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + req->evb_mode = evb_mode; + return hwrm_req_send(bp, req); +} +#endif + +static int bnxt_hwrm_set_cpu_params(struct bnxt *bp) +{ + struct hwrm_func_cfg_input *req; + int rc; + + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) + return 0; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; + if (cache_line_size() == 128) + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + if (PAGE_SHIFT >= 12 && + PAGE_SHIFT <= FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST + 12) { + req->enables2 = cpu_to_le32(FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE); + req->db_page_size = PAGE_SHIFT - 12; + } + + return hwrm_req_send(bp, req); +} + +static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc; + + if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) + goto skip_rss_ctx; + + /* allocate context for vnic */ + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic->vnic_id, rc); + goto vnic_setup_err; + } + bp->rsscos_nr_ctxs++; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", + vnic->vnic_id, rc); + goto vnic_setup_err; + } + bp->rsscos_nr_ctxs++; + } + +skip_rss_ctx: + /* configure default vnic, ring grp */ + rc = bnxt_hwrm_vnic_cfg(bp, vnic, 0); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic->vnic_id, rc); + goto vnic_setup_err; + } + + /* Enable RSS hashing on vnic */ + rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n", + vnic->vnic_id, rc); + goto vnic_setup_err; + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); + 
if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic->vnic_id, rc); + } + } + +vnic_setup_err: + return rc; +} + +int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, u8 valid) +{ + struct hwrm_vnic_update_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_UPDATE); + if (rc) + return rc; + + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + + if (valid & VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID) + req->metadata_format_type = vnic->metadata_format; + if (valid & VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID) + req->vnic_state = vnic->state; + if (valid & VNIC_UPDATE_REQ_ENABLES_MRU_VALID) + req->mru = cpu_to_le16(vnic->mru); + + req->enables = cpu_to_le32(valid); + + rc = hwrm_req_send(bp, req); + + return rc; +} + +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n", + vnic->vnic_id, rc); + return rc; + } + rc = bnxt_hwrm_vnic_cfg(bp, vnic, 0); + if (rc) + netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n", + vnic->vnic_id, rc); + return rc; +} + +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc, i, nr_ctxs; + + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; i++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", + vnic->vnic_id, i, rc); + break; + } + bp->rsscos_nr_ctxs++; + } + if (i < nr_ctxs) + return -ENOMEM; + + rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = bnxt_hwrm_vnic_set_hds(bp, vnic); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic->vnic_id, rc); + return rc; + } + } + if (bp->ktls_info && bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) { + vnic->metadata_format = 
VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4; + rc = bnxt_hwrm_vnic_update(bp, vnic, + VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID); + } + return rc; +} + +static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return __bnxt_setup_vnic_p5(bp, vnic); + else + return __bnxt_setup_vnic(bp, vnic); +} + +static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic, + u16 start_rx_ring_idx, int rx_rings) +{ + int rc; + + rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", + vnic->vnic_id, rc); + return rc; + } + return bnxt_setup_vnic(bp, vnic); +} + +static int bnxt_alloc_rfs_vnics(struct bnxt *bp) +{ + int i, rc = 0; + + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return bnxt_alloc_and_setup_vnic(bp, + &bp->vnic_info[BNXT_VNIC_NTUPLE], + 0, bp->rx_nr_rings); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return 0; + + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_vnic_info *vnic; + u16 vnic_id = i + 1; + u16 ring_id = i; + + if (vnic_id >= bp->nr_vnics) + break; + + vnic = &bp->vnic_info[vnic_id]; + vnic->flags |= BNXT_VNIC_RFS_FLAG; + if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) + vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG; + if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1)) + break; + } + return rc; +} + +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all) +{ + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct bnxt_filter_base *usr_fltr, *tmp; + struct bnxt_ntuple_filter *ntp_fltr; + int i; + + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } + if (!all) + return; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) { + if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) && + 
usr_fltr->fw_vnic_id == rss_ctx->index) { + ntp_fltr = container_of(usr_fltr, + struct bnxt_ntuple_filter, + base); + bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr); + bnxt_del_ntp_filter(bp, ntp_fltr); + bnxt_del_one_usr_fltr(bp, usr_fltr); + } + } + + if (vnic->rss_table) + dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, + vnic->rss_table, + vnic->rss_table_dma_addr); + kfree(rss_ctx->rss_indir_tbl); + list_del(&rss_ctx->list); + bp->num_rss_ctx--; + clear_bit(rss_ctx->index, bp->rss_ctx_bmap); + kfree(rss_ctx); +} + +static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) +{ + bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA); + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || + bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) || + __bnxt_setup_vnic_p5(bp, vnic)) { + netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", + rss_ctx->index); + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + } + } +} + +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +{ + struct bnxt_rss_ctx *rss_ctx = NULL; + + rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); + if (rss_ctx) { + rss_ctx->vnic.rss_ctx = rss_ctx; + list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); + bp->num_rss_ctx++; + } + return rss_ctx; +} + +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + bnxt_del_one_rss_ctx(bp, rss_ctx, all); + } + if (all) + bitmap_free(bp->rss_ctx_bmap); +} + +static void bnxt_init_multi_rss_ctx(struct bnxt *bp) +{ + bp->rss_cap &= ~BNXT_RSS_CAP_MULTI_RSS_CTX; + bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); + if (bp->rss_ctx_bmap) { + /* burn index 0 since we cannot have context 0 */ + __set_bit(0, bp->rss_ctx_bmap); + INIT_LIST_HEAD(&bp->rss_ctx_list); + bp->rss_cap |= 
BNXT_RSS_CAP_MULTI_RSS_CTX; + } +} + +/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */ +static bool bnxt_promisc_ok(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf)) + return false; +#endif + return true; +} + +static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) +{ + unsigned int rc = 0; + + rc = bnxt_hwrm_vnic_alloc(bp, &bp->vnic_info[1], bp->rx_nr_rings - 1, 1); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + + rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[1], 0); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + return rc; +} + +static int bnxt_cfg_rx_mode(struct bnxt *); +static bool bnxt_mc_list_updated(struct bnxt *, u32 *); + +static int bnxt_cfg_host_mtu(struct bnxt *bp) +{ + struct hwrm_func_cfg_input *req; + int rc; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOST_MTU); + req->host_mtu = cpu_to_le16(bp->dev->mtu); + return hwrm_req_send(bp, req); +} + +static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + int rc = 0; + unsigned int rx_nr_rings = bp->rx_nr_rings; + + if (irq_re_init) { + rc = bnxt_hwrm_stat_ctx_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n", + rc); + goto err_out; + } + } + + rc = bnxt_hwrm_ring_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_hwrm_ring_grp_alloc(bp); + if (rc) { + netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc); + goto err_out; + } + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + rx_nr_rings--; + + /* default vnic 0 */ + rc = bnxt_hwrm_vnic_alloc(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT], 0, rx_nr_rings); + if 
(rc) { + netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); + goto err_out; + } + + if (BNXT_VF(bp)) + bnxt_hwrm_func_qcfg(bp); + + rc = bnxt_setup_vnic(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]); + if (rc) + goto err_out; + if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) + bnxt_hwrm_update_rss_hash_cfg(bp); + + if (bp->flags & BNXT_FLAG_RFS) { + rc = bnxt_alloc_rfs_vnics(bp); + if (rc) + goto err_out; + } + + if (bp->flags & BNXT_FLAG_TPA) { + rc = bnxt_set_tpa(bp, true); + if (rc) + goto err_out; + } + + if (BNXT_VF(bp)) + bnxt_update_vf_mac(bp); + + /* NIC flow initialization must be done prior to L2 filter creation */ + rc = bnxt_nic_flows_init(bp); + if (rc) { + netdev_err(bp->dev, "Failed to init port NIC Flow\n"); + goto err_out; + } + + /* Filter for default vnic 0 */ + rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr); + if (rc) { + if (BNXT_VF(bp) && rc == -ENODEV) + netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n"); + else + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + goto err_out; + } + vnic->uc_filter_count = 1; + + vnic->rx_mask = 0; + if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state)) + goto skip_rx_mask; + + if (bp->dev->flags & IFF_BROADCAST) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + + if (bp->dev->flags & IFF_PROMISC) + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + if (bp->dev->flags & IFF_ALLMULTI) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else if (bp->dev->flags & IFF_MULTICAST) { + u32 mask = 0; + + bnxt_mc_list_updated(bp, &mask); + vnic->rx_mask |= mask; + } + + rc = bnxt_cfg_rx_mode(bp); + if (rc) + goto err_out; + +skip_rx_mask: + if (BNXT_PF(bp) && bnxt_cfg_host_mtu(bp)) + netdev_warn(bp->dev, "Could not configure host MTU\n"); + + rc = bnxt_hwrm_set_coal(bp); + if (rc) + netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", + rc); + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = 
bnxt_setup_nitroa0_vnic(bp); + if (rc) + netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", + rc); + } + + if (BNXT_VF(bp)) { + bnxt_hwrm_func_qcfg(bp); + netdev_update_features(bp->dev); + } + + return 0; + +err_out: + bnxt_hwrm_resource_free(bp, 0, true); + bnxt_nic_flows_deinit(bp); + + return rc; +} + +static void bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_hwrm_resource_free(bp, 1, irq_re_init); +} + +static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init) +{ + bnxt_init_cp_rings(bp); + bnxt_init_rx_rings(bp); + bnxt_init_tx_rings(bp); + bnxt_init_mpc_rings(bp); + bnxt_init_ring_grps(bp, irq_re_init); + bnxt_init_vnics(bp); + + return bnxt_init_chip(bp, irq_re_init); +} + +static void bnxt_set_tcs_queues(struct bnxt *bp) +{ + int tcs = bp->num_tc; + + if (tcs) { + int i, off, count; + + for (i = 0; i < tcs; i++) { + count = bp->tx_nr_rings_per_tc; + off = BNXT_TC_TO_RING_BASE(bp, i); + netdev_set_tc_queue(bp->dev, i, count, off); + } + } +} + +static int bnxt_set_real_num_queues(struct bnxt *bp) +{ + int rc; + struct net_device *dev = bp->dev; + +#ifdef VOID_NETIF_SET_NUM_TX + netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - + bp->tx_nr_rings_xdp); +#else + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings - + bp->tx_nr_rings_xdp); + if (rc) + return rc; +#endif + rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings); + if (rc) + return rc; + +#ifdef CONFIG_RFS_ACCEL + if (bp->flags & BNXT_FLAG_RFS) + dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings); +#endif + + return rc; +} + +static int __bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool shared) +{ + int _rx = *rx, _tx = *tx; + + if (shared) { + *rx = min_t(int, _rx, max); + *tx = min_t(int, _tx, max); + } else { + if (max < 2) + return -ENOMEM; + + while (_rx + _tx > max) { + if (_rx > _tx && _rx > 1) + _rx--; + else if (_tx > 1) + _tx--; + } + *rx = _rx; + *tx = _tx; + } + return 0; +} + +static int __bnxt_num_tx_to_cp(struct bnxt 
*bp, int tx, int tx_sets, int tx_xdp) +{ + return (tx - tx_xdp) / tx_sets + tx_xdp; +} + +int bnxt_num_tx_to_cp(struct bnxt *bp, int tx) +{ + int tcs = bp->num_tc; + + if (!tcs) + tcs = 1; + return __bnxt_num_tx_to_cp(bp, tx, tcs, bp->tx_nr_rings_xdp); +} + +static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp) +{ + int tcs = bp->num_tc; + + return (tx_cp - bp->tx_nr_rings_xdp) * tcs + + bp->tx_nr_rings_xdp; +} + +static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, + bool sh) +{ + int tx_cp = bnxt_num_tx_to_cp(bp, *tx); + + if (tx_cp != *tx) { + int tx_saved = tx_cp, rc; + + rc = __bnxt_trim_rings(bp, rx, &tx_cp, max, sh); + if (rc) + return rc; + if (tx_cp != tx_saved) + *tx = bnxt_num_cp_to_tx(bp, tx_cp); + return 0; + } + return __bnxt_trim_rings(bp, rx, tx, max, sh); +} + +static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp) +{ + return bp->hw_resc.max_rsscos_ctxs; +} + +static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp) +{ + return bp->hw_resc.max_vnics; +} + +unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) +{ + return bp->hw_resc.max_stat_ctxs; +} + +unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) +{ + return bp->hw_resc.max_cp_rings; +} + +static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) +{ + unsigned int cp = bp->hw_resc.max_cp_rings; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + cp -= bnxt_get_ulp_msix_num(bp); + + return cp - bnxt_mpc_cp_rings_in_use(bp); +} + +static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); + + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); +} + +static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) +{ + bp->hw_resc.max_irqs = max_irqs; +} + +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) +{ + unsigned int cp; + + cp = 
bnxt_get_max_func_cp_rings_for_en(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return cp - bp->rx_nr_rings - bp->tx_nr_rings; + else + return cp - bp->cp_nr_rings; +} + +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) +{ + return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp); +} + +static int bnxt_get_avail_msix(struct bnxt *bp, int num) +{ + int max_irq = bnxt_get_max_func_irqs(bp); + int total_req = bp->cp_nr_rings + num; + struct bnxt_pf_info *pf = &bp->pf; + + if (((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + bp->hw_resc.max_nqs == bp->cp_nr_rings) || + pf->active_vfs) + return 0; + + if (max_irq < total_req) { + num = max_irq - bp->cp_nr_rings; + if (num <= 0) + return 0; + } + return num; +} + +static int bnxt_get_num_msix(struct bnxt *bp) +{ + if (!BNXT_NEW_RM(bp)) + return bnxt_get_max_func_irqs(bp); + + return bnxt_nq_rings_in_use(bp); +} + +static int bnxt_init_int_mode(struct bnxt *bp) +{ + int i, total_vecs, max, rc = 0, min = 1, ulp_msix, tx_cp; + struct msix_entry *msix_ent; + + total_vecs = bnxt_get_num_msix(bp); + max = bnxt_get_max_func_irqs(bp); + if (total_vecs > max) + total_vecs = max; + + if (!total_vecs) + return 0; + + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); + if (!msix_ent) + return -ENOMEM; + + for (i = 0; i < total_vecs; i++) { + msix_ent[i].entry = i; + msix_ent[i].vector = 0; + } + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + min = 2; + + total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); + ulp_msix = bnxt_get_ulp_msix_num(bp); + if (total_vecs < 0 || total_vecs < ulp_msix) { + rc = -ENODEV; + goto msix_setup_exit; + } + + bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL); + if (bp->irq_tbl) { + for (i = 0; i < total_vecs; i++) + bp->irq_tbl[i].vector = msix_ent[i].vector; + + bp->total_irqs = total_vecs; + /* Trim rings based upon num of vectors allocated */ + rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, + 
total_vecs - ulp_msix, min == 1); + if (rc) + goto msix_setup_exit; + + tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); + bp->cp_nr_rings = (min == 1) ? + max_t(int, tx_cp, bp->rx_nr_rings) : + tx_cp + bp->rx_nr_rings; + + } else { + rc = -ENOMEM; + goto msix_setup_exit; + } + kfree(msix_ent); + return 0; + +msix_setup_exit: + netdev_err(bp->dev, "bnxt_init_int_mode err: %x\n", rc); + kfree(bp->irq_tbl); + bp->irq_tbl = NULL; + pci_disable_msix(bp->pdev); + kfree(msix_ent); + return rc; +} + +static void bnxt_clear_int_mode(struct bnxt *bp) +{ + pci_disable_msix(bp->pdev); + + kfree(bp->irq_tbl); + bp->irq_tbl = NULL; +} + +static void bnxt_setup_msix(struct bnxt *bp) +{ + const int len = sizeof(bp->irq_tbl[0].name); + struct net_device *dev = bp->dev; + int i; + + bnxt_set_tcs_queues(bp); + + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + char *attr; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + attr = "TxRx"; + else if (i < bp->rx_nr_rings) + attr = "rx"; + else + attr = "tx"; + + snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, + attr, i); + bp->irq_tbl[map_idx].handler = bnxt_msix; + } +} + +static int bnxt_setup_int_mode(struct bnxt *bp) +{ + int rc; + + if (!bp->irq_tbl) { + rc = bnxt_init_int_mode(bp); + if (rc) + return rc; + } + + bnxt_setup_msix(bp); + + rc = bnxt_set_real_num_queues(bp); + return rc; +} + +int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) +{ + bool irq_cleared = false; + int tcs = bp->num_tc; + int irqs_required; + int rc; + + if (!bnxt_need_reserve_rings(bp)) + return 0; + + if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) { + int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want); + + if (ulp_msix > bp->ulp_num_msix_want) + ulp_msix = bp->ulp_num_msix_want; + irqs_required = ulp_msix + bp->cp_nr_rings; + } else { + irqs_required = bnxt_get_num_msix(bp); + } + + if (irq_re_init && BNXT_NEW_RM(bp) && + irqs_required != bp->total_irqs) { + bnxt_ulp_irq_stop(bp); 
+ bnxt_clear_int_mode(bp); + irq_cleared = true; + } + rc = __bnxt_reserve_rings(bp); + if (irq_cleared) { + if (!rc) + rc = bnxt_init_int_mode(bp); + bnxt_ulp_irq_restart(bp, rc); + } + if (rc) { + netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); + return rc; + } + if (tcs && (bp->tx_nr_rings_per_tc * tcs != + bp->tx_nr_rings - bp->tx_nr_rings_xdp)) { + netdev_err(bp->dev, "tx ring reservation failure\n"); + netdev_reset_tc(bp->dev); + bp->num_tc = 0; + if (bp->tx_nr_rings_xdp) + bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp; + else + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + return -ENOMEM; + } + return 0; +} + +static void bnxt_free_irq(struct bnxt *bp) +{ + struct bnxt_irq *irq; + int i; + +#ifdef CONFIG_RFS_ACCEL + free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); + bp->dev->rx_cpu_rmap = NULL; +#endif + if (!bp->irq_tbl || !bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + irq = &bp->irq_tbl[map_idx]; + if (irq->requested) { +#if defined(HAVE_CPUMASK_LOCAL_FIRST) || defined(HAVE_CPUMASK_LOCAL_SPREAD) + if (irq->have_cpumask) { + irq_set_affinity_hint(irq->vector, NULL); + free_cpumask_var(irq->cpu_mask); + irq->have_cpumask = 0; + } +#endif + free_irq(irq->vector, bp->bnapi[i]); + } + + irq->requested = 0; + } +} + +static int bnxt_request_irq(struct bnxt *bp) +{ + int i, j, rc = 0; + unsigned long flags = 0; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif + + rc = bnxt_setup_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", + rc); + return rc; + } +#ifdef CONFIG_RFS_ACCEL + rmap = bp->dev->rx_cpu_rmap; +#endif + for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; + +#ifdef CONFIG_RFS_ACCEL + if (rmap && bp->bnapi[i]->rx_ring) { + rc = irq_cpu_rmap_add(rmap, irq->vector); + if (rc) + netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", + j); + 
j++; + } +#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, + bp->bnapi[i]); + if (rc) + break; + + irq->requested = 1; +#if defined(HAVE_CPUMASK_LOCAL_FIRST) || defined(HAVE_CPUMASK_LOCAL_SPREAD) + if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { + int numa_node = dev_to_node(&bp->pdev->dev); + int nr_cpus = num_online_cpus(); + + if (bp->flags & BNXT_FLAG_NUMA_DIRECT) + nr_cpus = nr_cpus_node(numa_node); + + irq->have_cpumask = 1; +#ifdef HAVE_CPUMASK_LOCAL_SPREAD + cpumask_set_cpu(cpumask_local_spread(i % nr_cpus, numa_node), + irq->cpu_mask); +#else + rc = cpumask_set_cpu_local_first(i % nr_cpus, numa_node, + irq->cpu_mask); + if (rc) { + netdev_warn(bp->dev, "Set CPU mask failed\n"); + break; + } +#endif + rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask); + if (rc) { + netdev_warn(bp->dev, + "Set affinity failed, IRQ = %d\n", + irq->vector); + break; + } + } +#endif + } + return rc; +} + +static void bnxt_del_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + __netif_napi_del(&bnapi->napi); + } + /* We called __netif_napi_del(), we need + * to respect an RCU grace period before freeing napi structures. 
+ */ + synchronize_net(); +} + +static void bnxt_init_napi(struct bnxt *bp) +{ + int (*poll_fn)(struct napi_struct *, int) = bnxt_poll; + unsigned int cp_nr_rings = bp->cp_nr_rings; + struct bnxt_napi *bnapi; + int i; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + poll_fn = bnxt_poll_p5; + else if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + cp_nr_rings--; + for (i = 0; i < cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + ___netif_napi_add(bp->dev, &bnapi->napi, poll_fn); + napi_hash_add(&bnapi->napi); + } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnapi = bp->bnapi[cp_nr_rings]; + ___netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); + napi_hash_add(&bnapi->napi); + } +} + +static void bnxt_disable_napi(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi || + test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + napi_disable(&bp->bnapi[i]->napi); + bnxt_disable_poll(bp->bnapi[i]); + if (bp->bnapi[i]->rx_ring) + cancel_work_sync(&cpr->dim.work); + } +} + +static void bnxt_enable_napi(struct bnxt *bp) +{ + int i; + + clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + + bnapi->tx_fault = 0; + + if (bnapi->in_reset) + cpr->sw_stats->rx.rx_resets++; + bnapi->in_reset = false; + + if (bnapi->rx_ring) { + INIT_WORK(&cpr->dim.work, bnxt_dim_work); + cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + bnxt_enable_poll(bnapi); + napi_enable(&bnapi->napi); + } +} + +void bnxt_tx_disable(struct bnxt *bp) +{ + int i; + struct bnxt_tx_ring_info *txr; + + if (bp->tx_ring) { + for (i = 0; i < bp->tx_nr_rings; i++) { + txr = &bp->tx_ring[i]; + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); + } + } + /* Make sure napi polls see @dev_state change */ + synchronize_net(); + /* Drop carrier first to prevent TX timeout */ +#ifdef 
BNXT_SKIP_CARRIER_OFF + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) +#endif + netif_carrier_off(bp->dev); + + /* Stop all TX queues */ + netif_tx_disable(bp->dev); +} + +void bnxt_tx_enable(struct bnxt *bp) +{ + int i; + struct bnxt_tx_ring_info *txr; + + for (i = 0; i < bp->tx_nr_rings; i++) { + txr = &bp->tx_ring[i]; + WRITE_ONCE(txr->dev_state, 0); + } + /* Make sure napi polls see @dev_state change */ + synchronize_net(); + netif_tx_wake_all_queues(bp->dev); + if (BNXT_LINK_IS_UP(bp)) + netif_carrier_on(bp->dev); +} + +static char *bnxt_report_fec(struct bnxt_link_info *link_info) +{ + u8 active_fec = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; + + switch (active_fec) { + default: + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + return "None"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: + return "Clause 74 BaseR"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: + return "Clause 91 RS(528,514)"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: + return "Clause 91 RS544_1XN"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: + return "Clause 91 RS(544,514)"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: + return "Clause 91 RS272_1XN"; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: + return "Clause 91 RS(272,257)"; + } +} + +void bnxt_report_link(struct bnxt *bp) +{ + if (BNXT_LINK_IS_UP(bp)) { + const char *signal = ""; + const char *flow_ctrl; + const char *duplex; + u32 speed; + u16 fec; + + netif_carrier_on(bp->dev); + speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + if (speed == SPEED_UNKNOWN) { + netdev_info(bp->dev, "NIC Link is Up, speed unknown\n"); + return; + } + if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL) + duplex = "full"; + else + duplex = "half"; + if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH) + flow_ctrl = "ON - receive & transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX) + flow_ctrl = "ON - 
transmit"; + else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX) + flow_ctrl = "ON - receive"; + else + flow_ctrl = "none"; + if (bp->link_info.phy_qcfg_resp.option_flags & + PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) { + u8 sig_mode = bp->link_info.active_fec_sig_mode & + PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; + switch (sig_mode) { + case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ: + signal = "(NRZ) "; + break; + case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4: + signal = "(PAM4 56Gbps) "; + break; + case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112: + signal = "(PAM4 112Gbps) "; + break; + default: + break; + } + } + netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n", + speed, signal, duplex, flow_ctrl); + if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) + netdev_info(bp->dev, "EEE is %s\n", + bp->eee.eee_active ? "active" : + "not active"); + fec = bp->link_info.fec_cfg; + if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED)) + netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n", + (fec & BNXT_FEC_AUTONEG) ? 
"on" : "off", + bnxt_report_fec(&bp->link_info)); + } else { + netif_carrier_off(bp->dev); + netdev_err(bp->dev, "NIC Link is Down\n"); + } +} + +static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) +{ + if (!resp->supported_speeds_auto_mode && + !resp->supported_speeds_force_mode && + !resp->supported_pam4_speeds_auto_mode && + !resp->supported_pam4_speeds_force_mode && + !resp->supported_speeds2_auto_mode && + !resp->supported_speeds2_force_mode) + return true; + return false; +} + +static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10201) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_phy_qcaps_exit; + + bp->phy_flags = resp->flags | (resp->flags2 << 8); + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { + struct ethtool_keee *eee = &bp->eee; + u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); + + _bnxt_fw_to_linkmode(eee->supported, fw_speeds); + bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; + bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; + } + + if (bp->hwrm_spec_code >= 0x10a01) { + if (bnxt_phy_qcaps_no_speed(resp)) { + link_info->phy_state = BNXT_PHY_STATE_DISABLED; + netdev_warn(bp->dev, "Ethernet link disabled\n"); + } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) { + link_info->phy_state = BNXT_PHY_STATE_ENABLED; + netdev_info(bp->dev, "Ethernet link enabled\n"); + /* Phy re-enabled, reprobe the speeds */ + link_info->support_auto_speeds = 0; + link_info->support_pam4_auto_speeds = 0; + link_info->support_auto_speeds2 = 0; + } + } + if (resp->supported_speeds_auto_mode) + 
link_info->support_auto_speeds = + le16_to_cpu(resp->supported_speeds_auto_mode); + if (resp->supported_pam4_speeds_auto_mode) + link_info->support_pam4_auto_speeds = + le16_to_cpu(resp->supported_pam4_speeds_auto_mode); + if (resp->supported_speeds2_auto_mode) + link_info->support_auto_speeds2 = + le16_to_cpu(resp->supported_speeds2_auto_mode); + + bp->port_count = resp->port_cnt; + +hwrm_phy_qcaps_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_mac_qcaps(struct bnxt *bp) +{ + struct hwrm_port_mac_qcaps_output *resp; + struct hwrm_port_mac_qcaps_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10a03) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) + goto hwrm_mac_qcaps_exit; + + bp->mac_flags = resp->flags; + +hwrm_mac_qcaps_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static bool bnxt_support_dropped(u16 advertising, u16 supported) +{ + u16 diff = advertising ^ supported; + + return ((supported | diff) != supported); +} + +static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + /* Check if any advertised speeds are no longer supported. The caller + * holds the link_lock mutex, so we can modify link_info settings. 
+ */ + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds2)) { + link_info->advertising = link_info->support_auto_speeds2; + return true; + } + return false; + } + if (bnxt_support_dropped(link_info->advertising, + link_info->support_auto_speeds)) { + link_info->advertising = link_info->support_auto_speeds; + return true; + } + if (bnxt_support_dropped(link_info->advertising_pam4, + link_info->support_pam4_auto_speeds)) { + link_info->advertising_pam4 = link_info->support_pam4_auto_speeds; + return true; + } + return false; +} + +int bnxt_update_link(struct bnxt *bp, bool chng_link_state) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_output *resp; + struct hwrm_port_phy_qcfg_input *req; + u8 link_state = link_info->link_state; + bool support_changed; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + if (BNXT_VF(bp) && rc == -ENODEV) { + netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n"); + rc = 0; + } + return rc; + } + + memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); + link_info->phy_link_status = resp->link; + netif_info(bp, link, bp->dev, "FW reports link: %d\n", (u32)resp->link); + link_info->duplex = resp->duplex_cfg; + if (bp->hwrm_spec_code >= 0x10800) + link_info->duplex = resp->duplex_state; + link_info->pause = resp->pause; + link_info->auto_mode = resp->auto_mode; + link_info->auto_pause_setting = resp->auto_pause; + link_info->lp_pause = resp->link_partner_adv_pause; + link_info->force_pause_setting = resp->force_pause; + link_info->duplex_setting = resp->duplex_cfg; + if (link_info->phy_link_status == BNXT_LINK_LINK) { + link_info->link_speed = le16_to_cpu(resp->link_speed); + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) + link_info->active_lanes = resp->active_lanes; + } 
else { + link_info->link_speed = 0; + link_info->active_lanes = 0; + } + link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); + link_info->force_pam4_link_speed = + le16_to_cpu(resp->force_pam4_link_speed); + link_info->force_link_speed2 = le16_to_cpu(resp->force_link_speeds2); + link_info->support_speeds = le16_to_cpu(resp->support_speeds); + link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds); + link_info->support_speeds2 = le16_to_cpu(resp->support_speeds2); + link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); + link_info->auto_pam4_link_speeds = + le16_to_cpu(resp->auto_pam4_link_speed_mask); + link_info->auto_link_speeds2 = le16_to_cpu(resp->auto_link_speeds2); + link_info->lp_auto_link_speeds = + le16_to_cpu(resp->link_partner_adv_speeds); + link_info->lp_auto_pam4_link_speeds = + resp->link_partner_pam4_adv_speeds; + link_info->preemphasis = le32_to_cpu(resp->preemphasis); + link_info->phy_ver[0] = resp->phy_maj; + link_info->phy_ver[1] = resp->phy_min; + link_info->phy_ver[2] = resp->phy_bld; + link_info->media_type = resp->media_type; + link_info->phy_type = resp->phy_type; + link_info->transceiver = resp->xcvr_pkg_type; + link_info->phy_addr = resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; + link_info->module_status = resp->module_status; + + if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) { + struct ethtool_keee *eee = &bp->eee; + u16 fw_speeds; + + eee->eee_active = 0; + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { + eee->eee_active = 1; + fw_speeds = le16_to_cpu( + resp->link_partner_adv_eee_link_speed_mask); + _bnxt_fw_to_linkmode(eee->lp_advertised, fw_speeds); + } + + /* Pull initial EEE config */ + if (!chng_link_state) { + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) + eee->eee_enabled = 1; + + fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); + _bnxt_fw_to_linkmode(eee->advertised, fw_speeds); + + if 
(resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { + __le32 tmr; + + eee->tx_lpi_enabled = 1; + tmr = resp->xcvr_identifier_type_tx_lpi_timer; + eee->tx_lpi_timer = le32_to_cpu(tmr) & + PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; + } + } + } + + link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED; + if (bp->hwrm_spec_code >= 0x10504) { + link_info->fec_cfg = le16_to_cpu(resp->fec_cfg); + link_info->active_fec_sig_mode = resp->active_fec_signal_mode; + } + /* TODO: need to add more logic to report VF link */ + if (chng_link_state) { + if (link_info->phy_link_status == BNXT_LINK_LINK) { + link_info->link_state = BNXT_LINK_STATE_UP; + netif_info(bp, link, bp->dev, "Updated link: up\n"); + } else { + link_info->link_state = BNXT_LINK_STATE_DOWN; + netif_info(bp, link, bp->dev, "Updated link: down\n"); + } + if (link_state != link_info->link_state) + bnxt_report_link(bp); + } else { + /* always link down if not require to update link state */ + link_info->link_state = BNXT_LINK_STATE_DOWN; + netif_info(bp, link, bp->dev, "Init link: down\n"); + } + hwrm_req_drop(bp, req); + + if (!BNXT_PHY_CFG_ABLE(bp)) + return 0; + + support_changed = bnxt_support_speed_dropped(link_info); + if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); + return 0; +} + +static void bnxt_get_port_module_status(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; + bool open_state = true; + u8 module_status; + + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) + open_state = false; + + if (bnxt_update_link(bp, open_state)) + return; + + module_status = link_info->module_status; + if (!open_state) + return; + + switch (module_status) { + case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: + case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: + case 
PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: + netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", + bp->pf.port_id); + if (bp->hwrm_spec_code >= 0x10201) { + netdev_warn(bp->dev, "Module part number %s\n", + resp->phy_vendor_partnumber); + } + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) + netdev_warn(bp->dev, "TX is disabled\n"); + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) + netdev_warn(bp->dev, "SFP+ module is shutdown\n"); + } +} + +static void +bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) +{ + if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { + if (bp->hwrm_spec_code >= 0x10201) + req->auto_pause = + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) + req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) + req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX; + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); + } else { + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) + req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX; + if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) + req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); + if (bp->hwrm_spec_code >= 0x10201) { + req->auto_pause = req->force_pause; + req->enables |= cpu_to_le32( + PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); + } + } + netif_info(bp, link, bp->dev, "Configuring FW pause auto: 0x%x force: 0x%x\n", + req->auto_pause, req->force_pause); +} + +static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) +{ + if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) { + req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK); + req->auto_link_speeds2_mask = 
cpu_to_le16(bp->link_info.advertising); + } else if (bp->link_info.advertising) { + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); + req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising); + } + if (bp->link_info.advertising_pam4) { + req->enables |= + cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK); + req->auto_link_pam4_speed_mask = + cpu_to_le16(bp->link_info.advertising_pam4); + } + netif_info(bp, link, bp->dev, "Advertising FW autoneg speeds NRZ: 0x%x PAM4: 0x%x\n", + (u32)bp->link_info.advertising, + (u32)bp->link_info.advertising_pam4); + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE); + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG); + } else { + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + req->force_link_speeds2 = cpu_to_le16(bp->link_info.req_link_speed); + req->enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2; + netif_info(bp, link, bp->dev, "Forcing FW speed2: %d\n", + (u32)bp->link_info.req_link_speed); + } else if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) { + req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED); + netif_info(bp, link, bp->dev, "Forcing FW NRZ speed: %d\n", + (u32)bp->link_info.req_link_speed); + } else { + req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed); + netif_info(bp, link, bp->dev, "Forcing FW PAM4 speed: %d\n", + (u32)bp->link_info.req_link_speed); + } + } + + /* tell chimp that the setting takes effect immediately */ + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); +} + +int bnxt_hwrm_set_pause(struct bnxt *bp) +{ + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + bnxt_hwrm_set_pause_common(bp, req); + + if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || 
+ bp->link_info.force_link_chng) + bnxt_hwrm_set_link_common(bp, req); + + rc = hwrm_req_send(bp, req); + if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { + /* since changing of pause setting doesn't trigger any link + * change event, the driver needs to update the current pause + * result upon successfully return of the phy_cfg command + */ + bp->link_info.pause = + bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl; + bp->link_info.auto_pause_setting = 0; + if (!bp->link_info.force_link_chng) + bnxt_report_link(bp); + } + bp->link_info.force_link_chng = false; + return rc; +} + +static void bnxt_hwrm_set_eee(struct bnxt *bp, + struct hwrm_port_phy_cfg_input *req) +{ + struct ethtool_keee *eee = &bp->eee; + + if (eee->eee_enabled) { + u16 eee_speeds; + u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; + + if (eee->tx_lpi_enabled) + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; + else + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; + + req->flags |= cpu_to_le32(flags); + eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); + req->eee_link_speed_mask = cpu_to_le16(eee_speeds); + req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); + netif_info(bp, link, bp->dev, "Enabling FW EEE speeds 0x%x lpi %d\n", + eee_speeds, eee->tx_lpi_enabled); + } else { + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); + netif_info(bp, link, bp->dev, "Disabling FW EEE\n"); + } +} + +int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) +{ + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + if (set_pause) + bnxt_hwrm_set_pause_common(bp, req); + + bnxt_hwrm_set_link_common(bp, req); + + if (set_eee) + bnxt_hwrm_set_eee(bp, req); + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_shutdown_link(struct bnxt *bp) +{ + struct hwrm_port_phy_cfg_input *req; + int rc; + + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return 0; + + if 
(!BNXT_SINGLE_PF(bp)) + return 0; + + if (pci_num_vf(bp->pdev) && + !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); + rc = hwrm_req_send(bp, req); + if (!rc) { + mutex_lock(&bp->link_lock); + /* Device is not obliged link down in certain scenarios, even + * when forced. Setting the state unknown is consistent with + * driver startup and will force link state to be reported + * during subsequent open based on PORT_PHY_QCFG. + */ + bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN; + bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); + } + return rc; +} + +static int bnxt_fw_reset_via_optee(struct bnxt *bp) +{ +#ifdef CONFIG_TEE_BNXT_FW + int rc = tee_bnxt_fw_load(); + + if (rc) + netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc); + + return rc; +#else + netdev_err(bp->dev, "OP-TEE not supported\n"); + return -ENODEV; +#endif +} + +static int bnxt_try_recover_fw(struct bnxt *bp) +{ + if (bp->fw_health && bp->fw_health->status_reliable) { + int retry = 0, rc; + u32 sts; + + do { + sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + rc = bnxt_hwrm_poll(bp); + if (!BNXT_FW_IS_BOOTING(sts) && + !BNXT_FW_IS_RECOVERING(sts)) + break; + retry++; + } while (rc == -EBUSY && retry < BNXT_FW_RETRY); + + if (rc) + netdev_err(bp->dev, + "Firmware not responding, rc: %d status: 0x%x\n", + rc, sts); + + if (!BNXT_FW_IS_HEALTHY(sts)) + rc = -ENODEV; + + if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) { + netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n"); + return bnxt_fw_reset_via_optee(bp); + } + return rc; + } + netdev_warn(bp->dev, "Firmware health status not reliable\n"); + return -ENODEV; +} + +static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + if (!BNXT_NEW_RM(bp)) + return; /* no resource reservations required */ + 
+ hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; + hw_resc->resv_irqs = 0; + hw_resc->resv_tx_rings = 0; + hw_resc->resv_rx_rings = 0; + hw_resc->resv_hw_ring_grps = 0; + hw_resc->resv_vnics = 0; + hw_resc->resv_rsscos_ctxs = 0; + + if (!fw_reset) { + bp->tx_nr_rings = 0; + bp->rx_nr_rings = 0; + } +} + +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +{ + int rc; + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (rc) + netdev_err(bp->dev, "resc_qcaps failed\n"); + + bnxt_clear_reservations(bp, fw_reset); + + return rc; +} + +static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) +{ + struct hwrm_func_drv_if_change_output *resp; + struct hwrm_func_drv_if_change_input *req; + bool fw_reset = !bp->irq_tbl; + bool resc_reinit = false; + bool caps_change = false; + int rc, retry = 0; + u32 flags = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); + if (rc) + return rc; + + if (up) + req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + resp = hwrm_req_hold(bp, req); + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); + while (retry < BNXT_FW_IF_RETRY) { + rc = hwrm_req_send(bp, req); + if (rc != -EAGAIN) + break; + + msleep(50); + retry++; + } + + if (rc == -EAGAIN) { + hwrm_req_drop(bp, req); + return rc; + } else if (!rc) { + flags = le32_to_cpu(resp->flags); + } else if (up) { + rc = bnxt_try_recover_fw(bp); + fw_reset = true; + } + hwrm_req_drop(bp, req); + if (rc) + return rc; + + if (!up) { + bnxt_inv_fw_health_reg(bp); + return 0; + } + + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE) + resc_reinit = true; + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || + test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) + fw_reset = true; + else + bnxt_remap_fw_health_regs(bp); + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) { + netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + 
return -ENODEV; + } + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) + caps_change = true; + + if (resc_reinit || fw_reset || caps_change) { + if (fw_reset || caps_change) { + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_ulp_irq_stop(bp); + bnxt_free_ctx_mem(bp); + bnxt_dcb_free(bp, true); + rc = bnxt_fw_init_one(bp); + if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + return rc; + } + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (rc) { + clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + netdev_err(bp->dev, "init int mode failed\n"); + return rc; + } + } + rc = bnxt_cancel_reservations(bp, fw_reset); + } + return rc; +} + +static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) +{ + struct hwrm_port_led_qcaps_output *resp; + struct hwrm_port_led_qcaps_input *req; + struct bnxt_pf_info *pf = &bp->pf; + int rc; + + bp->num_leds = 0; + if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { + int i; + + bp->num_leds = resp->num_leds; + memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) * + bp->num_leds); + for (i = 0; i < bp->num_leds; i++) { + struct bnxt_led_info *led = &bp->leds[i]; + __le16 caps = led->led_state_caps; + + if (!led->led_group_id || + !BNXT_LED_ALT_BLINK_CAP(caps)) { + bp->num_leds = 0; + break; + } + } + } + hwrm_req_drop(bp, req); + return 0; +} + +int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) +{ + struct hwrm_wol_filter_alloc_output *resp; + struct hwrm_wol_filter_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); + if (rc) + return rc; + + req->port_id = 
cpu_to_le16(bp->pf.port_id); + req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + netif_err(bp, wol, bp->dev, "WOL failed to add FW filter, rc = %d\n", + rc); + } else { + bp->wol_filter_id = resp->wol_filter_id; + netif_info(bp, wol, bp->dev, "WOL added FW filter: %d\n", + (u32)bp->wol_filter_id); + } + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) +{ + struct hwrm_wol_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req->wol_filter_id = bp->wol_filter_id; + + rc = hwrm_req_send(bp, req); + if (rc) + netif_err(bp, wol, bp->dev, "WOL failed to remove FW filter: %d, rc = %d\n", + (u32)bp->wol_filter_id, rc); + else + netif_info(bp, wol, bp->dev, "WOL removed FW filter: %d\n", + (u32)bp->wol_filter_id); + return rc; +} + +static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) +{ + struct hwrm_wol_filter_qcfg_output *resp; + struct hwrm_wol_filter_qcfg_input *req; + u16 next_handle = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->handle = cpu_to_le16(handle); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + next_handle = le16_to_cpu(resp->next_handle); + if (next_handle != 0) { + if (resp->wol_type == + WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) { + bp->wol = 1; + bp->wol_filter_id = resp->wol_filter_id; + } + } + } + hwrm_req_drop(bp, req); + return next_handle; +} + +static void bnxt_get_wol_settings(struct bnxt *bp) +{ + u16 handle = 0; + + bp->wol = 0; + if (!BNXT_PF(bp) || !(bp->flags & 
BNXT_FLAG_WOL_CAP)) + return; + + do { + handle = bnxt_hwrm_get_wol_fltrs(bp, handle); + } while (handle && handle != 0xffff); +} + +static bool bnxt_eee_config_ok(struct bnxt *bp) +{ + struct ethtool_keee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) + return true; + + if (eee->eee_enabled) { +#ifdef HAVE_ETHTOOL_KEEE + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); +#else + u32 advertising; +#endif + + _bnxt_fw_to_linkmode(advertising, link_info->advertising); + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + eee->eee_enabled = 0; + return false; + } +#ifdef HAVE_ETHTOOL_KEEE + if (linkmode_andnot(tmp, eee->advertised, advertising)) { + linkmode_and(eee->advertised, advertising, + eee->supported); + return false; + } +#else + if (eee->advertised & ~advertising) { + eee->advertised = advertising & eee->supported; + return false; + } +#endif + } + return true; +} + +static int bnxt_update_phy_setting(struct bnxt *bp) +{ + int rc; + bool update_link = false; + bool update_pause = false; + bool update_eee = false; + struct bnxt_link_info *link_info = &bp->link_info; + + if (!BNXT_CHIP_SUPPORTS_PHY(bp)) + return 0; + + rc = bnxt_update_link(bp, true); + if (rc) { + netdev_err(bp->dev, "failed to update link (rc: %x)\n", + rc); + return rc; + } + if (!BNXT_SINGLE_PF(bp)) + return 0; + + if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != + link_info->req_flow_ctrl) + update_pause = true; + if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && + link_info->force_pause_setting != link_info->req_flow_ctrl) + update_pause = true; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + if (BNXT_AUTO_MODE(link_info->auto_mode)) + update_link = true; + if (bnxt_force_speed_updated(link_info)) + update_link = true; + if (link_info->req_duplex != link_info->duplex_setting) + update_link = true; + } else 
{ + if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) + update_link = true; + if (bnxt_auto_speed_updated(link_info)) + update_link = true; + } + + /* The last close may have shutdown the link, so need to call + * PHY_CFG to bring it back up. + */ + if (!BNXT_LINK_IS_UP(bp)) + update_link = true; + + if (!bnxt_eee_config_ok(bp)) + update_eee = true; + + if (update_link) + rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); + else if (update_pause) + rc = bnxt_hwrm_set_pause(bp); + if (rc) { + netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n", + rc); + return rc; + } + + return rc; +} + +static int bnxt_init_dflt_ring_mode(struct bnxt *bp); + +static int bnxt_reinit_after_abort(struct bnxt *bp) +{ + int rc; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return -EBUSY; + + if (bp->dev->reg_state == NETREG_UNREGISTERED) + return -ENODEV; + + rc = bnxt_fw_init_one(bp); + if (!rc) { + bnxt_clear_int_mode(bp); + rc = bnxt_init_int_mode(bp); + if (!rc) { + bnxt_dl_health_fw_status_update(bp, true); + clear_bit(BNXT_STATE_ABORT_ERR, &bp->state); + set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); + } + } + return rc; +} + +static int bnxt_set_xps_mapping(struct bnxt *bp) +{ + int numa_node = dev_to_node(&bp->pdev->dev); + unsigned int q_idx, map_idx, cpu, i; + const struct cpumask *cpu_mask_ptr; + int nr_cpus = num_online_cpus(); + cpumask_t *q_map; + int rc = 0; + + q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL); + if (!q_map) + return -ENOMEM; + + /* Create CPU mask for all TX queues across MQPRIO traffic classes. + * Each TC has the same number of TX queues. The nth TX queue for each + * TC will have the same CPU mask. 
+ */ + for (i = 0; i < nr_cpus; i++) { + map_idx = i % bp->tx_nr_rings_per_tc; + cpu = cpumask_local_spread(i, numa_node); + cpu_mask_ptr = get_cpu_mask(cpu); + cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr); + } + + /* Register CPU mask for each TX queue excluding the ones marked for XDP */ + for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) { + map_idx = q_idx % bp->tx_nr_rings_per_tc; + rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx); + if (rc) { + netdev_warn(bp->dev, "Error setting XPS for q:%d\n", q_idx); + break; + } + } + + kfree(q_map); + + return rc; +} + +static void bnxt_cfg_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr) +{ + struct bnxt_ntuple_filter *ntp_fltr; + struct bnxt_l2_filter *l2_fltr; + + if (list_empty(&fltr->list)) + return; + + if (fltr->type == BNXT_FLTR_TYPE_NTUPLE) { + ntp_fltr = container_of(fltr, struct bnxt_ntuple_filter, base); + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + ntp_fltr->l2_fltr = l2_fltr; + if (bnxt_hwrm_cfa_ntuple_filter_alloc(bp, ntp_fltr)) { + bnxt_del_ntp_filter(bp, ntp_fltr); + netdev_err(bp->dev, "restoring previously configured ntuple filter id %d failed\n", + fltr->sw_id); + } + } else if (fltr->type == BNXT_FLTR_TYPE_L2) { + l2_fltr = container_of(fltr, struct bnxt_l2_filter, base); + if (bnxt_hwrm_l2_filter_alloc(bp, l2_fltr)) { + bnxt_del_l2_filter(bp, l2_fltr); + netdev_err(bp->dev, "restoring previously configured l2 filter id %d failed\n", + fltr->sw_id); + } + } +} + +static void bnxt_cfg_usr_fltrs(struct bnxt *bp) +{ + struct bnxt_filter_base *usr_fltr, *tmp; + + list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) + bnxt_cfg_one_usr_fltr(bp, usr_fltr); +} + +static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + netif_info(bp, ifup, bp->dev, "Opening irq: %d link: %d\n", + irq_re_init, link_re_init); + +#ifdef BNXT_SKIP_CARRIER_OFF + if 
(!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) +#endif + netif_carrier_off(bp->dev); + if (irq_re_init) { + /* Reserve rings now if none were reserved at driver probe. */ + rc = bnxt_init_dflt_ring_mode(bp); + if (rc) { + netdev_err(bp->dev, "Failed to reserve default rings at open\n"); + return rc; + } + } + rc = bnxt_reserve_rings(bp, irq_re_init); + if (rc) + return rc; + + rc = bnxt_alloc_mem(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto open_err_free_mem; + } + + if (BNXT_TRUFLOW_EN(bp)) + bp->get_pkt_dev = bnxt_tf_get_pkt_dev; + else + bp->get_pkt_dev = bnxt_get_pkt_dev; + + if (irq_re_init) { + bnxt_init_napi(bp); + rc = bnxt_request_irq(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); + goto open_err_irq; + } + } + + rc = bnxt_init_nic(bp, irq_re_init); + if (rc) { + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto open_err_irq; + } + + /* Initializes Truflow only when + * CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD is enabled. 
+ */ + bnxt_custom_tf_port_init(bp); + + bnxt_enable_napi(bp); + bnxt_debug_dev_init(bp); + + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_udcc_session_debugfs_add(bp); + + if (link_re_init) { + mutex_lock(&bp->link_lock); + rc = bnxt_update_phy_setting(bp); + mutex_unlock(&bp->link_lock); + if (rc) { + netdev_warn(bp->dev, "failed to update phy settings\n"); + if (BNXT_SINGLE_PF(bp)) { + bp->link_info.phy_retry = true; + bp->link_info.phy_retry_expires = + jiffies + 5 * HZ; + } + } + } + + if (irq_re_init) { +#ifdef HAVE_UDP_TUNNEL_NIC + udp_tunnel_nic_reset_ntf(bp->dev); +#elif defined(HAVE_NDO_UDP_TUNNEL) + udp_tunnel_get_rx_info(bp->dev); +#elif defined(HAVE_NDO_ADD_VXLAN) +#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) + vxlan_get_rx_port(bp->dev); +#endif + if (!bnxt_hwrm_tunnel_dst_port_alloc( + bp, htons(0x17c1), + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) + atomic_set(&bp->nge_port_cnt, 1); +#endif /* HAVE_UDP_TUNNEL_NIC */ + rc = bnxt_set_xps_mapping(bp); + if (rc) + netdev_warn(bp->dev, "failed to set xps mapping\n"); + } + +#ifdef HAVE_XDP_FRAME + if (bp->tx_nr_rings_xdp < num_possible_cpus()) { + if (!static_key_enabled(&bnxt_xdp_locking_key)) + static_branch_enable(&bnxt_xdp_locking_key); + } else if (static_key_enabled(&bnxt_xdp_locking_key)) { + static_branch_disable(&bnxt_xdp_locking_key); + } +#endif + set_bit(BNXT_STATE_OPEN, &bp->state); + bnxt_enable_int(bp); + if (bp->ktls_info) /* in case vnic does not support kTLS RX */ + netdev_update_features(bp->dev); + /* Enable TX queues */ + bnxt_tx_enable(bp); + mod_timer(&bp->timer, jiffies + bp->current_interval); + /* Poll link status and check for SFP+ module status */ + mutex_lock(&bp->link_lock); + bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); + + /* VF-reps may need to be re-opened after the PF is re-opened */ + if (BNXT_PF(bp)) + bnxt_vf_reps_open(bp); + bnxt_ptp_init_rtc(bp, true); + bnxt_ptp_cfg_tstamp_filters(bp); + if (bp->ptp_cfg) + 
WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_hwrm_realloc_rss_ctx_vnic(bp); + bnxt_cfg_usr_fltrs(bp); + + rc = bnxt_nic_flows_open(bp); + if (rc) + netdev_warn(bp->dev, "NIC flow support will not be available\n"); + +#if defined(HAVE_ETF_QOPT_OFFLOAD) + bnxt_set_txr_etf_bmap(bp); +#endif + return 0; + +open_err_irq: + bnxt_del_napi(bp); + +open_err_free_mem: + bnxt_free_skbs(bp); + bnxt_free_irq(bp); + bnxt_free_mem(bp, true); + return rc; +} + +/* rtnl_lock held */ +int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + int rc = 0; + + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) + rc = -EIO; + if (!rc) + rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); + if (rc) { + netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); + dev_close(bp->dev); + } + return rc; +} + +/* rtnl_lock held, open the NIC half way by allocating all resources, but + * NAPI, IRQ, and TX are not enabled. This is mainly used for offline + * self tests. + */ +int bnxt_half_open_nic(struct bnxt *bp) +{ + int rc = 0; + + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, + "Aborting NIC open as a previous firmware reset not completed\n"); + rc = -EIO; + goto half_open_err; + } + + rc = bnxt_alloc_mem(bp, true); + if (rc) { + netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); + goto half_open_err; + } + bnxt_init_napi(bp); + set_bit(BNXT_STATE_HALF_OPEN, &bp->state); + rc = bnxt_init_nic(bp, true); + if (rc) { + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); + bnxt_del_napi(bp); + netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); + goto half_open_err; + } + return 0; + +half_open_err: + bnxt_free_skbs(bp); + bnxt_free_mem(bp, true); + dev_close(bp->dev); + return rc; +} + +/* rtnl_lock held, this call can only be made after a previous successful + * call to bnxt_half_open_nic(). 
+ */ +void bnxt_half_close_nic(struct bnxt *bp) +{ + bnxt_hwrm_resource_free(bp, false, true); + bnxt_del_napi(bp); + bnxt_free_skbs(bp); + bnxt_free_mem(bp, true); + clear_bit(BNXT_STATE_HALF_OPEN, &bp->state); +} + +void bnxt_reenable_sriov(struct bnxt *bp) +{ + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + int n = pf->active_vfs; + + if (n) + bnxt_cfg_hw_sriov(bp, &n, true); + } +} + +static int bnxt_open(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + rc = bnxt_reinit_after_abort(bp); + if (rc) { + if (rc == -EBUSY) + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n"); + else + netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n"); + return -ENODEV; + } + } + + rc = bnxt_hwrm_if_change(bp, true); + if (rc) + return rc; + + rc = __bnxt_open_nic(bp, true, true); +#ifndef PCIE_SRIOV_CONFIGURE + if (!rc) + bnxt_start_sriov(bp, num_vfs); +#endif + if (rc) { + bnxt_hwrm_if_change(bp, false); + } else { + if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_queue_sp_work(bp, BNXT_RESTART_ULP_SP_EVENT); + } + } + + return rc; +} + +static bool bnxt_drv_busy(struct bnxt *bp) +{ + return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || + test_bit(BNXT_STATE_IN_UDCC_TASK, &bp->state) || + test_bit(BNXT_STATE_READ_STATS, &bp->state) || + bnxt_ktls_busy(bp) || + bnxt_tfc_busy(bp)); +} + +#ifdef NETDEV_GET_STATS64 +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats); +#endif + +static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, + bool link_re_init) +{ + netif_info(bp, ifdown, bp->dev, "Closing irq: %d link: %d\n", + irq_re_init, link_re_init); + + /* Deinitializes Truflow only when + * CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD is enabled. 
+ */ + bnxt_custom_tf_port_deinit(bp); + + /* Close the VF-reps before closing PF */ + if (BNXT_PF(bp)) { + if (bnxt_tc_is_switchdev_mode(bp)) + bnxt_vf_reps_close(bp); + } + + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + bnxt_udcc_session_debugfs_cleanup(bp); + else + bnxt_udcc_session_db_cleanup(bp); + + bnxt_debug_dev_exit(bp); + bnxt_dbr_cancel(bp); + + /* Remove NIC flows via MPC before open state change */ + bnxt_nic_flows_close(bp); + + /* Change device state to avoid TX queue wake up's */ + bnxt_tx_disable(bp); + + clear_bit(BNXT_STATE_OPEN, &bp->state); + smp_mb__after_atomic(); + while (bnxt_drv_busy(bp)) + msleep(20); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); + /* Flush rings and disable interrupts */ + bnxt_shutdown_nic(bp, irq_re_init); + + /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */ + + bnxt_disable_napi(bp); + del_timer_sync(&bp->timer); + bnxt_free_skbs(bp); + +#ifdef NETDEV_GET_STATS64 + /* Save ring stats before shutdown */ + if (bp->bnapi && irq_re_init) { + bnxt_get_ring_stats(bp, &bp->net_stats_prev); + bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); + } +#endif + if (irq_re_init) { + bnxt_free_irq(bp); + bnxt_del_napi(bp); + } + bnxt_free_mem(bp, irq_re_init); +} + +void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +{ + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + /* If we get here, it means firmware reset is in progress + * while we are trying to close. We can safely proceed with + * the close because we are holding rtnl_lock(). Some firmware + * messages may fail as we proceed to close. We set the + * ABORT_ERR flag here so that the FW reset thread will later + * abort when it gets the rtnl_lock() and sees the flag. 
+ */ + netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); + set_bit(BNXT_STATE_ABORT_ERR, &bp->state); + } + +#ifdef CONFIG_BNXT_SRIOV + if (bp->sriov_cfg) { + int rc; + + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, + !bp->sriov_cfg, + BNXT_SRIOV_CFG_WAIT_TMO); + if (!rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); + else if (rc < 0) + netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); + } +#endif + __bnxt_close_nic(bp, irq_re_init, link_re_init); +} + +static int bnxt_close(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + bnxt_close_nic(bp, true, true); + bnxt_hwrm_shutdown_link(bp); + bnxt_hwrm_if_change(bp, false); + + return 0; +} + +static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 *val) +{ + struct hwrm_port_phy_mdio_read_output *resp; + struct hwrm_port_phy_mdio_read_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); + if (mdio_phy_id_is_c45(phy_addr)) { + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); + } + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *val = le32_to_cpu(resp->reg_data); + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, + u16 val) +{ + struct hwrm_port_phy_mdio_write_input *req; + int rc; + + if (bp->hwrm_spec_code < 0x10a00) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = 
cpu_to_le16(reg & 0x1f); + if (mdio_phy_id_is_c45(phy_addr)) { + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); + } + req->reg_data = cpu_to_le16(val); + + return hwrm_req_send(bp, req); +} + +int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp) +{ + struct hwrm_port_mac_qcfg_output *resp; + struct hwrm_port_mac_qcfg_input *req; + u16 port_svif_info; + int rc; + + bp->port_svif = BNXT_SVIF_INVALID; + + if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto hwrm_mac_qcfg_exit; + + port_svif_info = le16_to_cpu(resp->port_svif_info); + if (port_svif_info & + PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_VALID) + bp->port_svif = port_svif_info & + PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_MASK; + +hwrm_mac_qcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_get_dflt_roce_vnic(struct bnxt *bp, u16 fid, u16 *vnic_id) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + req->fid = cpu_to_le16(fid); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto drop_req; + + if (le16_to_cpu(resp->flags) & FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID) + *vnic_id = le16_to_cpu(resp->roce_vnic_id); + + netdev_dbg(bp->dev, "RoCE VNIC 0x%x for fid %d\n", *vnic_id, req->fid); + +drop_req: + hwrm_req_drop(bp, req); + return rc; +} + +/* rtnl_lock held */ +static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mdio = if_mii(ifr); + struct bnxt *bp = netdev_priv(dev); + int rc; + + switch (cmd) { + case SIOCGMIIPHY: + mdio->phy_id = bp->link_info.phy_addr; + + fallthrough; + case SIOCGMIIREG: { + u16 
mii_regval = 0; + + if (!netif_running(dev)) + return -EAGAIN; + + rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num, + &mii_regval); + mdio->val_out = mii_regval; + return rc; + } + + case SIOCSMIIREG: + if (!netif_running(dev)) + return -EAGAIN; + + return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num, + mdio->val_in); + +#ifdef HAVE_IEEE1588_SUPPORT + case SIOCSHWTSTAMP: + return bnxt_hwtstamp_set(dev, ifr); + + case SIOCGHWTSTAMP: + return bnxt_hwtstamp_get(dev, ifr); +#endif + + default: + /* do nothing */ + break; + } + return -EOPNOTSUPP; +} + +#ifdef NETDEV_GET_STATS64 + +u64 bnxt_add_ring_rx_pkts(u64 *sw) +{ + return BNXT_GET_RING_STATS64(sw, rx_ucast_pkts) + + BNXT_GET_RING_STATS64(sw, rx_mcast_pkts) + + BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); +} + +u64 bnxt_add_ring_tx_pkts(u64 *sw) +{ + return BNXT_GET_RING_STATS64(sw, tx_ucast_pkts) + + BNXT_GET_RING_STATS64(sw, tx_mcast_pkts) + + BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); +} + +u64 bnxt_add_ring_rx_bytes(u64 *sw) +{ + return BNXT_GET_RING_STATS64(sw, rx_ucast_bytes) + + BNXT_GET_RING_STATS64(sw, rx_mcast_bytes) + + BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); +} + +u64 bnxt_add_ring_tx_bytes(u64 *sw) +{ + return BNXT_GET_RING_STATS64(sw, tx_ucast_bytes) + + BNXT_GET_RING_STATS64(sw, tx_mcast_bytes) + + BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); +} + +void bnxt_add_ring_stats(struct rtnl_link_stats64 *stats, u64 *sw) +{ + stats->rx_packets += bnxt_add_ring_rx_pkts(sw); + stats->tx_packets += bnxt_add_ring_tx_pkts(sw); + stats->rx_bytes += bnxt_add_ring_rx_bytes(sw); + stats->tx_bytes += bnxt_add_ring_tx_bytes(sw); + + stats->rx_missed_errors += + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); +} + +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi 
*bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u64 *sw = cpr->stats.sw_stats; + + bnxt_add_ring_stats(stats, sw); + stats->rx_dropped += + cpr->sw_stats->rx.rx_netpoll_discards + + cpr->sw_stats->rx.rx_oom_discards; + } +} + +void bnxt_get_vf_stats(struct bnxt *bp, u16 vf_idx, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(bp->pf.vf); + if (vf) { + u64 *sw = vf[vf_idx].stats.sw_stats; + + bnxt_add_ring_stats(stats, sw); + } + rcu_read_unlock(); +} + +static void bnxt_add_prev_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) +{ + struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; + + stats->rx_packets += prev_stats->rx_packets; + stats->tx_packets += prev_stats->tx_packets; + stats->rx_bytes += prev_stats->rx_bytes; + stats->tx_bytes += prev_stats->tx_bytes; + stats->rx_missed_errors += prev_stats->rx_missed_errors; + stats->multicast += prev_stats->multicast; + stats->rx_dropped += prev_stats->rx_dropped; + stats->tx_dropped += prev_stats->tx_dropped; +} + +#ifdef NETDEV_GET_STATS64_VOID +static void +#else +static struct rtnl_link_stats64 * +#endif +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct bnxt *bp = netdev_priv(dev); + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + /* Make sure bnxt_close_nic() sees that we are reading stats before + * we check the BNXT_STATE_OPEN flag. 
+ */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + *stats = bp->net_stats_prev; +#ifdef NETDEV_GET_STATS64_VOID + return; +#else + return stats; +#endif + } + + bnxt_get_ring_stats(bp, stats); + bnxt_add_prev_stats(bp, stats); + + if (bp->flags & BNXT_FLAG_PORT_STATS) { + u64 *rx = bp->port_stats.sw_stats; + u64 *tx = bp->port_stats.sw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + stats->rx_crc_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + stats->rx_frame_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + stats->rx_length_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); + stats->rx_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + stats->collisions = + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); + stats->tx_fifo_errors = + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); + } + clear_bit(BNXT_STATE_READ_STATS, &bp->state); +#ifndef NETDEV_GET_STATS64_VOID + return stats; +#endif +} +#else +static struct net_device_stats *bnxt_get_stats(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct net_device_stats *stats = &dev->stats; + int i; + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + return stats; + } + + memset(stats, 0, sizeof(struct net_device_stats)); + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct ctx_hw_stats *hw_stats = cpr->stats.hw_stats; + + stats->rx_packets += GET_NET_STATS(hw_stats->rx_ucast_pkts); + stats->rx_packets += GET_NET_STATS(hw_stats->rx_mcast_pkts); + 
stats->rx_packets += GET_NET_STATS(hw_stats->rx_bcast_pkts); + + stats->tx_packets += GET_NET_STATS(hw_stats->tx_ucast_pkts); + stats->tx_packets += GET_NET_STATS(hw_stats->tx_mcast_pkts); + stats->tx_packets += GET_NET_STATS(hw_stats->tx_bcast_pkts); + + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_ucast_bytes); + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_mcast_bytes); + stats->rx_bytes += GET_NET_STATS(hw_stats->rx_bcast_bytes); + + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_ucast_bytes); + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_mcast_bytes); + stats->tx_bytes += GET_NET_STATS(hw_stats->tx_bcast_bytes); + + stats->rx_missed_errors += + GET_NET_STATS(hw_stats->rx_discard_pkts); + stats->multicast += GET_NET_STATS(hw_stats->rx_mcast_pkts); + stats->tx_dropped += GET_NET_STATS(hw_stats->tx_error_pkts); + } + + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct rx_port_stats *rx = bp->port_stats.hw_stats; + struct tx_port_stats *tx = bp->port_stats.hw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET; + + stats->rx_crc_errors = GET_NET_STATS(rx->rx_fcs_err_frames); + stats->rx_frame_errors = GET_NET_STATS(rx->rx_align_err_frames); + stats->rx_length_errors = GET_NET_STATS(rx->rx_undrsz_frames) + + GET_NET_STATS(rx->rx_ovrsz_frames) + + GET_NET_STATS(rx->rx_runt_frames); + stats->rx_errors = GET_NET_STATS(rx->rx_false_carrier_frames) + + GET_NET_STATS(rx->rx_jbr_frames); + stats->collisions = GET_NET_STATS(tx->tx_total_collisions); + stats->tx_fifo_errors = GET_NET_STATS(tx->tx_fifo_underruns); + stats->tx_errors = GET_NET_STATS(tx->tx_err); + } + + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + return &dev->stats; +} +#endif + +static void bnxt_get_one_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_sw_stats *sw_stats = cpr->sw_stats; + u64 *hw_stats = cpr->stats.sw_stats; + + stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; + stats->rx_total_resets += 
sw_stats->rx.rx_resets; + stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; + stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; + stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; + stats->rx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); + stats->tx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); + stats->total_missed_irqs += sw_stats->cmn.missed_irqs; +} + +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) + bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); +} + +static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; +#ifdef HAVE_DEV_ADDR_LIST + struct dev_addr_list *ha; +#else + struct netdev_hw_addr *ha; +#endif + u8 *haddr; + int mc_count = 0; + bool update = false; + int off = 0; + + netdev_for_each_mc_addr(ha, dev) { + if (mc_count >= BNXT_MAX_MC_ADDRS) { + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + if (vnic->mc_list_count) + netdev_info(bp->dev, "Max supported (%d) MCAST filters exceeded. 
Turning on ALL_MCAST mode\n", + BNXT_MAX_MC_ADDRS); + vnic->mc_list_count = 0; + return false; + } +#ifdef HAVE_DEV_ADDR_LIST + haddr = ha->da_addr; +#else + haddr = ha->addr; +#endif + if (!ether_addr_equal(haddr, vnic->mc_list + off)) { + memcpy(vnic->mc_list + off, haddr, ETH_ALEN); + update = true; + } + off += ETH_ALEN; + mc_count++; + } + if (mc_count) + *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + + if (mc_count != vnic->mc_list_count) { + vnic->mc_list_count = mc_count; + update = true; + } + return update; +} + +static bool bnxt_uc_list_updated(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + struct netdev_hw_addr *ha; + int off = 0; + + if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1)) + return true; + + netdev_for_each_uc_addr(ha, dev) { + if (!ether_addr_equal(ha->addr, vnic->uc_list + off)) + return true; + + off += ETH_ALEN; + } + return false; +} + +static void bnxt_set_rx_mode(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic; + bool mc_update = false; + bool uc_update; + u32 mask; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) + return; + + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + mask = vnic->rx_mask; + mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | + CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST | + CFA_L2_SET_RX_MASK_REQ_MASK_BCAST); + + if (dev->flags & IFF_PROMISC) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + uc_update = bnxt_uc_list_updated(bp); + + if (dev->flags & IFF_BROADCAST) + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + if (dev->flags & IFF_ALLMULTI) { + mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else if (dev->flags & IFF_MULTICAST) { + mc_update = bnxt_mc_list_updated(bp, &mask); + } + + if (mask != vnic->rx_mask || uc_update || mc_update) { + vnic->rx_mask = mask; + + bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); + } +} 
+ +static int bnxt_cfg_rx_mode(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + struct bnxt_vnic_info *vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + struct netdev_hw_addr *ha; + int i, off = 0, rc; + bool uc_update; + + netif_addr_lock_bh(dev); + uc_update = bnxt_uc_list_updated(bp); + netif_addr_unlock_bh(dev); + + if (!uc_update) + goto skip_uc; + + for (i = 1; i < vnic->uc_filter_count; i++) { + struct bnxt_l2_filter *fltr = vnic->l2_filters[i]; + + bnxt_hwrm_l2_filter_free(bp, fltr); + bnxt_del_l2_filter(bp, fltr); + } + + vnic->uc_filter_count = 1; + + netif_addr_lock_bh(dev); + if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + } else { + netdev_for_each_uc_addr(ha, dev) { + memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN); + off += ETH_ALEN; + vnic->uc_filter_count++; + } + } + netif_addr_unlock_bh(dev); + + for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) { + rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off); + if (rc) { + if (BNXT_VF(bp) && rc == -ENODEV) { + if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n"); + else + netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n"); + rc = 0; + } else { + netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); + } + vnic->uc_filter_count = i; + return rc; + } + } + if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n"); + + +skip_uc: + if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) && + !bnxt_promisc_ok(bp)) + vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) { + netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n", + rc); + vnic->rx_mask 
&= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST; + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); + if (!rc) + vnic->flags |= BNXT_VNIC_ALL_MCAST_FLAG; + } else if (!rc && (vnic->flags & BNXT_VNIC_ALL_MCAST_FLAG)) { + if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) { + netdev_info(bp->dev, "turning off ALL_MCAST mode\n"); + vnic->flags &= ~BNXT_VNIC_ALL_MCAST_FLAG; + } + } + if (rc) + netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n", + rc); + + return rc; +} + +static bool bnxt_can_reserve_rings(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + /* No minimum rings were provisioned by the PF. Don't + * reserve rings by default when device is down. + */ + if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings) + return true; + + if (!netif_running(bp->dev)) + return false; + } +#endif + return true; +} + +/* If the chip and firmware supports RFS */ +static bool bnxt_rfs_supported(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) + return true; + return false; + } + /* 212 firmware is broken for aRFS */ + if (BNXT_FW_MAJ(bp) == 212) + return false; + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) + return true; + if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP) + return true; + return false; +} + +/* If runtime conditions support RFS */ +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx) +{ + struct bnxt_hw_rings hwr = {0}; + int max_vnics, max_rss_ctxs; + + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + !BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + return bnxt_rfs_supported(bp); + if (!bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings) + return false; + + hwr.grp = bp->rx_nr_rings; + hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings); + if (new_rss_ctx) + hwr.vnic++; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); + max_vnics = 
bnxt_get_max_func_vnics(bp); + max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp); + + if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) { + if (bp->rx_nr_rings > 1) + netdev_warn(bp->dev, + "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", + min(max_rss_ctxs - 1, max_vnics - 1)); + return false; + } + + if (!BNXT_NEW_RM(bp)) + return true; + + if (hwr.vnic <= bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) + return true; + + bnxt_hwrm_reserve_rings(bp, &hwr); + if (hwr.vnic <= bp->hw_resc.resv_vnics && + hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs) + return true; + + netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); + hwr.vnic = 1; + hwr.rss_ctx = 0; + bnxt_hwrm_reserve_rings(bp, &hwr); + return false; +} + +#ifdef NETDEV_FEATURE_CONTROL +static netdev_features_t bnxt_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct bnxt *bp = netdev_priv(dev); + netdev_features_t vlan_features; + + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false)) + features &= ~NETIF_F_NTUPLE; + + if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog || + !BNXT_TPA_MTU_OK(bp)) + features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); + + if (!(features & NETIF_F_GRO)) + features &= ~NETIF_F_GRO_HW; + + if (features & NETIF_F_GRO_HW) + features &= ~NETIF_F_LRO; + + /* Both CTAG and STAG VLAN accelaration on the RX side have to be + * turned on or off together. 
+ */ + vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; + if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; + else if (vlan_features) + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + } +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && bp->vf.vlan) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; +#endif + + if (bp->ktls_info && bp->vnic_info) { + if (bp->vnic_info[BNXT_VNIC_DEFAULT].metadata_format != + VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_4) + features &= ~NETIF_F_HW_TLS_RX; + } + return features; +} + +static int bnxt_reinit_features(struct bnxt *bp, bool irq_re_init, bool link_re_init, + u32 flags, bool update_tpa) +{ + bnxt_close_nic(bp, irq_re_init, link_re_init); + bp->flags = flags; + if (update_tpa) + bnxt_set_ring_params(bp); + return bnxt_open_nic(bp, irq_re_init, link_re_init); +} + +static int bnxt_set_features(struct net_device *dev, netdev_features_t features) +{ + bool update_tpa = false, update_ntuple = false; + struct bnxt *bp = netdev_priv(dev); + u32 flags = bp->flags; + u32 changes; + int rc = 0; + bool re_init = false; + + flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) +#ifdef HAVE_NETIF_F_GRO_HW + if (features & NETIF_F_GRO_HW) +#else + if ((features & NETIF_F_GRO) && BNXT_SUPPORTS_TPA(bp) && + BNXT_TPA_MTU_OK(bp)) +#endif + flags |= BNXT_FLAG_GRO; + else if (features & NETIF_F_LRO) +#else + if (features & NETIF_F_LRO) +#endif + flags |= BNXT_FLAG_LRO; + + if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) + flags &= ~BNXT_FLAG_TPA; + + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) + flags |= BNXT_FLAG_STRIP_VLAN; + + if (features & NETIF_F_NTUPLE) + flags |= BNXT_FLAG_RFS; + else + bnxt_clear_usr_fltrs(bp, true); + + changes = flags ^ bp->flags; + if (changes & BNXT_FLAG_TPA) { + update_tpa = true; + if ((bp->flags & BNXT_FLAG_TPA) == 0 || + (flags & BNXT_FLAG_TPA) == 0 || + (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + re_init = 
true; + } + + if (changes & ~BNXT_FLAG_TPA) + re_init = true; + + if (changes & BNXT_FLAG_RFS) + update_ntuple = true; + + if (flags != bp->flags) { + u32 old_flags = bp->flags; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bp->flags = flags; + if (update_tpa) + bnxt_set_ring_params(bp); + return rc; + } + + if (update_ntuple) + return bnxt_reinit_features(bp, true, false, flags, update_tpa); + + if (re_init) + return bnxt_reinit_features(bp, false, false, flags, update_tpa); + + if (update_tpa) { + bp->flags = flags; + rc = bnxt_set_tpa(bp, + (flags & BNXT_FLAG_TPA) ? + true : false); + if (rc) + bp->flags = old_flags; + } + } + return rc; +} +#endif + +#ifdef HAVE_NDO_FEATURES_CHECK + +static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, + u8 **nextp) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); +#ifdef DHAVE_IPV6_BIG_TCP + struct hop_jumbo_hdr *jhdr; +#endif + int hdr_count = 0; + u8 *nexthdr; + int start; + + /* Check that there are at most 2 IPv6 extension headers, no + * fragment header, and each is <= 64 bytes. + */ + start = nw_off + sizeof(*ip6h); + nexthdr = &ip6h->nexthdr; + while (ipv6_ext_hdr(*nexthdr)) { + struct ipv6_opt_hdr *hp; + int hdrlen; + + if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE || + *nexthdr == NEXTHDR_FRAGMENT) + return false; + hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data, + skb_headlen(skb), NULL); + if (!hp) + return false; + if (*nexthdr == NEXTHDR_AUTH) + hdrlen = ipv6_authlen(hp); + else + hdrlen = ipv6_optlen(hp); + + if (hdrlen > 64) + return false; + +#ifdef DHAVE_IPV6_BIG_TCP + /* The ext header may be a hop-by-hop header inserted for + * big TCP purposes. This will be removed before sending + * from NIC, so do not count it. 
+ */ + if (*nexthdr == NEXTHDR_HOP) { + if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) + goto increment_hdr; + + jhdr = (struct hop_jumbo_hdr *)hp; + if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || + jhdr->nexthdr != IPPROTO_TCP) + goto increment_hdr; + + goto next_hdr; + } +increment_hdr: + hdr_count++; +next_hdr: +#else + hdr_count++; +#endif /* DHAVE_IPV6_BIG_TCP */ + nexthdr = &hp->nexthdr; + start += hdrlen; + } + if (nextp) { + /* Caller will check inner protocol */ + if (skb->encapsulation) { + *nextp = nexthdr; + return true; + } + *nextp = NULL; + } + /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */ + return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP; +} + +/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */ +static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb) +{ + struct udphdr *uh = udp_hdr(skb); + __be16 udp_port = uh->dest; + + if (udp_port != bp->vxlan_port && udp_port != bp->nge_port && + udp_port != bp->vxlan_gpe_port) + return false; + if (skb->inner_protocol == htons(ETH_P_TEB)) { + struct ethhdr *eh = inner_eth_hdr(skb); + + switch (eh->h_proto) { + case htons(ETH_P_IP): + return true; + case htons(ETH_P_IPV6): + return bnxt_exthdr_check(bp, skb, + skb_inner_network_offset(skb), + NULL); + } + } else if (skb->inner_protocol == htons(ETH_P_IP)) { + return true; + } else if (skb->inner_protocol == htons(ETH_P_IPV6)) { + return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb), + NULL); + } + return false; +} + +static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto) +{ + switch (l4_proto) { + case IPPROTO_UDP: + return bnxt_udp_tunl_check(bp, skb); + case IPPROTO_IPIP: + return true; + case IPPROTO_GRE: { + switch (skb->inner_protocol) { + default: + return false; + case htons(ETH_P_IP): + return true; + case htons(ETH_P_IPV6): + fallthrough; + } + } + case IPPROTO_IPV6: + /* Check ext headers of inner ipv6 */ + return bnxt_exthdr_check(bp, skb, 
skb_inner_network_offset(skb), + NULL); + } + return false; +} + +static netdev_features_t bnxt_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct bnxt *bp = netdev_priv(dev); + u8 *l4_proto; + + features = vlan_features_check(skb, features); + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + if (!skb->encapsulation) + return features; + l4_proto = &ip_hdr(skb)->protocol; + if (bnxt_tunl_check(bp, skb, *l4_proto)) + return features; + break; + case htons(ETH_P_IPV6): + if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb), + &l4_proto)) + break; + if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto)) + return features; + break; + } + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} +#endif + +static int bnxt_dbg_hwrm_wr_reg(struct bnxt *bp, u32 reg_off, u32 reg_val) +{ + struct hwrm_dbg_write_direct_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_WRITE_DIRECT); + if (rc) + return rc; + + req->write_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + /* TODO: support reg write to one register for now */ + req->write_len32 = cpu_to_le32(1); + req->write_data[0] = cpu_to_le32(reg_val); + return hwrm_req_send(bp, req); +} + +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf) +{ + struct hwrm_dbg_read_direct_output *resp; + struct hwrm_dbg_read_direct_input *req; + __le32 *dbg_reg_buf; + dma_addr_t mapping; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); + if (rc) + return rc; + + dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, + &mapping); + if (!dbg_reg_buf) { + rc = -ENOMEM; + goto dbg_rd_reg_exit; + } + + req->host_dest_addr = cpu_to_le64(mapping); + + resp = hwrm_req_hold(bp, req); + req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req->read_len32 = cpu_to_le32(num_words); + + rc = hwrm_req_send(bp, req); + if (rc || resp->error_code) { + rc = -EIO; + goto dbg_rd_reg_exit; + } + for (i = 0; i < 
num_words; i++) + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); + +dbg_rd_reg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, + u32 ring_id, u32 *prod, u32 *cons) +{ + struct hwrm_dbg_ring_info_get_output *resp; + struct hwrm_dbg_ring_info_get_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); + if (rc) + return rc; + + req->ring_type = ring_type; + req->fw_ring_id = cpu_to_le32(ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *prod = resp->producer_index; + *cons = resp->consumer_index; + } + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_dbg_dump_hw_ring(struct bnxt *bp, u32 index) +{ + u32 val[15] = {0xDEADDEAD}; + u32 fw_ring_id; + struct bnxt_napi *bnapi; + struct bnxt_tx_ring_info *txr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr, *cpr2; + int i; + + if (!netif_msg_hw(bp) || BNXT_VF(bp)) + return; + + bnapi = bp->bnapi[index]; + rxr = bnapi->rx_ring; + cpr = &bnapi->cp_ring; + + bnxt_for_each_napi_tx(i, bnapi, txr) { + /* TBD prod/cons */ + fw_ring_id = txr->tx_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_TX, + fw_ring_id, &val[0], &val[1]); + cpr2 = txr->tx_cpr; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[2], &val[3]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[0]); + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[1]); + bnxt_dbg_hwrm_rd_reg(bp, + BDETBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[3]); + } + netdev_info(bp->dev, "[%d.%d]: TBD{prod: %x cons: %x cp prod: %x cp cons: %x}", + index, i, val[0], val[1], val[2], val[3]); + } + } + + if (!rxr) + goto 
skip_rxr; + + /* RBD prod/cons */ + fw_ring_id = rxr->rx_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_RX, + fw_ring_id, &val[4], &val[5]); + cpr2 = &cpr->cp_ring_arr[0]; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[6], &val[7]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[4]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[5]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[7]); + } + } + /* AGG RBD prod/cons */ + fw_ring_id = rxr->rx_agg_ring_struct.fw_ring_id; + if (fw_ring_id != INVALID_HW_RING_ID) { + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_RX, + fw_ring_id, &val[8], &val[9]); + cpr2 = &cpr->cp_ring_arr[0]; + fw_ring_id = cpr2->cp_ring_struct.fw_ring_id; + bnxt_dbg_hwrm_ring_info_get(bp, + DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[10], &val[11]); + } else { + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_PRODUCER_IDX + + fw_ring_id * 4, 1, &val[8]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_REQ_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[9]); + bnxt_dbg_hwrm_rd_reg(bp, + BDERBD_REG_BD_CMPL_CONSUMER_IDX + + fw_ring_id * 4, 1, &val[11]); + } + } + +skip_rxr: + /* CAG prod/cons/vector ctrl */ + fw_ring_id = cpr->cp_ring_struct.fw_ring_id; + if (fw_ring_id < 1024) { + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_PRODUCER_INDEX_REG + fw_ring_id * 4, 1, + &val[12]); + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG + fw_ring_id * 4, 1, + &val[13]); + bnxt_dbg_hwrm_rd_reg(bp, + CAG_REG_CAG_VECTOR_CTRL + fw_ring_id * 4, 1, &val[14]); + } else if (fw_ring_id != INVALID_HW_RING_ID) { + bnxt_dbg_hwrm_wr_reg(bp, + 
CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET, fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_PRODUCER_INDEX_REG, 1, + &val[12]); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET, 0); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET, fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_CONSUMER_INDEX_REG, 1, + &val[13]); + bnxt_dbg_hwrm_wr_reg(bp, + CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET, 0); + bnxt_dbg_hwrm_wr_reg(bp, CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET, + fw_ring_id); + bnxt_dbg_hwrm_rd_reg(bp, CAG_REG_CAG_VECTOR_CTRL, 1, &val[14]); + bnxt_dbg_hwrm_wr_reg(bp, CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET, + 0); + } + netdev_info(bp->dev, "[%d]: RBD{prod: %x cons: %x cp prod: %x cp cons: %x} " + "RBD AGG{prod: %x cons: %x cp prod: %x cp cons: %x} " + "CAG{prod: %x cons: %x vec: %x}\n", index, + val[4], val[5], val[6], val[7], + val[8], val[9], val[10], val[11], + val[12], val[13], val[14]); +} + +static void bnxt_dbg_dump_hw_states(struct bnxt *bp) +{ + int rc, i; + u32 val[32] = {0xDEADDEAD}; + u32 dbg_sel; + + if (!netif_msg_hw(bp) || BNXT_VF(bp)) + return; + + /* dump tdc interrupt status */ + rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_INT_STS_0, 1, val); + if (!rc) + netdev_info(bp->dev, "TDC_REG_INT_STS_0: %x\n", val[0]); + /* dump tdc debug bus */ + netdev_info(bp->dev, "TDC debug bus dump:\n"); + dbg_sel = 0x80000000; + for (i = 0; i < 5; i++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, TDC_REG_TDC_DEBUG_CNTL, dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, TDC_REG_TDC_DEBUG_STATUS, 1, val); + if (rc) + break; + netdev_info(bp->dev, "\tdbg_sel %08x: %08x\n", dbg_sel, val[0]); + dbg_sel++; + } + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return; + + /* dump tdi debug bus */ + netdev_info(bp->dev, "TDI debug bus dump:\n"); + dbg_sel = 0xf; + rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_DWORD_ENABLE, dbg_sel); + if (!rc) { + rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA, 1, val); + if (!rc) + netdev_info(bp->dev, 
"\tTDI_REG_DBG_DWORD_ENABLE (%x): " + "%08x\n", dbg_sel, val[0]); + for (dbg_sel = 2; dbg_sel < 0x12; dbg_sel++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, TDI_REG_DBG_SELECT, + dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, TDI_REG_DBG_OUT_DATA, + 8, val); + if (rc) + break; + netdev_info(bp->dev, "\tTDI_REG_DBG_OUT_DATA: " + "%08x %08x %08x %08x " + "%08x %08x %08x %08x\n", + val[0], val[1], val[2], val[3], + val[4], val[5], val[6], val[7]); + } + } + /* dump te_dec port and cmd credits */ + rc = bnxt_dbg_hwrm_rd_reg(bp, TE_DEC_REG_PORT_CURRENT_CREDIT_REG, 32, + val); + if (!rc) { + netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CREDIT_REG: " + "%x %x %x\n", val[0], val[1], val[2]); + netdev_info(bp->dev, "TE_DEC_REG_PORT_CURRENT_CMD_CREDIT_REG: " + "%x %x %x\n", val[16], val[17], val[18]); + } + /* dump partial RDI debug bus */ + netdev_info(bp->dev, "RDI debug bus dump:\n"); + dbg_sel = 0x80000000; + for (i = 0; i < 3; i++) { + rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG, + dbg_sel); + if (rc) + break; + rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG, + 1, val); + if (rc) + break; + netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]); + dbg_sel++; + } + dbg_sel = 0x80001000; + rc = bnxt_dbg_hwrm_wr_reg(bp, RDI_REG_RDI_DEBUG_CONTROL_REG, + dbg_sel); + if (!rc) + rc = bnxt_dbg_hwrm_rd_reg(bp, RDI_REG_RDI_DEBUG_STATUS_REG, + 1, val); + if (!rc) + netdev_info(bp->dev, "\tdbg_sel %x: %08x\n", dbg_sel, val[0]); +} + +static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_tx_ring_info *txr; + int i = bnapi->index, j; + + bnxt_for_each_napi_tx(j, bnapi, txr) + netdev_info(bnapi->bp->dev, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", + i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, + txr->tx_cons); +} + +static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + int i = bnapi->index; + + if (!rxr) + return; + + netdev_info(bnapi->bp->dev, "[%d]: 
rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", + i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, + rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, + rxr->rx_sw_agg_prod); +} + +static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2; + int i = bnapi->index, j; + + netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); + for (j = 0; j < cpr->cp_ring_count; j++) { + cpr2 = &cpr->cp_ring_arr[j]; + if (!cpr2->bnapi) + continue; + netdev_info(bnapi->bp->dev, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, j, cpr2->cp_ring_struct.fw_ring_id, cpr2->cp_raw_cons); + } +} + +static void bnxt_dbg_dump_states(struct bnxt *bp) +{ + int i; + struct bnxt_napi *bnapi; + + for (i = 0; i < bp->cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + if (netif_msg_drv(bp)) { + bnxt_dump_tx_sw_state(bnapi); + bnxt_dump_rx_sw_state(bnapi); + bnxt_dump_cp_sw_state(bnapi); + } + bnxt_dbg_dump_hw_ring(bp, i); + } + bnxt_dbg_dump_hw_states(bp); + bnxt_log_ring_contents(bp); +} + +static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + struct hwrm_ring_reset_input *req; + struct bnxt_napi *bnapi = rxr->bnapi; + struct bnxt_cp_ring_info *cpr; + u16 cp_ring_id; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_RING_RESET); + if (rc) + return rc; + + cpr = &bnapi->cp_ring; + cp_ring_id = cpr->cp_ring_struct.fw_ring_id; + req->cmpl_ring = cpu_to_le16(cp_ring_id); + req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_req_send_silent(bp, req); +} + +static void bnxt_reset_task(struct bnxt *bp, bool silent) +{ + if (!silent) { + bnxt_dbg_dump_states(bp); + usleep_range(10, 50); + bnxt_dbg_dump_states(bp); + } + + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, !silent, false); + 
bnxt_open_nic(bp, !silent, false); + } +} + +static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bnxt *bp = netdev_priv(dev); + +#ifdef BNXT_SKIP_CARRIER_OFF + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; +#endif + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { + netdev_warn_once(bp->dev, + "Bailing out from false TX timeout as FW in Fatal err state\n"); + return; + } + + if (atomic_read(&bp->dbr.event_cnt) > 0) { + netdev_warn_once(bp->dev, + "Bailing out from a false TX timeout\n"); + return; + } + + if (bp->flags & BNXT_FLAG_CORE_RESET_TX_TIMEOUT) { + netdev_err(bp->dev, "TX timeout detected, starting core-reset task!\n"); + set_bit(BNXT_RESET_TASK_CORE_RESET_SP_EVENT, &bp->sp_event); + } else { + netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + } + __bnxt_queue_sp_work(bp); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void bnxt_poll_controller(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int i; + + /* Only process tx rings/combined rings in netpoll mode. 
*/ + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + napi_schedule(&txr->bnapi->napi); + } +} +#endif + +static void bnxt_fw_health_check(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + bool hb_fail = false; + u32 val; + u16 sts; + + if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + /* Make sure it is enabled before checking the tmr_counter */ + smp_mb(); + if (fw_health->tmr_counter) { + fw_health->tmr_counter--; + return; + } + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) { + hb_fail = true; + goto fw_reset; + } + + fw_health->last_fw_heartbeat = val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + goto fw_reset; + + fw_health->tmr_counter = fw_health->tmr_multiplier; + return; + +fw_reset: + /* Check if device is accessible before jumping to force reset firmware */ + pci_read_config_word(bp->pdev, PCI_VENDOR_ID, &sts); + if (sts == 0xffff) + return; + + if (hb_fail) + fw_health->arrests++; + else + fw_health->discoveries++; + + bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); +} + +#ifdef HAVE_TIMER_SETUP +static void bnxt_timer(struct timer_list *t) +{ + struct bnxt *bp = from_timer(bp, t, timer); +#else +static void bnxt_timer(unsigned long data) +{ + struct bnxt *bp = (struct bnxt *)data; +#endif + struct net_device *dev = bp->dev; + bool queue_work = false; + + if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state)) + return; + + if (atomic_read(&bp->intr_sem) != 0) + goto bnxt_restart_timer; + + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + bnxt_fw_health_check(bp); + + if (((bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) || BNXT_LINK_IS_UP(bp)) && + bp->stats_coal_ticks) { + set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); + queue_work = true; + } + + if (bnxt_tc_flower_enabled(bp)) { + set_bit(BNXT_FLOW_STATS_SP_EVENT, 
&bp->sp_event); + queue_work = true; + } + +#if !defined HAVE_PTP_DO_AUX_WORK + if (bp->ptp_cfg && (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { + set_bit(BNXT_PTP_CURRENT_TIME_EVENT, &bp->sp_event); + queue_work = true; + } +#endif + +#ifdef CONFIG_RFS_ACCEL + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { + set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); + queue_work = true; + } +#endif /*CONFIG_RFS_ACCEL*/ + + if (bp->link_info.phy_retry) { + if (time_after(jiffies, bp->link_info.phy_retry_expires)) { + bp->link_info.phy_retry = false; + netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); + } else { + set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); + queue_work = true; + } + } + + if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) { + set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); + queue_work = true; + } + + if (BNXT_CHIP_P5(bp) && (!bp->chip_rev || bp->dbr.enable) && + netif_carrier_ok(dev)) { + set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); + queue_work = true; + } + + if (queue_work) + __bnxt_queue_sp_work(bp); +bnxt_restart_timer: + mod_timer(&bp->timer, jiffies + bp->current_interval); +} + +static void bnxt_rtnl_lock_sp(struct bnxt *bp) +{ + /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK + * set. If the device is being closed, bnxt_close() may be holding + * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we + * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 
+ */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); +} + +static void bnxt_rtnl_unlock_sp(struct bnxt *bp) +{ + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); +} + +/* Only called from bnxt_sp_task() */ +static void bnxt_fw_core_reset(struct bnxt *bp) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) { + if (!bnxt_firmware_reset_chip(bp->dev)) { + netdev_info(bp->dev, "Firmware reset request successful.\n"); + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) + netdev_info(bp->dev, "Reload driver to complete reset\n"); + } + + if (!BNXT_CHIP_P4_PLUS(bp)) { + if (!bnxt_firmware_reset_ap(bp->dev)) + netdev_info(bp->dev, "Reset application processor successful.\n"); + } + } + bnxt_rtnl_unlock_sp(bp); +} + +/* Only called from bnxt_sp_task() */ +static void bnxt_reset(struct bnxt *bp, bool silent) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_reset_task(bp, silent); + bnxt_rtnl_unlock_sp(bp); +} + +/* Only called from bnxt_sp_task() */ +static void bnxt_rx_ring_reset(struct bnxt *bp) +{ + int i; + + bnxt_rtnl_lock_sp(bp); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + bnxt_rtnl_unlock_sp(bp); + return; + } + /* Disable and flush TPA before resetting the RX ring */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, false); + for (i = 0; i < bp->rx_nr_rings; i++) { + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; + struct bnxt_cp_ring_info *cpr; + int rc; + + if (!rxr->bnapi->in_reset) + continue; + + rc = bnxt_hwrm_rx_ring_reset(bp, i); + if (rc) { + if (rc == -EINVAL || rc == -EOPNOTSUPP) + netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n"); + else + netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n", + rc); + bnxt_reset_task(bp, true); + break; + } + bnxt_free_one_rx_ring_skbs(bp, i); + rxr->rx_prod = 0; + rxr->rx_agg_prod = 0; + rxr->rx_sw_agg_prod = 0; + rxr->rx_next_cons = 0; + rxr->bnapi->in_reset 
= false; + bnxt_alloc_one_rx_ring(bp, i); + cpr = &rxr->bnapi->cp_ring; + cpr->sw_stats->rx.rx_resets++; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } + if (bp->flags & BNXT_FLAG_TPA) + bnxt_set_tpa(bp, true); + bnxt_rtnl_unlock_sp(bp); +} + +static inline void bnxt_fw_error_tf_reinit(struct bnxt *bp) +{ + int rc; + + if (!BNXT_TF_RESET_IS_NEEDED(bp)) + return; + + rc = bnxt_tf_port_init(bp, BNXT_TF_FLAG_NONE); + if (rc) + netdev_err(bp->dev, "Truflow initialization failed during FW reset\n"); +} + +static inline void bnxt_fw_error_tf_deinit(struct bnxt *bp) +{ + if (!BNXT_TF_RESET_IS_NEEDED(bp)) + return; + + bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_NONE); +} + +static void bnxt_fw_fatal_close(struct bnxt *bp) +{ + bnxt_tx_disable(bp); + bnxt_disable_napi(bp); + bnxt_disable_int_sync(bp); + bnxt_free_irq(bp); + bnxt_clear_int_mode(bp); + pci_disable_device(bp->pdev); +} + +static void bnxt_fw_reset_close(struct bnxt *bp) +{ + /* When firmware is in fatal state, quiesce device and disable + * bus master to prevent any potential bad DMAs before freeing + * kernel memory. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) { + u16 val = 0; + + pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); + if (val == 0xffff) + bp->fw_reset_min_dsecs = 0; + bnxt_fw_fatal_close(bp); + } + __bnxt_close_nic(bp, true, false); + bnxt_vf_reps_free(bp); + bnxt_fw_error_tf_deinit(bp); + bnxt_clear_int_mode(bp); + bnxt_hwrm_func_drv_unrgtr(bp); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); + bnxt_free_ctx_mem(bp); +} + +static bool is_bnxt_fw_ok(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + bool no_heartbeat = false, has_reset = false; + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG); + if (val == fw_health->last_fw_heartbeat) + no_heartbeat = true; + + val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + if (val != fw_health->last_fw_reset_cnt) + has_reset = true; + + if (!no_heartbeat && has_reset) + return true; + + return false; +} + +/* rtnl_lock is acquired before calling this function */ +static void bnxt_force_fw_reset(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 wait_dsecs; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return; + + if (ptp) { +#ifdef HAVE_IEEE1588_SUPPORT + bnxt_save_pre_reset_ts(bp); + spin_lock_bh(&bp->ptp_cfg->ptp_lock); +#endif + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); +#ifdef HAVE_IEEE1588_SUPPORT + spin_unlock_bh(&bp->ptp_cfg->ptp_lock); +#endif + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } + bnxt_fw_reset_close(bp); + wait_dsecs = fw_health->master_func_wait_dsecs; + if (fw_health->primary) { + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) + wait_dsecs = 0; + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } else { + bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10; + wait_dsecs = fw_health->normal_func_wait_dsecs; + bp->fw_reset_state = 
BNXT_FW_RESET_STATE_ENABLE_DEV; + } + + bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs; + bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); +} + +void bnxt_fw_exception(struct bnxt *bp) +{ + netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); + set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + bnxt_ulp_stop(bp); + bnxt_rtnl_lock_sp(bp); + bnxt_force_fw_reset(bp); + bnxt_rtnl_unlock_sp(bp); +} + +/* Returns the number of registered VFs, or 1 if VF configuration is pending, or + * < 0 on error. + */ +static int bnxt_get_registered_vfs(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + int rc; + + if (!BNXT_PF(bp)) + return 0; + + rc = bnxt_hwrm_func_qcfg(bp); + if (rc) { + netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc); + return rc; + } + if (bp->pf.registered_vfs) + return bp->pf.registered_vfs; + if (bp->sriov_cfg) + return 1; +#endif + return 0; +} + +void bnxt_fw_reset(struct bnxt *bp) +{ + bnxt_ulp_stop(bp); + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int n = 0, tmo; + + if (ptp) { +#ifdef HAVE_IEEE1588_SUPPORT + bnxt_save_pre_reset_ts(bp); + spin_lock_bh(&bp->ptp_cfg->ptp_lock); +#endif + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); +#ifdef HAVE_IEEE1588_SUPPORT + spin_unlock_bh(&bp->ptp_cfg->ptp_lock); +#endif + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } + if (bp->pf.active_vfs && + !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + n = bnxt_get_registered_vfs(bp); + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n", + n); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + dev_close(bp->dev); + goto fw_reset_exit; + } else if (n > 0) { + u16 vf_tmo_dsecs = n * 10; + + if (bp->fw_reset_max_dsecs < vf_tmo_dsecs) + bp->fw_reset_max_dsecs = vf_tmo_dsecs; + bp->fw_reset_state = + 
BNXT_FW_RESET_STATE_POLL_VF; + bnxt_queue_fw_reset_work(bp, HZ / 10); + goto fw_reset_exit; + } + bnxt_fw_reset_close(bp); + if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ /10; + } + bnxt_queue_fw_reset_work(bp, tmo); + } +fw_reset_exit: + bnxt_rtnl_unlock_sp(bp); +} + +static bool bnxt_has_missed_cp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct bnxt_cp_ring_info *cpr, bool nq) +{ + u32 val[2]; + +#ifdef DEV_NETMAP + if (cpr->netmapped) + return false; +#endif + if (!cpr->bnapi || cpr->has_more_work || !bnxt_has_work(bp, cpr)) + return false; + + if (cpr->cp_raw_cons != cpr->last_cp_raw_cons) { + cpr->last_cp_raw_cons = cpr->cp_raw_cons; + return false; + } + + cpr->sw_stats->cmn.missed_irqs++; + + if (!bp->chip_rev) { + u32 fw_ring_id = cpr->cp_ring_struct.fw_ring_id; + + if (!nq) + bnxt_dbg_hwrm_ring_info_get(bp, DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL, + fw_ring_id, &val[0], &val[1]); + else + netdev_err(bp->dev, "Unable to recover missed irq!\n"); + } else { + local_bh_disable(); + napi_schedule(&bnapi->napi); + local_bh_enable(); + } + + return true; +} + +static void bnxt_chk_missed_irq(struct bnxt *bp) +{ + int i; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + int j; + + if (!bnapi) + continue; + + cpr = &bnapi->cp_ring; + if (bnxt_has_missed_cp(bp, bnapi, cpr, true)) { + netdev_warn(bp->dev, + "Recovering missed irq [%d]: raw_cons: %x\n", + i, cpr->cp_raw_cons); + continue; + } + + for (j = 0; j < cpr->cp_ring_count; j++) { + struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; + + if (bnxt_has_missed_cp(bp, bnapi, cpr2, false)) { + netdev_warn(bp->dev, + "Recovering missed irq [%d.%d]: raw_cons: %x\n", + i, j, cpr2->cp_raw_cons); + } + } + } 
+} + +static void bnxt_cfg_ntp_filters(struct bnxt *); + +static void bnxt_vf_vnic_change(struct bnxt *bp) +{ + struct bnxt_pf_info *pf = &bp->pf; + int i, num_vfs = pf->active_vfs; + + if (!num_vfs) + return; + + for (i = 0; i < num_vfs; i++) + bnxt_commit_vf_vnic(bp, i); + bnxt_cfg_ntp_filters(bp); + bnxt_reset_vf_stats(bp); +} + +static void bnxt_init_ethtool_link_settings(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + link_info->autoneg = BNXT_AUTONEG_SPEED; + if (bp->hwrm_spec_code >= 0x10201) { + if (link_info->auto_pause_setting & + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } else { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } + bnxt_set_auto_speed(link_info); + } else { + bnxt_set_force_speed(link_info); + link_info->req_duplex = link_info->duplex_setting; + } + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->req_flow_ctrl = + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; + else + link_info->req_flow_ctrl = link_info->force_pause_setting; +} + +static void bnxt_fw_echo_reply(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + struct hwrm_func_echo_response_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); + if (rc) + return; + req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); + req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); + hwrm_req_send(bp, req); +} + +static void bnxt_ulp_restart(struct bnxt *bp) +{ + if (!bnxt_ulp_registered(bp->edev)) + return; + + bnxt_ulp_stop(bp); + bnxt_ulp_start(bp, 0); +} + +static void bnxt_sp_task(struct work_struct *work) +{ + struct bnxt *bp = container_of(work, struct bnxt, sp_task); + + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + return; + } + + if 
(test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) { + bnxt_ulp_restart(bp); + bnxt_reenable_sriov(bp); + } + + if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) + bnxt_cfg_rx_mode(bp); + + if (test_and_clear_bit(BNXT_VF_VNIC_CHANGE_SP_EVENT, &bp->sp_event)) + bnxt_vf_vnic_change(bp); + if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) + bnxt_cfg_ntp_filters(bp); + if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_exec_fwd_req(bp); +#ifndef HAVE_UDP_TUNNEL_NIC + if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { + if (bnxt_hwrm_tunnel_dst_port_alloc(bp, bp->vxlan_port_pending, + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN)) + atomic_set(&bp->vxlan_port_cnt, 0); + } + if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); + } + if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { + if (bnxt_hwrm_tunnel_dst_port_alloc(bp, bp->nge_port_pending, + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) + atomic_set(&bp->nge_port_cnt, 0); + } + if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); + } +#endif + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!\n"); + + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { + if (BNXT_LINK_IS_UP(bp)) { + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); + bnxt_hwrm_port_ecn_qstats(bp, 0); + bnxt_hwrm_vf_qstats(bp, 0); + bnxt_hwrm_generic_qstats(bp, 0); + bnxt_accumulate_all_stats(bp); + } + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + bnxt_hwrm_lpbk_qstats(bp, 0); + bnxt_accumulate_stats(&bp->lpbk_stats); + } + } + + if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + int rc; + + 
mutex_lock(&bp->link_lock); + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + + rc = bnxt_update_link(bp, true); + if (rc) + netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", + rc); + + if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, + &bp->sp_event)) + bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); + } + if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) { + int rc; + + mutex_lock(&bp->link_lock); + rc = bnxt_update_phy_setting(bp); + mutex_unlock(&bp->link_lock); + if (rc) { + netdev_warn(bp->dev, "update phy settings retry failed\n"); + } else { + bp->link_info.phy_retry = false; + netdev_info(bp->dev, "update phy settings retry succeeded\n"); + } + } + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { + mutex_lock(&bp->link_lock); + bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); + } + + if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) + bnxt_tc_flow_stats_work(bp); + + if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event)) + bnxt_chk_missed_irq(bp); + +#if !defined HAVE_PTP_DO_AUX_WORK + if (test_and_clear_bit(BNXT_PTP_CURRENT_TIME_EVENT, &bp->sp_event)) + bnxt_ptp_timer(bp); +#endif + + if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event)) + bnxt_fw_echo_reply(bp); + + if (test_and_clear_bit(BNXT_VF_CFG_CHNG_SP_EVENT, &bp->sp_event)) + bnxt_update_vf_cfg(bp); + + if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event)) + bnxt_hwmon_notify_event(bp); + + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They + * must be the last functions to be called before exiting. 
+ */ + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, false); + + if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, true); + + if (test_and_clear_bit(BNXT_RESET_TASK_CORE_RESET_SP_EVENT, &bp->sp_event)) + bnxt_fw_core_reset(bp); + + if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event)) + bnxt_rx_ring_reset(bp); + + if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) { + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || + test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) + bnxt_devlink_health_fw_report(bp); + else + bnxt_fw_reset(bp); + } + + if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) { + if (!is_bnxt_fw_ok(bp)) + bnxt_devlink_health_fw_report(bp); + } + + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); +} + +static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, + int *max_cp); + +/* Under rtnl_lock */ +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp) +{ + int max_rx, max_tx, max_cp, tx_sets = 1, tx_cp; + struct bnxt_hw_rings hwr = {0}; + int rx_rings = rx; + int rc; + + if (tcs) + tx_sets = tcs; + + _bnxt_get_max_rings(bp, &max_rx, &max_tx, &max_cp); + + if (max_rx < rx_rings) { + netdev_warn(bp->dev, + "Resources unavailable for %d rx rings, maximum %d available\n", + rx_rings, max_rx); + return -ENOMEM; + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx_rings <<= 1; + + hwr.rx = rx_rings; + hwr.tx = tx * tx_sets + tx_xdp; + if (max_tx < hwr.tx) { + netdev_warn(bp->dev, + "Resources unavailable for %d tx rings, maximum %d available\n", + hwr.tx, max_tx); + return -ENOMEM; + } + + hwr.vnic = bnxt_get_total_vnics(bp, rx); + + tx_cp = __bnxt_num_tx_to_cp(bp, hwr.tx, tx_sets, tx_xdp); + hwr.cp = sh ? 
max_t(int, tx_cp, rx) : tx_cp + rx; + if (max_cp < hwr.cp) { + netdev_warn(bp->dev, + "Resources unavailable for %d cp rings, maximum %d available\n", + hwr.cp, max_cp); + return -ENOMEM; + } + hwr.stat = hwr.cp; + if (BNXT_NEW_RM(bp)) { + hwr.cp += bnxt_get_ulp_msix_num_in_use(bp); + hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp); + hwr.grp = rx; + hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr); + } + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + hwr.cp_p5 = hwr.tx + rx + bnxt_mpc_cp_rings_in_use(bp); + hwr.tx += bnxt_mpc_tx_rings_in_use(bp); + } + + rc = bnxt_hwrm_check_rings(bp, &hwr); + if (rc) + netdev_warn(bp->dev, + "FW unable to meet the resources requested by the driver rc: %d\n", rc); + + return rc; +} + +static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) +{ + if (bp->db_base_wc) { + iounmap(bp->db_base_wc); + bp->db_base_wc = NULL; + } + + if (bp->bar2) { + pci_iounmap(pdev, bp->bar2); + bp->bar2 = NULL; + } + + if (bp->bar1) { + pci_iounmap(pdev, bp->bar1); + bp->bar1 = NULL; + } + + if (bp->bar0) { + pci_iounmap(pdev, bp->bar0); + bp->bar0 = NULL; + } +} + +static void bnxt_cleanup_pci(struct bnxt *bp) +{ + bnxt_unmap_bars(bp, bp->pdev); + pci_release_regions(bp->pdev); + if (pci_is_enabled(bp->pdev)) + pci_disable_device(bp->pdev); +} + +static void bnxt_init_dflt_coal(struct bnxt *bp) +{ + struct bnxt_coal_cap *coal_cap = &bp->coal_cap; + struct bnxt_coal *coal; + u16 flags = 0; + + if (coal_cap->cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. 
+ */ + coal = &bp->rx_coal; + coal->coal_ticks = 6; + coal->coal_bufs = 12; + coal->coal_ticks_irq = 1; +#ifdef DEV_NETMAP + coal->coal_bufs_irq = 8; +#else + coal->coal_bufs_irq = 2; +#endif + coal->idle_thresh = 50; + coal->bufs_per_record = 2; + coal->budget = 64; /* NAPI budget */ + coal->flags = flags; + + coal = &bp->tx_coal; + coal->coal_ticks = 28; + coal->coal_bufs = 30; + coal->coal_ticks_irq = 2; + coal->coal_bufs_irq = 2; + coal->bufs_per_record = 1; + coal->flags = flags; + + bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; +} + +static int bnxt_init_mac_addr(struct bnxt *bp) +{ + int rc = 0; + + if (BNXT_PF(bp)) { + eth_hw_addr_set(bp->dev, bp->pf.mac_addr); + } else { +#ifdef CONFIG_BNXT_SRIOV + struct bnxt_vf_info *vf = &bp->vf; + bool strict_approval = true; + + if (is_valid_ether_addr(vf->mac_addr)) { + /* overwrite netdev dev_addr with admin VF MAC */ + eth_hw_addr_set(bp->dev, vf->mac_addr); + /* Older PF driver or firmware may not approve this + * correctly. + */ + strict_approval = false; + } else { + eth_hw_addr_random(bp->dev); + } + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval); +#endif + } + return rc; +} + +static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) +{ + bp->rss_cap &= ~BNXT_RSS_CAP_UDP_RSS_CAP; + bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) + bp->rss_hash_delta = bp->rss_hash_cfg; + if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { + bp->rss_cap |= BNXT_RSS_CAP_UDP_RSS_CAP; + bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } +} + +static void bnxt_set_dflt_rfs(struct bnxt *bp) +{ + struct net_device *dev = bp->dev; + + dev->hw_features &= ~NETIF_F_NTUPLE; + dev->features &= ~NETIF_F_NTUPLE; + bp->flags &= ~BNXT_FLAG_RFS; + if (bnxt_rfs_supported(bp)) { + dev->hw_features 
|= NETIF_F_NTUPLE; + if (bnxt_rfs_capable(bp, false)) { + bp->flags |= BNXT_FLAG_RFS; + dev->features |= NETIF_F_NTUPLE; + } + } +} + +static bool bnxt_is_vf_dflt_vnic_alloc(struct bnxt *bp) +{ + u32 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp); + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18))) + return true; + else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS && + (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172))) + return true; + else + return false; +} + +static int bnxt_fw_init_one_p1(struct bnxt *bp) +{ + int rc; + + bp->fw_cap = 0; + bp->fw_dbg_cap = 0; + rc = bnxt_hwrm_ver_get(bp, true); + /* FW may be unresponsive after FLR. FLR must complete within 100 msec + * so wait before continuing with recovery. + */ + if (rc) + msleep(100); + bnxt_try_map_fw_health_reg(bp); + if (rc) { + rc = bnxt_try_recover_fw(bp); + if (rc) + return rc; + rc = bnxt_hwrm_ver_get(bp, false); + if (rc) + return rc; + } + bnxt_nvm_cfg_ver_get(bp); + + rc = bnxt_hwrm_func_reset(bp); + if (rc) + return -ENODEV; + + bnxt_hwrm_fw_set_time(bp); + return 0; +} + +static int bnxt_fw_init_one_p2(struct bnxt *bp) +{ + int rc; + + bp->max_tc = 0; + bp->max_lltc = 0; + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp, true); + if (rc) { + netdev_err(bp->dev, "hwrm query capability failure rc: %x\n", + rc); + return -ENODEV; + } + + rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp); + if (rc) + netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n", + rc); + + rc = bnxt_alloc_crash_dump_mem(bp); + if (rc) + netdev_warn(bp->dev, "crash dump mem alloc failure rc: %d\n", + rc); + if (!rc) { + rc = bnxt_hwrm_crash_dump_mem_cfg(bp); + if (rc) { + bnxt_free_crash_dump_mem(bp); + netdev_warn(bp->dev, + "hwrm crash dump mem failure rc: %d\n", rc); + } + } + + rc = bnxt_alloc_udcc_info(bp); + if (rc) + netdev_warn(bp->dev, "udcc alloc failure rc: %d\n", rc); + + if (bnxt_is_vf_dflt_vnic_alloc(bp)) + bp->fw_cap |= 
BNXT_FW_CAP_VF_RESV_VNICS_MAXVFS; + + bnxt_hwrm_func_qcfg(bp); + bnxt_hwrm_vnic_qcaps(bp); + bnxt_hwrm_port_led_qcaps(bp); + bnxt_hwrm_dbr_pacing_qcfg(bp); + if (bp->fw_cap & BNXT_FW_CAP_PTP) + __bnxt_hwrm_ptp_qcfg(bp); + bnxt_ethtool_init(bp); + bnxt_dcb_init(bp); + bnxt_hwmon_init(bp); + return 0; +} + +static void bnxt_fw_init_one_p3(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bnxt_set_dflt_rss_hash_type(bp); + bnxt_set_dflt_rfs(bp); + + bnxt_get_wol_settings(bp); + if (bp->flags & BNXT_FLAG_WOL_CAP) { + netif_info(bp, wol, bp->dev, "WOL device wakeup: %d\n", + (u32)bp->wol); + device_set_wakeup_enable(&pdev->dev, bp->wol); + } else { + device_set_wakeup_capable(&pdev->dev, false); + } + + bnxt_hwrm_set_cpu_params(bp); + bnxt_hwrm_coal_params_qcaps(bp); +} + +static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt); + +int bnxt_fw_init_one(struct bnxt *bp) +{ + int rc; + + rc = bnxt_fw_init_one_p1(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 1 failed\n"); + return rc; + } + rc = bnxt_fw_init_one_p2(bp); + if (rc) { + netdev_err(bp->dev, "Firmware init phase 2 failed\n"); + return rc; + } + rc = bnxt_probe_phy(bp, false); + if (rc) + return rc; + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false); + if (rc) + return rc; + + bnxt_fw_init_one_p3(bp); + return 0; +} + +static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u32 reg = fw_health->fw_reset_seq_regs[reg_idx]; + u32 val = fw_health->fw_reset_seq_vals[reg_idx]; + u32 reg_type, reg_off, delay_msecs; + + delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx]; + reg_type = BNXT_FW_HEALTH_REG_TYPE(reg); + reg_off = BNXT_FW_HEALTH_REG_OFF(reg); + switch (reg_type) { + case BNXT_FW_HEALTH_REG_TYPE_CFG: + pci_write_config_dword(bp->pdev, reg_off, val); + break; + case BNXT_FW_HEALTH_REG_TYPE_GRC: + writel(reg_off & BNXT_GRC_BASE_MASK, + bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4); + reg_off = (reg_off & 
BNXT_GRC_OFFSET_MASK) + 0x2000; + fallthrough; + case BNXT_FW_HEALTH_REG_TYPE_BAR0: + writel(val, bp->bar0 + reg_off); + break; + case BNXT_FW_HEALTH_REG_TYPE_BAR1: + writel(val, bp->bar1 + reg_off); + break; + } + if (delay_msecs) { + pci_read_config_dword(bp->pdev, 0, &val); + msleep(delay_msecs); + } +} + +bool bnxt_hwrm_reset_permitted(struct bnxt *bp) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + bool result = true; /* firmware will enforce if unknown */ + + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return result; + + if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG)) + return result; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + if (!hwrm_req_send(bp, req)) + result = !!(le16_to_cpu(resp->flags) & + FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED); + hwrm_req_drop(bp, req); + return result; +} + +static void bnxt_reset_all(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + int i, rc; + + if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { + bnxt_fw_reset_via_optee(bp); + bp->fw_reset_timestamp = jiffies; + return; + } + + if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) { + for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) + bnxt_fw_reset_writel(bp, i); + } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { + struct hwrm_fw_reset_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (!rc) { + req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); + req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_req_send(bp, req); + } + if (rc != -ENODEV) + netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); + } + bp->fw_reset_timestamp = jiffies; +} + +static bool bnxt_fw_reset_timeout(struct bnxt *bp) +{ + return time_after(jiffies, bp->fw_reset_timestamp + + (bp->fw_reset_max_dsecs * HZ / 10)); +} + +static void 
bnxt_fw_reset_abort(struct bnxt *bp, int rc) +{ + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) + bnxt_dl_health_fw_status_update(bp, false); + bp->fw_reset_state = 0; + dev_close(bp->dev); +} + +static void bnxt_fw_reset_task(struct work_struct *work) +{ + struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); + int rc = 0; + + if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); + return; + } + + switch (bp->fw_reset_state) { + case BNXT_FW_RESET_STATE_POLL_VF: { + int n = bnxt_get_registered_vfs(bp); + int tmo; + + if (n < 0) { + netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n", + n, jiffies_to_msecs(jiffies - + bp->fw_reset_timestamp)); + goto fw_reset_abort; + } else if (n > 0) { + if (bnxt_fw_reset_timeout(bp)) { + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bp->fw_reset_state = 0; + netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n", + n); + goto ulp_start; + } + bnxt_queue_fw_reset_work(bp, HZ / 10); + return; + } + bp->fw_reset_timestamp = jiffies; + rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + goto ulp_start; + } + bnxt_fw_reset_close(bp); + if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) { + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; + tmo = HZ / 10; + } else { + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + tmo = bp->fw_reset_min_dsecs * HZ / 10; + } + bnxt_queue_fw_reset_work(bp, tmo); + rtnl_unlock(); + return; + } + case BNXT_FW_RESET_STATE_POLL_FW_DOWN: { + u32 val; + + val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (!(val & BNXT_FW_STATUS_SHUTDOWN) && + !bnxt_fw_reset_timeout(bp)) { + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + + if (!bp->fw_health->primary) { + u32 
wait_dsecs = bp->fw_health->normal_func_wait_dsecs; + + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10); + return; + } + bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW; + } + fallthrough; + case BNXT_FW_RESET_STATE_RESET_FW: + bnxt_reset_all(bp); + bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; + bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10); + return; + case BNXT_FW_RESET_STATE_ENABLE_DEV: + bnxt_inv_fw_health_reg(bp); + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) && + !bp->fw_reset_min_dsecs) { + u16 val; + + pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val); + if (val == 0xffff) { + if (bnxt_fw_reset_timeout(bp)) { + netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + rc = -ETIMEDOUT; + goto fw_reset_abort; + } + bnxt_queue_fw_reset_work(bp, HZ / 1000); + return; + } + } + clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); + clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state); + if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) && + !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) + bnxt_dl_remote_reload(bp); + if (pci_enable_device(bp->pdev)) { + netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + rc = -ENODEV; + goto fw_reset_abort; + } + pci_set_master(bp->pdev); + bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW; + fallthrough; + case BNXT_FW_RESET_STATE_POLL_FW: + bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; + rc = bnxt_hwrm_poll(bp); + if (rc) { + if (bnxt_fw_reset_timeout(bp)) { + netdev_err(bp->dev, "Firmware reset aborted\n"); + goto fw_reset_abort_status; + } + bnxt_queue_fw_reset_work(bp, HZ / 5); + return; + } + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; + fallthrough; + case BNXT_FW_RESET_STATE_OPENING: + while (!rtnl_trylock()) { + bnxt_queue_fw_reset_work(bp, HZ / 50); + return; + } + rc = bnxt_open(bp->dev); + if (rc) { + netdev_err(bp->dev, "bnxt_open() 
failed during FW reset\n"); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + goto ulp_start; + } + + if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && + bp->fw_health->enabled) { + bp->fw_health->last_fw_reset_cnt = + bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + } + bp->fw_reset_state = 0; + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_ptp_reapply_pps(bp); + bnxt_ptp_reapply_phc(bp); + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) { + bnxt_dl_health_fw_recovery_done(bp); + bnxt_dl_health_fw_status_update(bp, true); + } + rtnl_unlock(); + bnxt_ulp_start(bp, 0); + bnxt_reenable_sriov(bp); + mutex_lock(&bp->vf_rep_lock); + bnxt_fw_error_tf_reinit(bp); + bnxt_vf_reps_alloc(bp); + bnxt_vf_reps_open(bp); + mutex_unlock(&bp->vf_rep_lock); + } + return; + +fw_reset_abort_status: + if (bp->fw_health->status_reliable || + (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) { + u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + + netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); + } +fw_reset_abort: + rtnl_lock(); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); +ulp_start: + bnxt_ulp_start(bp, rc); +} + +static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) +{ + int rc; + struct bnxt *bp = netdev_priv(dev); + + SET_NETDEV_DEV(dev, &pdev->dev); + + /* enable device (incl. 
PCI PM wakeup), and bus-mastering */ + rc = pci_enable_device(pdev); + if (rc) { + dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); + goto init_err; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + dev_err(&pdev->dev, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto init_err_disable; + } + + rc = pci_request_regions(pdev, DRV_MODULE_NAME); + if (rc) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); + goto init_err_disable; + } + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 && + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, "System does not support DMA, aborting\n"); + rc = -EIO; + goto init_err_release; + } + + pci_set_master(pdev); + + bp->dev = dev; + bp->pdev = pdev; + + /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2() + * determines the BAR size. + */ + bp->bar0 = pci_ioremap_bar(pdev, 0); + if (!bp->bar0) { + dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + + bp->bar2 = pci_ioremap_bar(pdev, 4); + if (!bp->bar2) { + dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + + pci_enable_pcie_error_reporting(pdev); + + INIT_WORK(&bp->sp_task, bnxt_sp_task); + INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task); + + spin_lock_init(&bp->ntp_fltr_lock); +#if BITS_PER_LONG == 32 + spin_lock_init(&bp->db_lock); +#endif + +#ifdef DEV_NETMAP + bp->rx_ring_size = (BNXT_DEFAULT_RX_RING_SIZE * 2) + 1; +#else + bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; +#endif + bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; + +#ifdef HAVE_TIMER_SETUP + timer_setup(&bp->timer, bnxt_timer, 0); +#else + setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp); +#endif + bp->current_interval = BNXT_TIMER_INTERVAL; + + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; +#ifndef 
HAVE_UDP_TUNNEL_NIC + atomic_set(&bp->vxlan_port_cnt, 0); + atomic_set(&bp->nge_port_cnt, 0); +#endif + + clear_bit(BNXT_STATE_OPEN, &bp->state); + return 0; + +init_err_release: + bnxt_unmap_bars(bp, pdev); + pci_release_regions(pdev); + +init_err_disable: + pci_disable_device(pdev); + +init_err: + return rc; +} + +/* rtnl_lock held */ +static int bnxt_change_mac_addr(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + + rc = bnxt_approve_mac(bp, addr->sa_data, true); + if (rc) + return rc; + + eth_hw_addr_set(dev, addr->sa_data); + bnxt_clear_usr_fltrs(bp, true); + if (netif_running(dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } + + return rc; +} + +/* rtnl_lock held */ +static int bnxt_change_mtu(struct net_device *dev, int new_mtu) +{ + struct bnxt *bp = netdev_priv(dev); + +#ifndef HAVE_MIN_MTU + if (new_mtu < ETH_ZLEN || new_mtu > bp->max_mtu) + return -EINVAL; + + if (BNXT_RX_PAGE_MODE(bp) && new_mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) + return -EINVAL; + + if (bp->fw_cap & BNXT_FW_CAP_ADMIN_MTU) + return -EPERM; +#endif + +#ifdef DEV_NETMAP + if (nm_netmap_on(NA(dev)) && new_mtu != bp->dev->mtu) { + netdev_warn(bp->dev, + "Please stop netmap application to change MTU\n"); + return -EINVAL; + } +#endif + if (netif_running(dev)) + bnxt_close_nic(bp, true, false); + + dev->mtu = new_mtu; + /* Due to hardware limitations, turn off LRO and GRO_HW on older + * P3/P4 chips if MTU > 4K. 
+ */ + if (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp)) + netdev_update_features(dev); + + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) +int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) +{ + struct bnxt *bp = netdev_priv(dev); + bool sh = false; + int rc, tx_cp; + + if (tc > bp->max_tc) { + netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n", + tc, bp->max_tc); + return -EINVAL; + } + + if (bp->num_tc == tc) + return 0; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; + + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + sh, tc, bp->tx_nr_rings_xdp); + if (rc) + return rc; + + /* Needs to close the device and do hw resource re-allocations */ + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, false); + + if (tc) { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc; + netdev_set_num_tc(dev, tc); + bp->num_tc = tc; + } else { + bp->tx_nr_rings = bp->tx_nr_rings_per_tc; + netdev_reset_tc(dev); + bp->num_tc = 0; + } + + bnxt_set_tcs_queues(bp); +#ifdef VOID_NETIF_SET_NUM_TX + netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); +#else + rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings); +#endif + if (rc) + return rc; + + bp->tx_nr_rings += bp->tx_nr_rings_xdp; + tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); + bp->cp_nr_rings = sh ? 
max_t(int, tx_cp, bp->rx_nr_rings) : + tx_cp + bp->rx_nr_rings; + + if (netif_running(bp->dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +#if defined(HAVE_ETF_QOPT_OFFLOAD) +static int bnxt_setup_etf_tc(struct net_device *dev, struct tc_etf_qopt_offload *qopt) +{ + struct bnxt *bp = netdev_priv(dev); + + /* The so_txtime driver currently supports only phc RTC mode */ + if (!BNXT_SUPPORTS_ETF(bp) || !bp->ptp_cfg || + !BNXT_PTP_USE_RTC(bp) || !bp->etf_tx_ring_map) + return -EOPNOTSUPP; + + if (qopt->queue > bp->tx_nr_rings - bp->tx_nr_rings_xdp - 1) { + netdev_err(bp->dev, "Tx packet queue %d exceeds maximum tx_rings %d.\n", + qopt->queue, bp->tx_nr_rings); + return -EINVAL; + } + + /* Update etf bitmap which xmit function checks for timedbd generation */ + qopt->enable ? __set_bit(qopt->queue, bp->etf_tx_ring_map) : + __clear_bit(qopt->queue, bp->etf_tx_ring_map); + + if (netif_running(bp->dev)) + bnxt_set_txr_etf_bmap(bp); + + return 0; +} +#endif /* defined(HAVE_ETF_QOPT_OFFLOAD) */ +#endif /* defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) */ + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_BLOCK +LIST_HEAD(bnxt_block_cb_list); + +static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct bnxt *bp = cb_priv; + + if (!bnxt_tc_flower_enabled(bp)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); +#endif + default: + return -EOPNOTSUPP; + } +} + +#else /* HAVE_TC_SETUP_BLOCK */ + +static int bnxt_setup_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!bnxt_tc_flower_enabled(bp)) + return -EOPNOTSUPP; + +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower, + BNXT_TC_DEV_INGRESS); +#else + 
return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower); +#endif +} +#endif +#endif + +#if (defined(HAVE_NDO_SETUP_TC_RH) || !defined(HAVE_NDO_SETUP_TC_RH72)) +#ifdef HAVE_TC_SETUP_TYPE +static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && defined(HAVE_TC_SETUP_BLOCK) + struct bnxt *bp = netdev_priv(dev); +#endif + + switch (type) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_BLOCK + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &bnxt_block_cb_list, + bnxt_setup_tc_block_cb, + bp, bp, true); +#else + case TC_SETUP_CLSFLOWER: + return bnxt_setup_flower(dev, type_data); +#endif +#endif + case TC_SETUP_QDISC_MQPRIO: { + struct tc_mqprio_qopt *mqprio = type_data; + + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnxt_setup_mq_tc(dev, mqprio->num_tc); + } +#if defined(HAVE_ETF_QOPT_OFFLOAD) + case TC_SETUP_QDISC_ETF: + return bnxt_setup_etf_tc(dev, type_data); +#endif + default: + return -EOPNOTSUPP; + } +} + +#else /* !HAVE_TC_SETUP_TYPE */ +#ifdef HAVE_TC_TO_NETDEV +#ifdef HAVE_CHAIN_INDEX +static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, + __be16 proto, struct tc_to_netdev *ntc) +#else /* !HAVE_CHAIN_INDEX */ +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) +#endif +{ + switch (ntc->type) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + case TC_SETUP_CLSFLOWER: + return bnxt_setup_flower(dev, ntc->cls_flower); +#endif + case TC_SETUP_MQPRIO: +#ifdef HAVE_MQPRIO_QOPT + ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + + return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); +#else + return bnxt_setup_mq_tc(dev, ntc->tc); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_TO_NETDEV */ +#endif /* HAVE_TC_SETUP_TYPE */ +#endif /* HAVE_NDO_SETUP_TC_RH72 */ + +static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, + struct bnxt_ntuple_filter *f2) +{ + struct 
bnxt_flow_masks *masks1 = &f1->fmasks; + struct bnxt_flow_masks *masks2 = &f2->fmasks; + struct flow_keys *keys1 = &f1->fkeys; + struct flow_keys *keys2 = &f2->fkeys; + + if (keys1->basic.n_proto != keys2->basic.n_proto || + keys1->basic.ip_proto != keys2->basic.ip_proto) + return false; + + if (keys1->basic.n_proto == htons(ETH_P_IP)) { + if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src || + masks1->addrs.v4addrs.src != masks2->addrs.v4addrs.src || + keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst || + masks1->addrs.v4addrs.dst != masks2->addrs.v4addrs.dst) + return false; + } else { + if (!ipv6_addr_equal(&keys1->addrs.v6addrs.src, + &keys2->addrs.v6addrs.src) || + !ipv6_addr_equal(&masks1->addrs.v6addrs.src, + &masks2->addrs.v6addrs.src) || + !ipv6_addr_equal(&keys1->addrs.v6addrs.dst, + &keys2->addrs.v6addrs.dst) || + !ipv6_addr_equal(&masks1->addrs.v6addrs.dst, + &masks2->addrs.v6addrs.dst)) + return false; + } + + return keys1->ports.src == keys2->ports.src && + masks1->ports.src == masks2->ports.src && + keys1->ports.dst == keys2->ports.dst && + masks1->ports.dst == masks2->ports.dst && + keys1->control.flags == keys2->control.flags && + f1->l2_fltr == f2->l2_fltr; +} + +struct bnxt_ntuple_filter * +bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr, u32 idx) +{ + struct hlist_node __maybe_unused *node; + struct bnxt_ntuple_filter *f; + struct hlist_head *head; + + head = &bp->ntp_fltr_hash_tbl[idx]; + __hlist_for_each_entry_rcu(f, node, head, base.hash) { + if (bnxt_fltr_match(f, fltr)) + return f; + } + return NULL; +} + +u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, const struct sk_buff *skb) +{ + struct bnxt_vnic_info *vnic; + + if (skb) + return skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; + + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + return bnxt_toeplitz(bp, fkeys, (void *)vnic->rss_hash_key); +} + +int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, + 
u32 idx) +{ + struct hlist_head *head; + int bit_id; + + spin_lock_bh(&bp->ntp_fltr_lock); + bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap, bp->max_fltr, 0); + if (bit_id < 0) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return -ENOMEM; + } + + fltr->base.sw_id = (u16)bit_id; + fltr->base.type = BNXT_FLTR_TYPE_NTUPLE; + fltr->base.flags |= BNXT_ACT_RING_DST; + head = &bp->ntp_fltr_hash_tbl[idx]; + hlist_add_head_rcu(&fltr->base.hash, head); + set_bit(BNXT_FLTR_INSERTED, &fltr->base.state); + bnxt_insert_usr_fltr(bp, &fltr->base); + bp->ntp_fltr_count++; + spin_unlock_bh(&bp->ntp_fltr_lock); + return 0; +} + +#ifdef CONFIG_RFS_ACCEL +static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ntuple_filter *fltr, *new_fltr; + struct flow_keys *fkeys; + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + struct bnxt_l2_filter *l2_fltr; + int rc = 0, idx; + struct hlist_node __maybe_unused *node; + u32 flags; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + !(bp->flags & BNXT_FLAG_RFS)) + return -EOPNOTSUPP; + +#ifdef HAVE_INNER_NETWORK_OFFSET + if (skb->encapsulation) + return -EPROTONOSUPPORT; +#endif + + if (ether_addr_equal(dev->dev_addr, eth->h_dest)) { + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + } else { + struct bnxt_l2_key key; + + ether_addr_copy(key.dst_mac_addr, eth->h_dest); + key.vlan = 0; + l2_fltr = bnxt_lookup_l2_filter_from_key(bp, &key); + if (!l2_fltr) + return -EINVAL; + if (l2_fltr->base.flags & BNXT_ACT_FUNC_DST) { + bnxt_del_l2_filter(bp, l2_fltr); + return -EINVAL; + } + } + new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); + if (!new_fltr) { + bnxt_del_l2_filter(bp, l2_fltr); + return -ENOMEM; + } + + fkeys = &new_fltr->fkeys; + if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + + if ((fkeys->basic.n_proto != htons(ETH_P_IP) && + 
fkeys->basic.n_proto != htons(ETH_P_IPV6)) || + ((fkeys->basic.ip_proto != IPPROTO_TCP) && + (fkeys->basic.ip_proto != IPPROTO_UDP))) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + new_fltr->fmasks = BNXT_FLOW_IPV4_MASK_ALL; + if (fkeys->basic.n_proto == htons(ETH_P_IPV6)) { + if (bp->hwrm_spec_code < 0x10601) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + new_fltr->fmasks = BNXT_FLOW_IPV6_MASK_ALL; + } + flags = fkeys->control.flags; + if (((flags & FLOW_DIS_ENCAPSULATION) && + bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) { + rc = -EPROTONOSUPPORT; + goto err_free; + } + new_fltr->l2_fltr = l2_fltr; + + idx = bnxt_get_ntp_filter_idx(bp, fkeys, skb); + rcu_read_lock(); + fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); + if (fltr) { + rc = fltr->base.sw_id; + rcu_read_unlock(); + goto err_free; + } + rcu_read_unlock(); + + new_fltr->flow_id = flow_id; + new_fltr->base.rxq = rxq_index; + rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); + if (!rc) { + bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); + return new_fltr->base.sw_id; + } + +err_free: + bnxt_del_l2_filter(bp, l2_fltr); + kfree(new_fltr); + return rc; +} +#endif /* CONFIG_RFS_ACCEL */ + +void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) +{ + spin_lock_bh(&bp->ntp_fltr_lock); + if (!test_and_clear_bit(BNXT_FLTR_INSERTED, &fltr->base.state)) { + spin_unlock_bh(&bp->ntp_fltr_lock); + return; + } + hlist_del_rcu(&fltr->base.hash); + bnxt_del_one_usr_fltr(bp, &fltr->base); + bp->ntp_fltr_count--; + spin_unlock_bh(&bp->ntp_fltr_lock); + bnxt_del_l2_filter(bp, fltr->l2_fltr); + clear_bit(fltr->base.sw_id, bp->ntp_fltr_bmap); + kfree_rcu(fltr, base.rcu); +} + +static void bnxt_cfg_ntp_filters(struct bnxt *bp) +{ + int i; + + for (i = 0; i < BNXT_L2_FLTR_HASH_SIZE; i++) { + struct hlist_head *head; + struct hlist_node *tmp, __maybe_unused *nxt; + struct bnxt_l2_filter *fltr; + + head = &bp->l2_fltr_hash_tbl[i]; + __hlist_for_each_entry_safe(fltr, nxt, 
tmp, head, base.hash) {
			if (fltr->base.flags & BNXT_ACT_FUNC_DST) {
				u16 vf_idx = fltr->base.vf_idx;

				if (bnxt_vf_vnic_state_is_up(bp, vf_idx))
					continue;

				bnxt_del_l2_filter(bp, fltr);
			}
		}
	}
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp, __maybe_unused *nxt;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		__hlist_for_each_entry_safe(fltr, nxt, tmp, head, base.hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->base.state)) {
				if (fltr->base.flags & BNXT_ACT_NO_AGING)
					continue;
#ifdef CONFIG_RFS_ACCEL
				/* Filter already in HW: free it if the RPS
				 * core says the flow has expired.
				 */
				if (rps_may_expire_flow(bp->dev, fltr->base.rxq,
							fltr->flow_id,
							fltr->base.sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
#endif /* CONFIG_RFS_ACCEL */
			} else {
				/* Not yet in HW: program it now; drop it on
				 * failure.
				 */
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID,
						&fltr->base.state);
			}

			if (del)
				bnxt_del_ntp_filter(bp, fltr);
		}
	}
}

/* Detach and free the LAG/bonding state and its netdev notifier. */
static void bnxt_deinit_lag(struct bnxt *bp)
{
	struct notifier_block *notif_blk;
	struct bnxt_bond_info *binfo;

	binfo = bp->bond_info;
	if (!binfo)
		return;

	bp->bond_info = NULL;
	notif_blk = &binfo->notif_blk;
	unregister_netdevice_notifier(notif_blk);
	kfree(binfo);
}

#ifdef HAVE_UDP_TUNNEL_NIC
/* udp_tunnel_nic callback: program a VXLAN/GENEVE/VXLAN-GPE dst port. */
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
				    unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE;
	else
		cmd = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE;

	return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}

/* udp_tunnel_nic callback: remove a previously programmed tunnel port. */
static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
				      unsigned int entry, struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(netdev);
	unsigned int cmd;

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else if (ti->type == UDP_TUNNEL_TYPE_GENEVE)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE;

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}

/* Tunnel offload tables: _p7 variant adds VXLAN-GPE for newer chips. */
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.set_port = bnxt_udp_tunnel_set_port,
	.unset_port = bnxt_udp_tunnel_unset_port,
	.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
		 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
}, bnxt_udp_tunnels_p7 = {
	.set_port = bnxt_udp_tunnel_set_port,
	.unset_port = bnxt_udp_tunnel_unset_port,
	.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
		 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables = {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN_GPE, },
	},
};

#elif defined(HAVE_NDO_ADD_VXLAN)
/* Legacy ndo_add_vxlan_port path for old kernels: defer the HWRM call to
 * the slow-path worker.
 * NOTE(review): bnxt_queue_sp_work() is called with one argument here but
 * with two (bp, event) elsewhere in this file -- confirm the compat
 * definition selected under HAVE_NDO_ADD_VXLAN.
 */
static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
				__be16 port)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	if (sa_family != AF_INET6 && sa_family != AF_INET)
		return;

	/* Only a single VXLAN port is supported; ignore a second one. */
	if (atomic_read(&bp->vxlan_port_cnt) && bp->vxlan_port != port)
		return;

	if (atomic_inc_return(&bp->vxlan_port_cnt) == 1) {
		bp->vxlan_port_pending = port;
		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
}

/* Legacy ndo_del_vxlan_port counterpart of bnxt_add_vxlan_port(). */
static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
				__be16 port)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	if (sa_family != AF_INET6 && sa_family != AF_INET)
		return;

	if (atomic_read(&bp->vxlan_port_cnt) &&
	    bp->vxlan_port == port) {
		if (atomic_dec_return(&bp->vxlan_port_cnt) == 0) {
			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
	}
}
#elif defined(HAVE_NDO_UDP_TUNNEL)
/* ndo_udp_tunnel_add compat path: track one VXLAN and one GENEVE port and
 * let the slow-path worker issue the firmware calls.
 */
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (atomic_read(&bp->vxlan_port_cnt) &&
		    bp->vxlan_port != ti->port)
			return;

		if (atomic_inc_return(&bp->vxlan_port_cnt) == 1) {
			bp->vxlan_port_pending = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (atomic_read(&bp->nge_port_cnt) && bp->nge_port != ti->port)
			return;

		if (atomic_inc_return(&bp->nge_port_cnt) == 1) {
			bp->nge_port_pending = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	__bnxt_queue_sp_work(bp);
}

/* ndo_udp_tunnel_del compat path: release a tracked tunnel port when its
 * refcount drops to zero.
 */
static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!atomic_read(&bp->vxlan_port_cnt) ||
		    bp->vxlan_port != ti->port)
			return;

		if (atomic_dec_return(&bp->vxlan_port_cnt) != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!atomic_read(&bp->nge_port_cnt) || bp->nge_port != ti->port)
			return;

		if (atomic_dec_return(&bp->nge_port_cnt) != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	__bnxt_queue_sp_work(bp);
}
#endif /* HAVE_UDP_TUNNEL_NIC */

#ifdef OLD_VLAN
static void bnxt_vlan_rx_register(struct net_device *dev,
				  struct
vlan_group *vlgrp)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		bp->vlgrp = vlgrp;
		return;
	}
	/* Quiesce NAPI and interrupts while the VLAN group pointer is
	 * swapped, then re-enable.
	 */
	bnxt_disable_napi(bp);
	bnxt_disable_int_sync(bp);
	bp->vlgrp = vlgrp;
	bnxt_enable_napi(bp);
	bnxt_enable_int(bp);
}
#endif

#ifdef HAVE_NDO_BRIDGE_GETLINK
/* ndo_bridge_getlink: report the cached bridge mode (VEB/VEPA). */
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

/* ndo_bridge_setlink: parse IFLA_BRIDGE_MODE from the netlink message and
 * push the new mode to firmware.  Needs HWRM spec 1.7.7+ and a single-PF
 * configuration.
 */
#ifdef HAVE_NDO_BRIDGE_SETLINK_EXTACK
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
#else
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags)
#endif
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10707 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}
#endif

#ifdef CONFIG_VF_REPS
#if !defined(HAVE_NDO_DEVLINK_PORT) && !defined(HAVE_DEVLINK_PORT_ATTRS)
/* Fallback phys-port-name ("p<N>") for kernels without devlink ports. */
static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
				   size_t len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* The PF and it's VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	rc = snprintf(buf, len, "p%d", bp->pf.port_id);

	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}
#endif

#ifdef HAVE_NDO_GET_PORT_PARENT_ID
/* ndo_get_port_parent_id: expose the PCIe DSN as the switch ID, only in
 * switchdev mode with a valid DSN.
 */
int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!bnxt_tc_is_switchdev_mode(bp))
		return -EOPNOTSUPP;

	/* The PF and it's VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

#else

/* Legacy switchdev attr_get path for kernels without
 * ndo_get_port_parent_id; same DSN-as-switch-ID semantics.
 */
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
{
	if (!bnxt_tc_is_switchdev_mode(bp))
		return -EOPNOTSUPP;

	/* The PF and it's VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(bp->dsn);
		memcpy(attr->u.ppid.id, bp->dsn, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int bnxt_swdev_port_attr_get(struct net_device *dev,
				    struct switchdev_attr *attr)
{
	return bnxt_port_attr_get(netdev_priv(dev), attr);
}

static const struct switchdev_ops bnxt_switchdev_ops = {
	.switchdev_port_attr_get = bnxt_swdev_port_attr_get
};
#endif /* HAVE_NDO_GET_PORT_PARENT_ID */
#endif /* CONFIG_VF_REPS */

#ifdef HAVE_NDO_DEVLINK_PORT
/* ndo_get_devlink_port: return the embedded devlink port object. */
static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return &bp->dl_port;
}
#endif

/* netdev ops table; heavily ifdef'ed for kernel-version compatibility. */
static const struct net_device_ops bnxt_netdev_ops = {
#ifdef HAVE_NDO_SIZE
	.ndo_size = sizeof(const struct net_device_ops),
#endif
	.ndo_open = bnxt_open,
	.ndo_start_xmit = bnxt_start_xmit,
	.ndo_stop = bnxt_close,
#ifdef NETDEV_GET_STATS64
	.ndo_get_stats64 = bnxt_get_stats64,
#else
	.ndo_get_stats = bnxt_get_stats,
#endif
	.ndo_set_rx_mode = bnxt_set_rx_mode,
	.ndo_eth_ioctl = bnxt_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = bnxt_change_mac_addr,
	.ndo_change_mtu
= bnxt_change_mtu,
#ifdef NETDEV_FEATURE_CONTROL
	.ndo_fix_features = bnxt_fix_features,
	.ndo_set_features = bnxt_set_features,
#endif
#ifdef HAVE_NDO_FEATURES_CHECK
	.ndo_features_check = bnxt_features_check,
#endif
	.ndo_tx_timeout = bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
#ifdef HAVE_NDO_GET_VF_CONFIG
	.ndo_get_vf_config = bnxt_get_vf_config,
	.ndo_set_vf_mac = bnxt_set_vf_mac,
	.ndo_set_vf_vlan = bnxt_set_vf_vlan,
	.ndo_set_vf_rate = bnxt_set_vf_bw,
#ifdef HAVE_NDO_SET_VF_LINK_STATE
	.ndo_set_vf_link_state = bnxt_set_vf_link_state,
#endif
#ifdef HAVE_VF_SPOOFCHK
	.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
#endif
#ifdef HAVE_NDO_SET_VF_TRUST
	.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
#ifdef HAVE_NDO_SET_VF_QUEUES
	.ndo_set_vf_queues = bnxt_set_vf_queues,
#endif
#endif
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = bnxt_poll_controller,
#endif

#ifdef HAVE_SETUP_TC

#if (defined(HAVE_TC_TO_NETDEV) || defined(HAVE_TC_SETUP_TYPE))

#if defined(HAVE_NDO_SETUP_TC_RH)
	.extended.ndo_setup_tc_rh = bnxt_setup_tc,
#elif defined(HAVE_NDO_SETUP_TC_RH72)
	.ndo_setup_tc = bnxt_setup_mq_tc,
#else /* !HAVE_NDO_SETUP_TC_RH && !HAVE_NDO_SETUP_TC_RH72 */
	.ndo_setup_tc = bnxt_setup_tc,
#endif

#else /* !HAVE_TC_TO_NETDEV && !HAVE_TC_SETUP_TYPE */
	.ndo_setup_tc = bnxt_setup_mq_tc,
#endif

#endif /* HAVE_SETUP_TC */

#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
#if defined(HAVE_NDO_ADD_VXLAN)
	.ndo_add_vxlan_port = bnxt_add_vxlan_port,
	.ndo_del_vxlan_port = bnxt_del_vxlan_port,
#elif defined(HAVE_NDO_UDP_TUNNEL)
#ifdef HAVE_UDP_TUNNEL_NIC
	.ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
#else
	.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
#endif
#endif /* HAVE_NDO_ADD_VXLAN */
#ifdef BNXT_PRIV_RX_BUSY_POLL
	.ndo_busy_poll = bnxt_busy_poll,
#endif
#ifdef OLD_VLAN
	.ndo_vlan_rx_register = bnxt_vlan_rx_register,
#endif
#ifdef HAVE_NDO_XDP
	.ndo_bpf = bnxt_xdp,
#endif
#if defined(HAVE_XDP_FRAME) && !defined(HAVE_EXT_NDO_XDP_XMIT) && !defined(HAVE_LEGACY_RCU_BH)
	.ndo_xdp_xmit = bnxt_xdp_xmit,
#endif
#ifdef HAVE_XSK_SUPPORT
	.ndo_xsk_wakeup = bnxt_xsk_wakeup,
#endif
#ifdef HAVE_NDO_BRIDGE_GETLINK
	.ndo_bridge_getlink = bnxt_bridge_getlink,
	.ndo_bridge_setlink = bnxt_bridge_setlink,
#endif
#ifdef CONFIG_VF_REPS
#if !defined(HAVE_DEVLINK_PORT_ATTRS) && defined(HAVE_NDO_GET_PORT_PARENT_ID)
	.ndo_get_port_parent_id = bnxt_get_port_parent_id,
#endif
#if !defined(HAVE_NDO_DEVLINK_PORT) && !defined(HAVE_DEVLINK_PORT_ATTRS)
#ifdef HAVE_EXT_GET_PHYS_PORT_NAME
	.extended.ndo_get_phys_port_name = bnxt_get_phys_port_name,
#else
	.ndo_get_phys_port_name = bnxt_get_phys_port_name,
#endif
#endif /* HAVE_NDO_DEVLINK_PORT */
#endif /* CONFIG_VF_REPS */
#ifdef HAVE_NDO_DEVLINK_PORT
	.ndo_get_devlink_port = bnxt_get_devlink_port,
#endif
};

/* PCI remove callback: tear down reps/SR-IOV, aux (RoCE) devices, the
 * netdev, filters and all driver-private state.  Teardown order matters:
 * the netdev is unregistered before freeing resources it may reference.
 */
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	bool shutdown_tc = false;

	if (BNXT_PF(bp)) {
		if (bnxt_tc_flower_enabled(bp)) {
			bnxt_disable_tc_flower(bp);
			shutdown_tc = true;
		}
		mutex_lock(&bp->vf_rep_lock);
		bnxt_vf_reps_destroy(bp);
		if (BNXT_TRUFLOW_EN(bp))
			bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_NONE);
		bp->eswitch_disabled = true;
		mutex_unlock(&bp->vf_rep_lock);
		bnxt_sriov_disable(bp);
		bnxt_sriov_sysfs_exit(bp);
	}
	bnxt_rdma_aux_device_del(bp);

#if defined(HAVE_DEVLINK_PORT_ATTRS) && !defined(HAVE_SET_NETDEV_DEVLINK_PORT)
	if (BNXT_PF(bp))
		devlink_port_type_clear(&bp->dl_port);
#endif
	bnxt_ptp_clear(bp);
	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);

	bnxt_rdma_aux_device_uninit(bp);

	bnxt_dbr_exit(bp);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp);
	bnxt_dl_unregister(bp);
	bnxt_free_l2_filters(bp, true);
	bnxt_free_ntp_fltrs(bp, true);
	if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
		bnxt_clear_rss_ctxs(bp, true);
	if (BNXT_CHIP_P5_PLUS(bp))
		bitmap_free(bp->af_xdp_zc_qs);
	if (shutdown_tc) {
		bnxt_enable_tc_flower(bp);
		bnxt_shutdown_tc(bp);
	}

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_hwmon_uninit(bp);
	bnxt_ethtool_free(bp);
	bnxt_free_stats_cosqnames_mem(bp);
	bnxt_dcb_free(bp, false);
	kfree(bp->ptp_cfg);
	bp->ptp_cfg = NULL;
#if defined(HAVE_NDO_XDP) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0))
	/* Pre-4.16 kernels do not release the XDP prog for us. */
	if (bp->xdp_prog)
		bpf_prog_put(bp->xdp_prog);
#endif
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_free_tfc_mpc_info(bp);
	bnxt_free_mpc_info(bp);
	bnxt_deinit_lag(bp);
	bnxt_free_ktls_info(bp);
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	bnxt_free_crash_dump_mem(bp);
	bnxt_free_udcc_info(bp);
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
#if defined(HAVE_ETF_QOPT_OFFLOAD)
	bnxt_free_tc_etf_bitmap(bp);
#endif
	bnxt_unregister_logger(bp, BNXT_LOGGER_L2);
	bnxt_unregister_logger(bp, BNXT_LOGGER_L2_CTX_MEM);
	bnxt_unregister_logger(bp, BNXT_LOGGER_L2_RING_CONTENTS);

#ifdef DEV_NETMAP
	if (BNXT_CHIP_P5_PLUS(bp))
		netmap_detach(dev);
#endif /* DEV_NETMAP */
	free_netdev(dev);
}

/* Query PHY capabilities from firmware and, when @fw_dflt is set, seed the
 * link settings from the firmware defaults.  Returns 0 or a negative errno.
 */
static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_CHIP_SUPPORTS_PHY(bp)) {
		link_info->link_state = BNXT_LINK_STATE_UP;
		return 0;
	}

	bp->phy_flags = 0;
	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
		bp->dev->priv_flags |= IFF_SUPP_NOFCS;
	else
		bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;

	bp->mac_flags = 0;
	bnxt_hwrm_mac_qcaps(bp);

	if (!fw_dflt)
		return 0;

	mutex_lock(&bp->link_lock);
	rc = bnxt_update_link(bp, false);
	if (rc) {
		mutex_unlock(&bp->link_lock);
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	mutex_unlock(&bp->link_lock);
	return 0;
}

/* Read the MSI-X table size from PCI config space; 1 if no MSI-X cap. */
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

/* Compute the raw maximum RX/TX/completion ring counts from the function's
 * hardware resources, before any default-sizing policy is applied.
 */
static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings - bnxt_mpc_tx_rings_in_use(bp);
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num_in_use(bp),
			hw_resc->max_stat_ctxs -
			bnxt_get_ulp_stat_ctxs_in_use(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	/* Aggregation rings consume half of the RX rings. */
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
		int rc;

		rc = __bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		if (rc) {
			*max_rx = 0;
			*max_tx = 0;
		}
		/* On P5 chips, max_cp output param should be available NQs */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

/* Public wrapper: report trimmed max RX/TX rings; -ENOMEM if any of the
 * raw maxima is zero.
 */
int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

/* Determine default max rings, retrying without aggregation rings if the
 * first attempt fails, and carving out minimum RoCE resources when the
 * RoCE capability is present.
 */
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have a
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	if (bp->num_tc)
		bp->tx_nr_rings *= bp->num_tc;
	bnxt_trim_mpc_rings(bp);
}

/* Count physical cores (siblings excluded) local to the device's NUMA
 * node; falls back to 1 in kdump or if a cpumask cannot be allocated.
 */
static int bnxt_get_num_local_cpus(struct pci_dev *pdev)
{
	cpumask_var_t cpus;
	int cpu, count = 0;
	const struct cpumask *tmp_cpu_mask;
	int numa_node;

	if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
		return 1;

	numa_node = dev_to_node(&pdev->dev);

	tmp_cpu_mask = (numa_node == NUMA_NO_NODE) ? cpu_online_mask : cpumask_of_node(numa_node);

	cpumask_copy(cpus, tmp_cpu_mask);

	/* Remove each counted CPU's SMT siblings so threads count once. */
	for_each_cpu(cpu, cpus) {
		++count;
		cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
	}

	free_cpumask_var(cpus);

	return count;
}

/* TX rings per traffic class (total when no TCs are configured). */
static int bnxt_tx_nr_rings_per_tc(struct bnxt *bp)
{
	return bp->num_tc ? bp->tx_nr_rings / bp->num_tc : bp->tx_nr_rings;
}

/* Choose and reserve the default RX/TX/CP ring counts, sized from local
 * CPU count, port count, and NPAR/RoCE limits.  @sh selects shared
 * (combined) rings.  Returns 0 or a negative errno; zeroes the ring
 * counts on failure.
 */
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings = 0, max_rx_rings, max_tx_rings, rc, num_local_cpu_cnt, floor;
	int avail_msix;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;

	num_local_cpu_cnt = bnxt_get_num_local_cpus(bp->pdev);

	if (bp->port_count > 0)
		dflt_rings = min_t(int, num_local_cpu_cnt / bp->port_count, BNXT_NUM_DFLT_RINGS);
	else
		dflt_rings = min_t(int, num_local_cpu_cnt, BNXT_NUM_DFLT_RINGS);

	if (BNXT_NPAR(bp) || (bp->flags & BNXT_FLAG_ROCE_CAP))
		dflt_rings = min_t(int, dflt_rings, BNXT_NUM_DFLT_RINGS_NPAR_ROCE);

	floor = min_t(int, num_online_cpus(), BNXT_MIN_NUM_DFLT_RINGS);

	if ((dflt_rings < floor))
		dflt_rings = floor;

	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	if (bp->num_tc)
		bp->tx_nr_rings *= bp->num_tc;

	/* Give leftover MSI-X vectors to the RoCE/ULP driver. */
	avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
	if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
		int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);

		bnxt_set_ulp_msix_num(bp, ulp_num_msix);
		bnxt_set_dflt_ulp_stat_ctxs(bp);
	}

	bnxt_set_dflt_mpc_rings(bp);

	rc = __bnxt_reserve_rings(bp);
	if (rc && rc != -ENODEV)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc && rc != -ENODEV)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
		bnxt_trim_mpc_rings(bp);
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

/* Late (re)initialization of default rings and interrupt mode, used when
 * rings were not set up at probe time.  No-op if TX rings already exist.
 */
static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		if (BNXT_VF(bp) && rc == -ENODEV)
			netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
		else
			netdev_err(bp->dev, "%s: Not enough rings available.\n", __func__);
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bnxt_tx_nr_rings_per_tc(bp);
	bnxt_trim_mpc_rings(bp);

	bnxt_set_dflt_rfs(bp);

init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

/* Re-query function caps and re-init interrupts after a PF resource
 * change; closes and reopens the NIC if it was running.  Caller must hold
 * RTNL.
 */
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp, false);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

/* Read board part and serial numbers from PCI VPD (best effort). */
static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	unsigned int vpd_size, kw_len;
	int pos, size;
	u8 *vpd_data;

	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
	if (IS_ERR(vpd_data)) {
		pci_warn(pdev, "Unable to read VPD\n");
		return;
	}

	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
	if (pos < 0)
		goto read_sn;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
					   PCI_VPD_RO_KEYWORD_SERIALNO,
					   &kw_len);
	if (pos < 0)
		goto exit;

	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}

/* Read the PCIe Device Serial Number into @dsn (little-endian) and mark
 * it valid; -EOPNOTSUPP if the device exposes no DSN capability.
 */
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword)
		return -EOPNOTSUPP;

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}

/* Map the doorbell BAR (BAR 2): a non-cached region for normal doorbells
 * and, when TX push is enabled on P5+ chips, a write-combining region for
 * the push buffers.  Push mode is disabled if the WC mapping fails or the
 * BAR is too small.
 */
static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;

	bp->db_size_nc = bp->db_size;
	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    bp->tx_push_mode > BNXT_PUSH_MODE_LEGACY) {
		if (bp->tx_push_mode == BNXT_PUSH_MODE_PPP)
			bp->db_size_nc = PAGE_SIZE;
		else if (BNXT_PF(bp))
			bp->db_size_nc = DB_PF_OFFSET_P5 + PAGE_SIZE;
		else
			bp->db_size_nc = DB_VF_OFFSET_P5 + PAGE_SIZE;
		if (bp->db_size <= bp->db_size_nc) {
			bp->tx_push_mode = BNXT_PUSH_MODE_NONE;
			bp->db_size_nc = bp->db_size;
		} else {
			bp->db_base_wc =
				ioremap_wc(pci_resource_start(bp->pdev, 2) +
					   bp->db_size_nc,
					   bp->db_size - bp->db_size_nc);
			if (!bp->db_base_wc) {
				bp->tx_push_mode = BNXT_PUSH_MODE_NONE;
				netdev_warn(bp->dev, "Failed to map WCB pages, TX push not supported.\n");
			}
		}
	}
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size_nc);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}

/* Log board name, BAR 0 address, MAC and (for PFs) the PCIe link status. */
void bnxt_print_device_info(struct bnxt *bp)
{
	netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[bp->board_idx].name,
		    (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);

	if (BNXT_PF(bp))
		pcie_print_link_status(bp->pdev);
}

/* Logger callback: dump ring states for the live-data segment. */
static void bnxt_log_live_data(void *d, u32 seg_id)
{
	struct bnxt *bp = d;

	bnxt_log_ring_states(bp);
}

/* NETDEV_CHANGE/CHANGELOWERSTATE handler: recompute the bond's active
 * port map from the TX-capable slaves; sets *update when the map changed
 * and this port is the primary.
 */
static void bnxt_hndl_ndev_change(struct bnxt *bp, void *ptr, bool *update)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	u16 e_port_id = (((struct bnxt *)netdev_priv(netdev))->pf.port_id);
	struct bnxt_bond_info *binfo = bp->bond_info;
	unsigned long a_port_map;
	struct slave *iter_slave;
	struct net_device *dev;
	struct list_head *iter;
	struct bonding *bond;
	struct bnxt *tmp_bp;
	u8 port;

	*update = false;
	if (!test_bit(e_port_id, &binfo->member_port_map))
		return;
	a_port_map = 0;
	dev = netdev_master_upper_dev_get(netdev);
	bond = netdev_priv(dev);
	bond_for_each_slave(bond, iter_slave, iter) {
		tmp_bp = netdev_priv(iter_slave->dev);
		port = tmp_bp->pf.port_id;
		if (!bond_slave_can_tx(iter_slave))
			__clear_bit(port, &a_port_map);
		else
			__set_bit(port, &a_port_map);
	}
	if (a_port_map != binfo->active_port_map) {
		binfo->active_port_map = a_port_map;
		if (binfo->primary)
			*update = true;
	}
}

/* Map a kernel bonding mode to the firmware aggregation mode; 0 means the
 * mode is not supported by firmware.
 */
static int bnxt_bond_kern_to_fw(int kbond_mode)
{
	switch (kbond_mode) {
	case BOND_MODE_ACTIVEBACKUP:
		return FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_ACTIVE_BACKUP;
	case BOND_MODE_ROUNDROBIN:
		return FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_ACTIVE_ACTIVE;
	case BOND_MODE_XOR:
		return FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_BALANCE_XOR;
	case BOND_MODE_8023AD:
		return FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_802_3_AD;
	default:
		return 0;
	}
}

/* NETDEV_BONDING_INFO handler: record the (firmware-translated) bond mode;
 * sets *update when this port is the primary.
 */
static void bnxt_hndl_bonding_info(struct bnxt *bp, void *ptr, bool *update)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	u16 e_pid = (((struct bnxt *)netdev_priv(netdev))->pf.port_id);
	struct netdev_notifier_bonding_info *info = ptr;
	struct bnxt_bond_info *binfo = bp->bond_info;
	struct netdev_bonding_info *ev_binfo = NULL;
	u8 mode;

	*update = false;
	if (!test_bit(e_pid, &binfo->member_port_map))
		return;
	ev_binfo = &info->bonding_info;
	mode = bnxt_bond_kern_to_fw(ev_binfo->master.bond_mode);
	if (!mode) {
		netdev_warn(bp->dev, "bond mode = %x, is not supported\n",
			    ev_binfo->master.bond_mode);
		return;
	}
	binfo->aggr_mode = mode;
binfo->bond_active = true; + if (binfo->primary) + *update = true; +} + +static int bnxt_hwrm_update_link_aggr_mode(struct bnxt *bp) +{ + struct bnxt_bond_info *bond = bp->bond_info; + struct hwrm_func_lag_mode_cfg_input *req; + bool bond_active = bond->bond_active; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_LAG_MODE_CFG); + if (rc) + return rc; + req->flags = bond_active ? + FUNC_LAG_MODE_CFG_REQ_FLAGS_AGGR_ENABLE : FUNC_LAG_MODE_CFG_REQ_FLAGS_AGGR_DISABLE; + req->active_port_map = bond->active_port_map; + req->member_port_map = bond->member_port_map; + req->link_aggr_mode = bond->aggr_mode; + req->member_port_map = bond->member_port_map; + req->enables = (FUNC_LAG_MODE_CFG_REQ_ENABLES_FLAGS | + FUNC_LAG_MODE_CFG_REQ_ENABLES_ACTIVE_PORT_MAP | + FUNC_LAG_MODE_CFG_REQ_ENABLES_MEMBER_PORT_MAP | + FUNC_LAG_MODE_CFG_REQ_ENABLES_AGGR_MODE); + return hwrm_req_send(bp, req); +} + +static void bnxt_clear_bond_info(struct bnxt_bond_info *binfo) +{ + binfo->primary = false; + binfo->bond_active = false; + binfo->aggr_mode = 0; + binfo->fw_lag_id = 0; + binfo->member_port_map = 0; + binfo->active_port_map = 0; +} + +static void bnxt_set_primary(struct bnxt *bp, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + struct bnxt_bond_info *temp_bi, *primary_bi = NULL; + struct bnxt_bond_info *binfo = bp->bond_info; + struct net_device *dev = info->upper_dev; + struct net_device *netdev; + struct list_head *iter; + int idx; + + netdev_for_each_lower_dev(dev, netdev, iter) { + for (idx = 0; idx < bp->port_count; idx++) { + if (!binfo->p_netdev[idx] || + netdev != binfo->p_netdev[idx]) + continue; + temp_bi = ((struct bnxt *)netdev_priv(netdev))->bond_info; + if (!temp_bi) + continue; + if (temp_bi->primary) + primary_bi = temp_bi; + break; + } + if (primary_bi) + break; + } + __set_bit(bp->pf.port_id, &binfo->member_port_map); + + if (!primary_bi) { + binfo->primary = true; + } else { + binfo->aggr_mode = primary_bi->aggr_mode; + binfo->member_port_map |= 
primary_bi->member_port_map; + binfo->bond_active = true; + } +} + +static bool bnxt_is_member_port(struct bnxt *bp, struct net_device *netdev) +{ + struct pci_dev *event_pdev; + + if (!netdev->dev.parent || !dev_is_pci(netdev->dev.parent)) + return false; + event_pdev = to_pci_dev(netdev->dev.parent); + if (!bp->pdev->bus || !event_pdev->bus) + return false; + return (bp->pdev->bus->number == event_pdev->bus->number && + PCI_SLOT(bp->pdev->devfn) == PCI_SLOT(event_pdev->devfn)); +} + +static bool bnxt_is_netdev_bond_slave(struct net_device *dev, struct bnxt *bp) +{ + return netdev_master_upper_dev_get(bp->dev) == dev; +} + +static void bnxt_test_and_set_peer_port(struct net_device *dev, struct bnxt *bp) +{ + struct net_device *netdev; + struct list_head *iter; + u16 n_port_id; + + netdev_for_each_lower_dev(dev, netdev, iter) { + if (!bnxt_is_member_port(bp, netdev)) + continue; + n_port_id = (((struct bnxt *)netdev_priv(netdev))->pf.port_id); + if (!__test_and_set_bit(n_port_id, &bp->bond_info->peers)) + bp->bond_info->p_netdev[n_port_id] = netdev; + } +} + +static void bnxt_hndl_changeupper(struct bnxt *bp, void *ptr, bool *update) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + u16 e_port_id = (((struct bnxt *)netdev_priv(netdev))->pf.port_id); + struct netdev_notifier_changeupper_info *info = ptr; + struct bnxt_bond_info *binfo = bp->bond_info; + struct net_device *dev = info->upper_dev; + bool own_event = false; + + *update = false; + if (bp->pf.port_id == e_port_id) + own_event = true; + if (info->linking) { + if (!bnxt_is_netdev_bond_slave(dev, bp)) + return; + bnxt_test_and_set_peer_port(dev, bp); + if (own_event) { + if (!binfo->member_port_map) + bnxt_set_primary(bp, ptr); + } else { + __set_bit(e_port_id, &binfo->member_port_map); + if (binfo->primary) + *update = true; + } + } else { + if (!binfo->bond_active) + return; + if (!test_bit(e_port_id, &binfo->member_port_map)) + return; + if (own_event) { + bnxt_clear_bond_info(binfo); + 
if (list_empty(&dev->adj_list.lower)) + *update = true; + } else { + bnxt_set_primary(bp, ptr); + if (!binfo->primary) + return; + if (bnxt_is_netdev_bond_slave(dev, bp)) + __clear_bit(e_port_id, &binfo->member_port_map); + *update = true; + } + } +} + +static void bnxt_hndl_ndev_reg(struct bnxt *bp, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + u16 port_id = (((struct bnxt *)netdev_priv(netdev))->pf.port_id); + struct bnxt_bond_info *binfo = bp->bond_info; + + __set_bit(port_id, &binfo->peers); + binfo->p_netdev[port_id] = netdev; +} + +static void bnxt_hndl_ndev_unreg(struct bnxt *bp, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + u16 port_id = (((struct bnxt *)netdev_priv(netdev))->pf.port_id); + struct bnxt_bond_info *binfo = bp->bond_info; + + __clear_bit(port_id, &binfo->peers); + binfo->p_netdev[port_id] = NULL; +} + +static int bnxt_hdl_netdev_events(struct notifier_block *notifier, unsigned long event, void *ptr) +{ + struct bnxt_bond_info *this_binfo = + container_of(notifier, struct bnxt_bond_info, notif_blk); + struct bnxt *bp = this_binfo->bp; + bool update_reqd = false; + + if (!bp || !bp->bond_info || !bnxt_is_member_port(bp, netdev_notifier_info_to_dev(ptr))) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_REGISTER: + bnxt_hndl_ndev_reg(bp, ptr); + break; + case NETDEV_UNREGISTER: + bnxt_hndl_ndev_unreg(bp, ptr); + break; + case NETDEV_CHANGEUPPER: + bnxt_hndl_changeupper(bp, ptr, &update_reqd); + break; + case NETDEV_BONDING_INFO: + bnxt_hndl_bonding_info(bp, ptr, &update_reqd); + break; + case NETDEV_CHANGELOWERSTATE: + case NETDEV_CHANGE: + bnxt_hndl_ndev_change(bp, ptr, &update_reqd); + break; + case NETDEV_CHANGEMTU: + update_reqd = true; + break; + } + if (update_reqd) + bnxt_hwrm_update_link_aggr_mode(bp); + + return NOTIFY_DONE; +} + +static void bnxt_init_lag(struct bnxt *bp) +{ + struct notifier_block *notif_blk; + struct bnxt_bond_info *binfo; + + if (!(bp->fw_cap 
& BNXT_FW_CAP_HW_LAG_SUPPORTED)) + return; + + binfo = kzalloc(sizeof(*bp->bond_info), GFP_KERNEL); + if (!binfo) + return; + + binfo->bp = bp; + notif_blk = &binfo->notif_blk; + notif_blk->notifier_call = bnxt_hdl_netdev_events; + if (register_netdevice_notifier(notif_blk)) { + netdev_err(bp->dev, "error: register net notifier .\n"); + kfree(binfo); + return; + } + bp->bond_info = binfo; +} + +static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct bnxt_hw_resc *hw_resc; + static int version_printed; + struct net_device *dev; + struct bnxt *bp; + int rc, max_irqs; + + if (pci_is_bridge(pdev)) + return -ENODEV; + + if (version_printed++ == 0) + pr_info("%s", version); + + /* Clear any pending DMA transactions from crash kernel + * while loading driver in capture kernel. + */ + if (is_kdump_kernel()) { + pci_clear_master(pdev); + pcie_flr(pdev); + } + + max_irqs = bnxt_get_max_irq(pdev); + dev = alloc_etherdev_mqs(sizeof(*bp), max_irqs * BNXT_MAX_QUEUE, + max_irqs); + if (!dev) + return -ENOMEM; + + bp = netdev_priv(dev); + bp->board_idx = ent->driver_data; + bp->msg_enable = BNXT_DEF_MSG_ENABLE; + mutex_init(&bp->log_lock); + INIT_LIST_HEAD(&bp->loggers_list); + bnxt_register_logger(bp, BNXT_LOGGER_L2, BNXT_L2_MAX_LOG_BUFFERS, + bnxt_log_live_data, BNXT_L2_MAX_LIVE_LOG_SIZE); + bnxt_register_logger(bp, BNXT_LOGGER_L2_CTX_MEM, 0, NULL, 0); + bnxt_register_logger(bp, BNXT_LOGGER_L2_RING_CONTENTS, 0, NULL, 0); + bnxt_set_max_func_irqs(bp, max_irqs); + + if (bnxt_vf_pciid(bp->board_idx)) + bp->flags |= BNXT_FLAG_VF; + + /* No devlink port registration in case of a VF */ + if (BNXT_PF(bp)) + SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); + + if (!pdev->msix_cap) { + dev_err(&pdev->dev, "MSIX capability not found, aborting\n"); + return -ENODEV; + } + + rc = bnxt_init_board(pdev, dev); + if (rc < 0) + goto init_err_free; + + dev->netdev_ops = &bnxt_netdev_ops; + dev->watchdog_timeo = BNXT_TX_TIMEOUT; + dev->ethtool_ops = 
&bnxt_ethtool_ops; +#ifdef CONFIG_VF_REPS +#ifndef HAVE_NDO_GET_PORT_PARENT_ID + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); +#endif +#endif + pci_set_drvdata(pdev, dev); + + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) + goto init_err_pci_clean; + + mutex_init(&bp->hwrm_cmd_lock); + mutex_init(&bp->link_lock); + + rc = bnxt_fw_init_one_p1(bp); + if (rc) + goto init_err_pci_clean; + + if (BNXT_PF(bp)) + bnxt_vpd_read_info(bp); + + if (BNXT_CHIP_P5_PLUS(bp)) { + bp->flags |= BNXT_FLAG_CHIP_P5_PLUS; + if (BNXT_CHIP_P7(bp)) + bp->flags |= BNXT_FLAG_CHIP_P7; + } + + rc = bnxt_alloc_rss_indir_tbl(bp, NULL); + if (rc) + goto init_err_pci_clean; + + rc = bnxt_fw_init_one_p2(bp); + if (rc) + goto init_err_pci_clean; + + rc = bnxt_map_db_bar(bp); + if (rc) { + dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n", + rc); + goto init_err_pci_clean; + } + + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | + NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | + NETIF_F_RXCSUM | NETIF_F_GRO; + if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) + dev->hw_features |= NETIF_F_GSO_UDP_L4; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,39) + if (BNXT_SUPPORTS_TPA(bp)) + dev->hw_features |= NETIF_F_LRO; +#endif + +#ifdef NETDEV_HW_ENC_FEATURES + dev->hw_enc_features = + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | + NETIF_F_GSO_PARTIAL; + if (bp->flags & BNXT_FLAG_UDP_GSO_CAP) + dev->hw_enc_features |= NETIF_F_GSO_UDP_L4; +#endif + + if (!(bp->tunnel_disable_flag & BNXT_FW_CAP_UDP_TNL_OFFLOAD_DISABLED)) { + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; +#ifdef NETDEV_HW_ENC_FEATURES + dev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; +#endif + } + + if (!(bp->tunnel_disable_flag & BNXT_FW_CAP_GRE_TNL_OFFLOAD_DISABLED)) { + dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; +#ifdef NETDEV_HW_ENC_FEATURES + 
dev->hw_enc_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM; +#endif + } + + if (!(bp->tunnel_disable_flag & FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP)) { + dev->hw_features |= NETIF_F_GSO_IPXIP4; +#ifdef NETDEV_HW_ENC_FEATURES + dev->hw_enc_features |= NETIF_F_GSO_IPXIP4; +#endif + } + +#ifdef HAVE_UDP_TUNNEL_NIC + if (bp->fw_cap & BNXT_FW_CAP_VNIC_TUNNEL_TPA) + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels_p7; + else + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; +#endif + +#ifdef HAVE_GSO_PARTIAL_FEATURES + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; +#endif + + dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; + if (BNXT_SUPPORTS_TPA(bp)) + dev->hw_features |= NETIF_F_GRO_HW; + dev->features |= dev->hw_features | NETIF_F_HIGHDMA; + if (dev->features & NETIF_F_GRO_HW) + dev->features &= ~NETIF_F_LRO; + dev->priv_flags |= IFF_UNICAST_FLT; + +#ifdef DHAVE_IPV6_BIG_TCP + netif_set_tso_max_size(dev, GSO_MAX_SIZE); +#endif + if (bp->tso_max_segs) + netif_set_tso_max_segs(dev, bp->tso_max_segs); + +#ifdef HAVE_XDP_SET_REDIR_TARGET + dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_XSK_ZEROCOPY; +#endif + +#ifdef CONFIG_BNXT_SRIOV + init_waitqueue_head(&bp->sriov_cfg_wait); + mutex_init(&bp->sriov_lock); + mutex_init(&bp->vf_rep_lock); +#endif + if (BNXT_SUPPORTS_TPA(bp)) { + bp->gro_func = bnxt_gro_func_5730x; + if (BNXT_CHIP_P4(bp)) + bp->gro_func = bnxt_gro_func_5731x; + else if (BNXT_CHIP_P5_PLUS(bp)) + bp->gro_func = bnxt_gro_func_5750x; + } + if (!BNXT_CHIP_P4_PLUS(bp)) + bp->flags |= BNXT_FLAG_DOUBLE_DB; + + rc = bnxt_probe_phy(bp, true); + if (rc) + goto init_err_pci_clean; + + hw_resc = &bp->hw_resc; + bp->max_fltr = hw_resc->max_rx_em_flows + 
hw_resc->max_rx_wm_flows + + BNXT_L2_FLTR_MAX_FLTR; + /* Older firmware may not report these filters properly */ + if (bp->max_fltr < BNXT_MAX_FLTR) + bp->max_fltr = BNXT_MAX_FLTR; + bnxt_init_l2_fltr_tbl(bp); + rc = bnxt_init_mac_addr(bp); + if (rc) { + netdev_err(bp->dev, "Unable to initialize mac address.\n"); + rc = -EADDRNOTAVAIL; + goto init_err_pci_clean; + } + + if (BNXT_PF(bp)) { + /* Read the adapter's DSN to use as the eswitch id */ + rc = bnxt_pcie_dsn_get(bp, bp->dsn); + if (rc) + netdev_warn(dev, "Failed to read DSN.\n"); + } + + bnxt_set_netdev_mtu(bp); + + bnxt_set_rx_skb_mode(bp, false); + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + bnxt_rdma_aux_device_init(bp); + rc = bnxt_set_dflt_rings(bp, true); + if (rc) { + if (BNXT_VF(bp) && rc == -ENODEV) { + netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n"); + } else { + netdev_err(bp->dev, + "%s: Not enough rings available rc[%d].\n", __func__, rc); + rc = -ENOMEM; + } + goto init_err_pci_clean; + } + + bnxt_fw_init_one_p3(bp); + + bnxt_init_dflt_coal(bp); + + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) + bp->flags |= BNXT_FLAG_STRIP_VLAN; + + rc = bnxt_init_int_mode(bp); + if (rc) + goto init_err_pci_clean; + + /* No TC has been set yet and rings may have been trimmed due to + * limited MSIX, so we re-initialize the TX rings per TC. 
+ */ + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bnxt_trim_mpc_rings(bp); + + if (BNXT_PF(bp)) { + if (!bnxt_pf_wq) { + bnxt_pf_wq = + create_singlethread_workqueue("bnxt_pf_wq"); + if (!bnxt_pf_wq) { + dev_err(&pdev->dev, "Unable to create workqueue.\n"); + rc = -ENOMEM; + goto init_err_pci_clean; + } + } + rc = bnxt_init_tc(bp); + if (rc) + netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", + rc); + } + + bnxt_inv_fw_health_reg(bp); + rc = bnxt_dl_register(bp); + if (rc) + goto init_err_dl; + bnxt_init_lag(bp); + rc = bnxt_ktls_init(bp); + if (rc) + bnxt_free_ktls_info(bp); + + INIT_LIST_HEAD(&bp->usr_fltr_list); + + if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) + bnxt_init_multi_rss_ctx(bp); + + if (BNXT_CHIP_P5_PLUS(bp)) { + bp->af_xdp_zc_qs = bitmap_zalloc(BNXT_MAX_XSK_RINGS, GFP_KERNEL); + if (!bp->af_xdp_zc_qs) { + netdev_warn(bp->dev, + "Could not allocate memory for af_xdp_zc_qs\n"); + goto init_err_cleanup; + } + } + + rc = register_netdev(dev); + if (rc) + goto init_err_cleanup; + +#if defined(HAVE_DEVLINK_PORT_ATTRS) && !defined(HAVE_SET_NETDEV_DEVLINK_PORT) + if (BNXT_PF(bp)) + devlink_port_type_eth_set(&bp->dl_port, bp->dev); +#endif + bnxt_dl_fw_reporters_create(bp); + + bnxt_rdma_aux_device_add(bp); + + bnxt_print_device_info(bp); + + pci_save_state(pdev); + + if (BNXT_PF(bp)) + bnxt_sriov_sysfs_init(bp); +#ifdef DEV_NETMAP + if (BNXT_CHIP_P5_PLUS(bp)) + bnxt_netmap_attach(bp); +#endif /* DEV_NETMAP */ + +#if defined(HAVE_ETF_QOPT_OFFLOAD) + if (BNXT_SUPPORTS_ETF(bp)) { + rc = bnxt_alloc_tc_etf_bitmap(bp); + if (rc) + netdev_dbg(bp->dev, "Failed to alloc etf bitmap\n"); + } +#endif + + return 0; + +init_err_cleanup: + bnxt_rdma_aux_device_uninit(bp); + bnxt_dl_unregister(bp); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, true); + if (BNXT_CHIP_P5_PLUS(bp)) + bitmap_free(bp->af_xdp_zc_qs); +init_err_dl: + bnxt_shutdown_tc(bp); + bnxt_clear_int_mode(bp); + +init_err_pci_clean: + bnxt_hwrm_func_drv_unrgtr(bp); + 
bnxt_free_hwrm_resources(bp); + bnxt_hwmon_uninit(bp); + bnxt_ethtool_free(bp); + bnxt_free_stats_cosqnames_mem(bp); + bnxt_ptp_clear(bp); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + kfree(bp->fw_health); + bp->fw_health = NULL; + bnxt_free_tfc_mpc_info(bp); + bnxt_free_mpc_info(bp); + bnxt_deinit_lag(bp); + bnxt_free_ktls_info(bp); + bnxt_cleanup_pci(bp); + bnxt_free_ctx_mem(bp); + bnxt_free_crash_dump_mem(bp); + bnxt_free_udcc_info(bp); + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; + +init_err_free: + free_netdev(dev); + return rc; +} + +static void bnxt_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp; + + if (!dev) + return; + + bp = netdev_priv(dev); + + rtnl_lock(); + if (netif_running(dev)) + dev_close(dev); + rtnl_unlock(); + +#ifndef HAVE_AUXILIARY_DRIVER + bnxt_ulp_shutdown(bp); +#endif + bnxt_clear_int_mode(bp); + pci_disable_device(pdev); + + if (system_state == SYSTEM_POWER_OFF && + pdev->pm_cap) { + netif_info(bp, wol, bp->dev, "WOL D3 wake: %d\n", (u32)bp->wol); + pci_wake_from_d3(pdev, bp->wol); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM_SLEEP +static int bnxt_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + bnxt_ulp_stop(bp); + + rtnl_lock(); + if (netif_running(dev)) { + netif_device_detach(dev); + rc = bnxt_close(dev); + } + bnxt_hwrm_func_drv_unrgtr(bp); + pci_disable_device(bp->pdev); + bnxt_free_ctx_mem(bp); + bnxt_free_crash_dump_mem(bp); + rtnl_unlock(); + return rc; +} + +static int bnxt_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rtnl_lock(); + rc = pci_enable_device(bp->pdev); + if (rc) { + netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", + rc); + goto resume_exit; + } + pci_set_master(bp->pdev); + if (bnxt_hwrm_ver_get(bp, false)) { + rc = 
-ENODEV; + goto resume_exit; + } + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + rc = -EBUSY; + goto resume_exit; + } + + rc = bnxt_hwrm_func_qcaps(bp, true); + if (rc) + goto resume_exit; + + bnxt_clear_reservations(bp, true); + + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { + rc = -ENODEV; + goto resume_exit; + } + + bnxt_get_wol_settings(bp); + if (netif_running(dev)) { + rc = bnxt_open(dev); + if (!rc) + netif_device_attach(dev); + } + +resume_exit: + rtnl_unlock(); + bnxt_ulp_start(bp, rc); + if (!rc) + bnxt_reenable_sriov(bp); + return rc; +} + +static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume); +#define BNXT_PM_OPS (&bnxt_pm_ops) + +#else + +#define BNXT_PM_OPS NULL + +#endif /* CONFIG_PM_SLEEP */ + +/** + * bnxt_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + struct bnxt_ptp_cfg *ptp_cfg = bp->ptp_cfg; + bool abort = false; + + netdev_info(netdev, "PCI I/O error detected state %d\n", state); + + rtnl_lock(); + netif_device_detach(netdev); + + if (ptp_cfg) + spin_lock_bh(&ptp_cfg->ptp_lock); + if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_err(bp->dev, "Firmware reset already in progress\n"); + abort = true; + } + if (ptp_cfg) + spin_unlock_bh(&ptp_cfg->ptp_lock); + + if (abort || state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Link is not reliable anymore if state is pci_channel_io_frozen + * so we disable bus master to prevent any potential bad DMAs before + * freeing kernel memory. 
+ */ + if (state == pci_channel_io_frozen) { + set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state); + /* For io frozen case, MSIX will be disabled from the same context. + * See bnxt_fw_fatal_close(). ulp irq stop is required to be called + * before bnxt_fw_fatal_close. + * For io normal case, MSIX will be disabled from bnxt_io_slot_reset. + */ + bnxt_ulp_irq_stop(bp); + bnxt_fw_fatal_close(bp); + } + + if (netif_running(netdev)) + __bnxt_close_nic(bp, true, true); + + rtnl_unlock(); + bnxt_ulp_stop(bp); + + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); + bnxt_free_ctx_mem(bp); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnxt_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. + * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) +{ + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + int retry = 0; + int err = 0; + int off; + + netdev_info(bp->dev, "PCI Slot Reset\n"); + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) + msleep(900); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + } else { + pci_set_master(pdev); + /* Upon fatal error, our device internal logic that latches to + * BAR value is getting reset and will restore only upon + * rewritting the BARs. + * + * As pci_restore_state() does not re-write the BARs if the + * value is same as saved value earlier, driver needs to + * write the BARs to 0 to force restore, in case of fatal error. 
+ */ + if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, + &bp->state)) { + for (off = PCI_BASE_ADDRESS_0; + off <= PCI_BASE_ADDRESS_5; off += 4) + pci_write_config_dword(bp->pdev, off, 0); + } + pci_restore_state(pdev); + pci_save_state(pdev); + + bnxt_inv_fw_health_reg(bp); + bnxt_try_map_fw_health_reg(bp); + + /* In some PCIe AER scenarios, firmware may take up to + * 10 seconds to become ready in the worst case. + */ + do { + err = bnxt_try_recover_fw(bp); + if (!err) + break; + retry++; + } while (retry < BNXT_FW_SLOT_RESET_RETRY); + + if (err) { + dev_err(&pdev->dev, "Firmware not ready\n"); + goto reset_exit; + } + + err = bnxt_hwrm_func_reset(bp); + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + + bnxt_ulp_irq_stop(bp); + bnxt_clear_int_mode(bp); + err = bnxt_init_int_mode(bp); + bnxt_ulp_irq_restart(bp, err); + } + +reset_exit: + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_clear_reservations(bp, true); + rtnl_unlock(); + +#if !defined(RHEL_RELEASE_CODE) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) || \ + defined(RHEL_RELEASE_CODE) && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8, 1) + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } +#endif + return result; +} + +/** + * bnxt_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. 
+ */ +static void bnxt_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + int err; + + netdev_info(bp->dev, "PCI Slot Resume\n"); + rtnl_lock(); + + err = bnxt_hwrm_func_qcaps(bp, true); + if (!err && netif_running(netdev)) + err = bnxt_open(netdev); + + if (!err) + netif_device_attach(netdev); + + rtnl_unlock(); + bnxt_ulp_start(bp, err); + if (!err) + bnxt_reenable_sriov(bp); +} + +static const struct pci_error_handlers bnxt_err_handler = { + .error_detected = bnxt_io_error_detected, + .slot_reset = bnxt_io_slot_reset, + .resume = bnxt_io_resume +}; + +#if defined(CONFIG_BNXT_SRIOV) && \ + defined(SRIOV_CONF_DEF_IN_PCI_DRIVER_RH) +static struct pci_driver_rh bnxt_pci_driver_rh = { + .sriov_configure = bnxt_sriov_configure +}; +#endif + +static struct pci_driver bnxt_pci_driver = { + .name = DRV_MODULE_NAME, + .id_table = bnxt_pci_tbl, + .probe = bnxt_init_one, + .remove = bnxt_remove_one, + .shutdown = bnxt_shutdown, + .driver.pm = BNXT_PM_OPS, + .err_handler = &bnxt_err_handler, +#if defined(CONFIG_BNXT_SRIOV) && defined(PCIE_SRIOV_CONFIGURE) +#ifndef SRIOV_CONF_DEF_IN_PCI_DRIVER_RH + .sriov_configure = bnxt_sriov_configure, +#else + .rh_reserved = &bnxt_pci_driver_rh, +#endif +#endif +}; + +static int __init bnxt_init(void) +{ + int err; + +#ifndef PCIE_SRIOV_CONFIGURE + bnxt_sriov_init(num_vfs); +#endif + bnxt_lfc_init(); + + bnxt_debug_init(); + err = pci_register_driver(&bnxt_pci_driver); + if (err) + goto err; + + return 0; +err: + bnxt_debug_exit(); + bnxt_lfc_exit(); +#ifndef PCIE_SRIOV_CONFIGURE + bnxt_sriov_exit(); +#endif + return err; +} + +static void __exit bnxt_exit(void) +{ +#ifndef PCIE_SRIOV_CONFIGURE + bnxt_sriov_exit(); +#endif + bnxt_lfc_exit(); + pci_unregister_driver(&bnxt_pci_driver); + if (bnxt_pf_wq) + destroy_workqueue(bnxt_pf_wq); + bnxt_debug_exit(); +} + +module_init(bnxt_init); +module_exit(bnxt_exit); diff --git 
a/drivers/thirdparty/release-drivers/bnxt/bnxt.h b/drivers/thirdparty/release-drivers/bnxt/bnxt.h new file mode 100644 index 000000000000..68d20913aaf8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt.h @@ -0,0 +1,3737 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_H +#define BNXT_H + +#include "bnxt_hsi.h" +#include "bnxt_extra_ver.h" + +#define DRV_MODULE_NAME "bnxt_en" +#define DRV_MODULE_VERSION "1.10.3" DRV_MODULE_EXTRA_VER + +#define DRV_VER_MAJ 1 +#define DRV_VER_MIN 10 +#define DRV_VER_UPD 3 + +#include +#include +#include +#ifdef HAVE_DEVLINK +#include +#endif +#ifdef HAVE_METADATA_HW_PORT_MUX +#include +#endif +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#include +#endif +#if defined(HAVE_SWITCHDEV) && !defined(HAVE_NDO_GET_PORT_PARENT_ID) +#include +#endif +#ifdef HAVE_XDP_RXQ_INFO +#include +#endif +#ifdef HAVE_DIM +#include +#else +#include "bnxt_dim.h" +#endif +#ifdef HAVE_LO_HI_WRITEQ +#include +#endif +#ifdef CONFIG_TEE_BNXT_FW +#include +#endif +#include "bnxt_dbr.h" +#include "bnxt_auxbus_compat.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) +#include "hcapi/bitalloc.h" +#endif + +#ifdef CONFIG_PAGE_POOL +struct page_pool; +#endif + +#define BNXT_XSK_TX 0x10 +#define BNXT_TX_BD_LONG_CNT 2 +struct tx_bd { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE (0x3f << 0) + #define TX_BD_TYPE_SHORT_TX_BD (0x00 << 0) + #define TX_BD_TYPE_MPC_TX_BD (0x08 << 0) + #define TX_BD_TYPE_LONG_TX_BD (0x10 << 0) + #define TX_BD_TYPE_LONG_TX_BD_INLINE (0x11 << 0) + #define TX_BD_FLAGS_PACKET_END (1 << 6) + #define TX_BD_FLAGS_NO_CMPL (1 << 7) + #define 
TX_BD_FLAGS_BD_CNT (0x1f << 8) + #define TX_BD_FLAGS_BD_CNT_SHIFT 8 + #define TX_BD_FLAGS_LHINT (3 << 13) + #define TX_BD_FLAGS_LHINT_SHIFT 13 + #define TX_BD_FLAGS_LHINT_512_AND_SMALLER (0 << 13) + #define TX_BD_FLAGS_LHINT_512_TO_1023 (1 << 13) + #define TX_BD_FLAGS_LHINT_1024_TO_2047 (2 << 13) + #define TX_BD_FLAGS_LHINT_2048_AND_LARGER (3 << 13) + #define TX_BD_FLAGS_COAL_NOW (1 << 15) + #define TX_BD_LEN (0xffff << 16) + #define TX_BD_LEN_SHIFT 16 + + u32 tx_bd_opaque; + __le64 tx_bd_haddr; +} __packed; + +#define TX_OPAQUE_IDX_MASK 0x0000ffff +#define TX_OPAQUE_BDS_MASK 0x00ff0000 +#define TX_OPAQUE_BDS_SHIFT 16 +#define TX_OPAQUE_RING_MASK 0xff000000 +#define TX_OPAQUE_RING_SHIFT 24 + +#define SET_TX_OPAQUE(bp, txr, idx, bds) \ + (((txr)->tx_napi_idx << TX_OPAQUE_RING_SHIFT) | \ + ((bds) << TX_OPAQUE_BDS_SHIFT) | ((idx) & (bp)->tx_ring_mask)) + +#define TX_OPAQUE_IDX(opq) ((opq) & TX_OPAQUE_IDX_MASK) +#define TX_OPAQUE_RING(opq) (((opq) & TX_OPAQUE_RING_MASK) >> \ + TX_OPAQUE_RING_SHIFT) +#define TX_OPAQUE_BDS(opq) (((opq) & TX_OPAQUE_BDS_MASK) >> \ + TX_OPAQUE_BDS_SHIFT) +#define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\ + (bp)->tx_ring_mask) + +struct tx_bd_ext { + __le32 tx_bd_hsize_lflags; + #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0) + #define TX_BD_FLAGS_IP_CKSUM (1 << 1) + #define TX_BD_FLAGS_NO_CRC (1 << 2) + #define TX_BD_FLAGS_STAMP (1 << 3) + #define TX_BD_FLAGS_T_IP_CHKSUM (1 << 4) + #define TX_BD_FLAGS_LSO (1 << 5) + #define TX_BD_FLAGS_IPID_FMT (1 << 6) + #define TX_BD_FLAGS_T_IPID (1 << 7) + #define TX_BD_FLAGS_CRYPTO_EN (1 << 15) + #define TX_BD_HSIZE (0xff << 16) + #define TX_BD_HSIZE_SHIFT 16 + #define TX_BD_KID_LO (0x7f << 25) + #define TX_BD_KID_LO_MASK 0x7f + #define TX_BD_KID_LO_SHIFT 25 + + __le32 tx_bd_kid_mss; + #define TX_BD_MSS 0x7fff + #define TX_BD_KID_HI (0x1ffff << 15) + #define TX_BD_KID_HI_MASK 0xffff80 + #define TX_BD_KID_HI_SHIFT 8 + __le32 tx_bd_cfa_action; + #define TX_BD_CFA_ACTION (0xffff << 
16) + #define TX_BD_CFA_ACTION_SHIFT 16 + + __le32 tx_bd_cfa_meta; + #define TX_BD_CFA_META_MASK 0xfffffff + #define TX_BD_CFA_META_VID_MASK 0xfff + #define TX_BD_CFA_META_PRI_MASK (0xf << 12) + #define TX_BD_CFA_META_PRI_SHIFT 12 + #define TX_BD_CFA_META_TPID_MASK (3 << 16) + #define TX_BD_CFA_META_TPID_SHIFT 16 + #define TX_BD_CFA_META_KEY (0xf << 28) + #define TX_BD_CFA_META_KEY_SHIFT 28 + #define TX_BD_CFA_META_KEY_VLAN (1 << 28) +}; + +#define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP)) +#define BNXT_TX_KID_LO(kid) (((kid) & TX_BD_KID_LO_MASK) << TX_BD_KID_LO_SHIFT) +#define BNXT_TX_KID_HI(kid) (((kid) & TX_BD_KID_HI_MASK) << TX_BD_KID_HI_SHIFT) + +struct tx_bd_presync { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE_PRESYNC_TX_BD (0x09 << 0) + u32 tx_bd_opaque; + __le32 tx_bd_kid; + u32 tx_bd_unused; +}; + +/* SO_TXTIME BD */ +struct tx_bd_sotxtime { + __le32 tx_bd_len_flags_type; + #define TX_BD_TYPE_TIMEDTX_BD (0xaUL << 0) + #define TX_BD_FLAGS_KIND_SO_TXTIME (0x1UL << 6) + __le32 rate; + __le64 tx_time; +}; + +struct rx_bd { + __le32 rx_bd_len_flags_type; + #define RX_BD_TYPE (0x3f << 0) + #define RX_BD_TYPE_RX_PACKET_BD 0x4 + #define RX_BD_TYPE_RX_BUFFER_BD 0x5 + #define RX_BD_TYPE_RX_AGG_BD 0x6 + #define RX_BD_TYPE_16B_BD_SIZE (0 << 4) + #define RX_BD_TYPE_32B_BD_SIZE (1 << 4) + #define RX_BD_TYPE_48B_BD_SIZE (2 << 4) + #define RX_BD_TYPE_64B_BD_SIZE (3 << 4) + #define RX_BD_FLAGS_SOP (1 << 6) + #define RX_BD_FLAGS_EOP (1 << 7) + #define RX_BD_FLAGS_BUFFERS (3 << 8) + #define RX_BD_FLAGS_1_BUFFER_PACKET (0 << 8) + #define RX_BD_FLAGS_2_BUFFER_PACKET (1 << 8) + #define RX_BD_FLAGS_3_BUFFER_PACKET (2 << 8) + #define RX_BD_FLAGS_4_BUFFER_PACKET (3 << 8) + #define RX_BD_LEN (0xffff << 16) + #define RX_BD_LEN_SHIFT 16 + + u32 rx_bd_opaque; + __le64 rx_bd_haddr; +}; + +struct tx_cmp { + __le32 tx_cmp_flags_type; + #define CMP_TYPE (0x3f << 0) + #define CMP_TYPE_TX_L2_CMP 0 + #define CMP_TYPE_TX_NO_OP_CMP 1 + #define 
CMP_TYPE_TX_L2_COAL_CMP 2 + #define CMP_TYPE_TX_L2_PKT_TS_CMP 4 + #define CMP_TYPE_RX_L2_CMP 17 + #define CMP_TYPE_RX_AGG_CMP 18 + #define CMP_TYPE_RX_L2_TPA_START_CMP 19 + #define CMP_TYPE_RX_L2_TPA_END_CMP 21 + #define CMP_TYPE_RX_TPA_AGG_CMP 22 + #define CMP_TYPE_RX_L2_V3_CMP 23 + #define CMP_TYPE_RX_L2_TPA_START_V3_CMP 25 + #define CMP_TYPE_MPC_CMP_SHORT 30 + #define CMP_TYPE_MPC_CMP_LONG 31 + #define CMP_TYPE_STATUS_CMP 32 + #define CMP_TYPE_REMOTE_DRIVER_REQ 34 + #define CMP_TYPE_REMOTE_DRIVER_RESP 36 + #define CMP_TYPE_ERROR_STATUS 48 + #define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL + #define CMPL_BASE_TYPE_HWRM_DONE 0x20UL + #define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL + #define CMPL_BASE_TYPE_HWRM_FWD_RESP 0x24UL + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + + #define TX_CMP_FLAGS_ERROR (1 << 6) + #define TX_CMP_FLAGS_PUSH (1 << 7) + + u32 tx_cmp_opaque; + __le32 tx_cmp_errors_v; + #define TX_CMP_V (1 << 0) + #define TX_CMP_ERRORS_BUFFER_ERROR (7 << 1) + #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR 0 + #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT 2 + #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG 4 + #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS 5 + #define TX_CMP_ERRORS_ZERO_LENGTH_PKT (1 << 4) + #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN (1 << 5) + #define TX_CMP_ERRORS_DMA_ERROR (1 << 6) + #define TX_CMP_ERRORS_HINT_TOO_SHORT (1 << 7) + #define TX_CMP_ERRORS_TTX_OVERTIME (1 << 10) + + __le32 sq_cons_idx; + #define TX_CMP_SQ_CONS_IDX_MASK 0x00ffffff +}; + +#define TX_CMP_SQ_CONS_IDX(txcmp) \ + (le32_to_cpu(txcmp->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK) + +struct tx_ts_cmp { + __le32 tx_ts_cmp_flags_type; + #define TX_TS_CMP_FLAGS_ERROR (1 << 6) + #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7) + #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7) + #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7) + #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8) + #define TX_TS_CMP_TS_SUB_NS (0xf << 12) + #define TX_TS_CMP_TS_NS_MID (0xffff << 16) + #define TX_TS_CMP_TS_NS_MID_SFT 16 + u32 
tx_ts_cmp_opaque; + __le32 tx_ts_cmp_errors_v; + #define TX_TS_CMP_V (1 << 0) + #define TX_TS_CMP_TS_INVALID_ERR (1 << 10) + __le32 tx_ts_cmp_ts_ns_lo; +}; + +#define BNXT_GET_TX_TS_48B_NS(tscmp) \ + (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \ + ((u64)le32_to_cpu((tscmp)->tx_ts_cmp_flags_type & \ + TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT)) + +#define BNXT_TX_TS_ERR(tscmp) \ + ((tscmp->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\ + (tscmp->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR))) + +struct rx_cmp { + __le32 rx_cmp_len_flags_type; + #define RX_CMP_CMP_TYPE (0x3f << 0) + #define RX_CMP_FLAGS_ERROR (1 << 6) + #define RX_CMP_FLAGS_PLACEMENT (7 << 7) + #define RX_CMP_FLAGS_RSS_VALID (1 << 10) + #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11) + #define RX_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_CMP_FLAGS_ITYPES_MASK 0xf000 + #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) + #define RX_CMP_FLAGS_ITYPE_IP (1 << 12) + #define RX_CMP_FLAGS_ITYPE_TCP (2 << 12) + #define RX_CMP_FLAGS_ITYPE_UDP (3 << 12) + #define RX_CMP_FLAGS_ITYPE_FCOE (4 << 12) + #define RX_CMP_FLAGS_ITYPE_ROCE (5 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS (8 << 12) + #define RX_CMP_FLAGS_ITYPE_PTP_W_TS (9 << 12) + #define RX_CMP_LEN (0xffff << 16) + #define RX_CMP_LEN_SHIFT 16 + + u32 rx_cmp_opaque; + __le32 rx_cmp_misc_v1; + #define RX_CMP_V1 (1 << 0) + #define RX_CMP_AGG_BUFS (0x1f << 1) + #define RX_CMP_AGG_BUFS_SHIFT 1 + #define RX_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_CMP_V3_RSS_EXT_OP_LEGACY (0xf << 12) + #define RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT 12 + #define RX_CMP_V3_RSS_EXT_OP_NEW (0xf << 8) + #define RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT 8 + #define RX_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_CMP_PAYLOAD_OFFSET_SHIFT 16 + #define RX_CMP_SUB_NS_TS (0xf << 16) + #define RX_CMP_SUB_NS_TS_SHIFT 16 + #define RX_CMP_METADATA1 (0xf << 28) + #define RX_CMP_METADATA1_SHIFT 28 + #define RX_CMP_METADATA1_TPID_SEL (0x7 
<< 28) + #define RX_CMP_METADATA1_TPID_8021Q (0x1 << 28) + #define RX_CMP_METADATA1_TPID_8021AD (0x0 << 28) + #define RX_CMP_METADATA1_VALID (0x8 << 28) + + __le32 rx_cmp_rss_hash; +}; + +#define RX_CMP_HASH_VALID(rxcmp) \ + ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) + +#define RSS_PROFILE_ID_MASK 0x1f + +#define RX_CMP_HASH_TYPE(rxcmp) \ + (((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\ + RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) + +#define RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp) \ + ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_LEGACY) >>\ + RX_CMP_V3_RSS_EXT_OP_LEGACY_SHIFT) + +#define RX_CMP_V3_HASH_TYPE_NEW(rxcmp) \ + ((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_V3_RSS_EXT_OP_NEW) >>\ + RX_CMP_V3_RSS_EXT_OP_NEW_SHIFT) + +#define RX_CMP_V3_HASH_TYPE(bp, rxcmp) \ + (((bp)->rss_cap & BNXT_RSS_CAP_RSS_TCAM) ? \ + RX_CMP_V3_HASH_TYPE_NEW(rxcmp) : \ + RX_CMP_V3_HASH_TYPE_LEGACY(rxcmp)) + +#define EXT_OP_INNER_4 0x0 +#define EXT_OP_OUTER_4 0x2 +#define EXT_OP_INNFL_3 0x8 +#define EXT_OP_OUTFL_3 0xa + +#define RX_CMP_VLAN_VALID(rxcmp) \ + ((rxcmp)->rx_cmp_misc_v1 & cpu_to_le32(RX_CMP_METADATA1_VALID)) + +#define RX_CMP_VLAN_TPID_SEL(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_METADATA1_TPID_SEL) + +#define RX_CMP_PAYLOAD_OFF(misc) \ + (((misc) & RX_CMP_PAYLOAD_OFFSET) >> RX_CMP_PAYLOAD_OFFSET_SHIFT) + +#define BNXT_RX_META_CFA_CODE_SHIFT 19 +#define BNXT_CFA_CODE_META_SHIFT 16 +#define BNXT_RX_META_CFA_CODE_INT_ACT_REC_BIT 0x8000000 +#define BNXT_RX_META_CFA_CODE_EEM_BIT 0x4000000 +#define BNXT_CFA_META_FMT_MASK 0x70 +#define BNXT_CFA_META_FMT_SHFT 4 +#define BNXT_CFA_META_FMT_EM_EEM_SHFT 1 +#define BNXT_CFA_META_FMT_EEM 3 +#define BNXT_CFA_META_EEM_TCAM_SHIFT 31 +#define BNXT_CFA_META_EM_TEST(x) ((x) >> BNXT_CFA_META_EEM_TCAM_SHIFT) + +struct rx_cmp_ext { + __le32 rx_cmp_flags2; + #define RX_CMP_FLAGS2_IP_CS_CALC 0x1 + #define RX_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define 
RX_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) + #define RX_CMP_FLAGS2_IP_TYPE (0x1 << 8) + __le32 rx_cmp_meta_data; + #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff + #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff + #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 + #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 + #define RX_CMP_META_INNER_L3_OFF_MASK (0x1ff << 18) + #define RX_CMP_META_INNER_L3_OFF_SFT 18 + #define RX_CMPL_CFA_V3_CODE_MASK (0xffff) + #define RX_CMPL_CFA_V3_CODE_SFT 0 + __le32 rx_cmp_cfa_code_errors_v2; + #define RX_CMP_V (1 << 0) + #define RX_CMPL_ERRORS_MASK (0x7fff << 1) + #define RX_CMPL_ERRORS_SFT 1 + #define RX_CMPL_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_CMPL_ERRORS_IP_CS_ERROR (0x1 << 4) + #define RX_CMPL_ERRORS_L4_CS_ERROR (0x1 << 5) + #define RX_CMPL_ERRORS_T_IP_CS_ERROR (0x1 << 6) + #define RX_CMPL_ERRORS_T_L4_CS_ERROR (0x1 << 7) + #define RX_CMPL_ERRORS_CRC_ERROR (0x1 << 8) + #define RX_CMPL_ERRORS_T_PKT_ERROR_MASK (0x7 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (0x0 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (0x3 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x4 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x5 << 9) + #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x6 << 9) + #define RX_CMPL_ERRORS_PKT_ERROR_MASK (0xf << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR (0x0 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2 << 12) + #define 
RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12) + #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8 << 12) + + #define RX_CMPL_CFA_CODE_MASK (0xffff << 16) + #define RX_CMPL_CFA_CODE_SFT 16 + #define RX_CMPL_METADATA0_TCI_MASK (0xffff << 16) + #define RX_CMPL_METADATA0_VID_MASK (0x0fff << 16) + #define RX_CMPL_METADATA0_SFT 16 + + __le32 rx_cmp_timestamp; +}; + +#define RX_CMP_L2_ERRORS \ + cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR) + +#define RX_CMP_L4_CS_BITS \ + (cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC)) + +#define RX_CMP_L4_CS_ERR_BITS \ + (cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR)) + +#define RX_CMP_L4_CS_OK(rxcmp1) \ + (((rxcmp1)->rx_cmp_flags2 & RX_CMP_L4_CS_BITS) && \ + !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)) + +#define RX_CMP_ENCAP(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ + RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) + +#define RX_CMP_CFA_CODE(rxcmpl1) \ + ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) + +#define RX_CMP_CFA_V3_CODE(rxcmpl1) \ + (le32_to_cpu((rxcmpl1)->rx_cmp_meta_data) & \ + RX_CMPL_CFA_V3_CODE_MASK) + +#define RX_CMP_METADATA0_TCI(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_METADATA0_TCI_MASK) >> RX_CMPL_METADATA0_SFT) + +#define RX_CMP_IS_IPV6(rxcmp1) \ + (!!((rxcmp1)->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_IP_TYPE))) + +#define RX_CMP_INNER_L3_OFF(rxcmp1) \ + ((le32_to_cpu((rxcmp1)->rx_cmp_meta_data) & \ + RX_CMP_META_INNER_L3_OFF_MASK) >> RX_CMP_META_INNER_L3_OFF_SFT) + +struct rx_agg_cmp { + __le32 rx_agg_cmp_len_flags_type; + #define RX_AGG_CMP_TYPE (0x3f << 0) + #define 
RX_AGG_CMP_LEN (0xffff << 16) + #define RX_AGG_CMP_LEN_SHIFT 16 + u32 rx_agg_cmp_opaque; + __le32 rx_agg_cmp_v; + #define RX_AGG_CMP_V (1 << 0) + #define RX_AGG_CMP_AGG_ID (0x0fff << 16) + #define RX_AGG_CMP_AGG_ID_SHIFT 16 + __le32 rx_agg_cmp_unused; +}; + +#define TPA_AGG_AGG_ID(rx_agg) \ + ((le32_to_cpu((rx_agg)->rx_agg_cmp_v) & \ + RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT) + +struct rx_tpa_start_cmp { + __le32 rx_tpa_start_cmp_len_flags_type; + #define RX_TPA_START_CMP_TYPE (0x3f << 0) + #define RX_TPA_START_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_START_CMP_FLAGS_SHIFT 6 + #define RX_TPA_START_CMP_FLAGS_ERROR (0x1 << 6) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_START_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_START_CMP_FLAGS_TIMESTAMP (0x1 << 11) + #define RX_TPA_START_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_START_CMP_LEN (0xffff << 16) + #define RX_TPA_START_CMP_LEN_SHIFT 16 + + u32 rx_tpa_start_cmp_opaque; + __le32 rx_tpa_start_cmp_misc_v1; + #define RX_TPA_START_CMP_V1 (0x1 << 0) + #define RX_TPA_START_CMP_RSS_HASH_TYPE (0x7f << 9) + #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT 9 + #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE (0x1ff << 7) + #define RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT 7 + #define RX_TPA_START_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_START_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_START_CMP_AGG_ID_P5 (0x0fff << 16) + #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5 16 + #define RX_TPA_START_CMP_METADATA1 (0xf << 28) + #define RX_TPA_START_CMP_METADATA1_SHIFT 28 + #define RX_TPA_START_METADATA1_TPID_SEL (0x7 << 28) + 
#define RX_TPA_START_METADATA1_TPID_8021Q (0x1 << 28) + #define RX_TPA_START_METADATA1_TPID_8021AD (0x0 << 28) + #define RX_TPA_START_METADATA1_VALID (0x8 << 28) + + __le32 rx_tpa_start_cmp_rss_hash; +}; + +#define TPA_START_HASH_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID)) + +#define TPA_START_HASH_TYPE(rx_tpa_start) \ + (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) + +#define TPA_START_V3_HASH_TYPE(rx_tpa_start) \ + (((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_V3_RSS_HASH_TYPE) >> \ + RX_TPA_START_CMP_V3_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK) + +#define TPA_START_AGG_ID(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT) + +#define TPA_START_AGG_ID_P5(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5) + +#define TPA_START_ERROR(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR)) + +#define TPA_START_VLAN_VALID(rx_tpa_start) \ + ((rx_tpa_start)->rx_tpa_start_cmp_misc_v1 & \ + cpu_to_le32(RX_TPA_START_METADATA1_VALID)) + +#define TPA_START_VLAN_TPID_SEL(rx_tpa_start) \ + (le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) & \ + RX_TPA_START_METADATA1_TPID_SEL) + +struct rx_tpa_start_cmp_ext { + __le32 rx_tpa_start_cmp_flags2; + #define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC (0x1 << 0) + #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) + #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) + #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_TPA_START_CMP_FLAGS2_AGG_GRO (0x1 << 2) + #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) + #define 
RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID (0x1 << 9) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT (0x3 << 10) + #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT 10 + #define RX_TPA_START_CMP_V3_FLAGS2_T_IP_TYPE (0x1 << 10) + #define RX_TPA_START_CMP_V3_FLAGS2_AGG_GRO (0x1 << 11) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL (0xffff << 16) + #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT 16 + + __le32 rx_tpa_start_cmp_metadata; + __le32 rx_tpa_start_cmp_cfa_code_v2; + #define RX_TPA_START_CMP_V2 (0x1 << 0) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK (0x7 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT 1 + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) + #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) + #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 + #define RX_TPA_START_CMP_METADATA0_TCI_MASK (0xffff << 16) + #define RX_TPA_START_CMP_METADATA0_VID_MASK (0x0fff << 16) + #define RX_TPA_START_CMP_METADATA0_SFT 16 + __le32 rx_tpa_start_cmp_hdr_info; +}; + +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + +#define TPA_START_IS_IPV6(rx_tpa_start) \ + (!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 & \ + cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE))) + +#define TPA_START_ERROR_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >> \ + RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT) + +#define TPA_START_METADATA0_TCI(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_METADATA0_TCI_MASK) >> \ + RX_TPA_START_CMP_METADATA0_SFT) + +struct rx_tpa_end_cmp { + __le32 rx_tpa_end_cmp_len_flags_type; + #define RX_TPA_END_CMP_TYPE (0x3f << 0) + #define 
RX_TPA_END_CMP_FLAGS (0x3ff << 6) + #define RX_TPA_END_CMP_FLAGS_SHIFT 6 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT (0x7 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT 7 + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO (0x1 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS (0x2 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO (0x5 << 7) + #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS (0x6 << 7) + #define RX_TPA_END_CMP_FLAGS_RSS_VALID (0x1 << 10) + #define RX_TPA_END_CMP_FLAGS_ITYPES (0xf << 12) + #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT 12 + #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP (0x2 << 12) + #define RX_TPA_END_CMP_LEN (0xffff << 16) + #define RX_TPA_END_CMP_LEN_SHIFT 16 + + u32 rx_tpa_end_cmp_opaque; + __le32 rx_tpa_end_cmp_misc_v1; + #define RX_TPA_END_CMP_V1 (0x1 << 0) + #define RX_TPA_END_CMP_AGG_BUFS (0x3f << 1) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT 1 + #define RX_TPA_END_CMP_TPA_SEGS (0xff << 8) + #define RX_TPA_END_CMP_TPA_SEGS_SHIFT 8 + #define RX_TPA_END_CMP_PAYLOAD_OFFSET (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT 16 + #define RX_TPA_END_CMP_AGG_ID (0x7f << 25) + #define RX_TPA_END_CMP_AGG_ID_SHIFT 25 + #define RX_TPA_END_CMP_AGG_ID_P5 (0x0fff << 16) + #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5 16 + + __le32 rx_tpa_end_cmp_tsdelta; + #define RX_TPA_END_GRO_TS (0x1 << 31) +}; + +#define TPA_END_AGG_ID(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT) + +#define TPA_END_AGG_ID_P5(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5) + +#define TPA_END_PAYLOAD_OFF(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT) + +#define TPA_END_AGG_BUFS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT) + 
+#define TPA_END_TPA_SEGS(rx_tpa_end) \ + ((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) & \ + RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT) + +#define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO \ + cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS) + +#define TPA_END_GRO(rx_tpa_end) \ + ((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type & \ + RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) + +#define TPA_END_GRO_TS(rx_tpa_end) \ + (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \ + cpu_to_le32(RX_TPA_END_GRO_TS))) + +struct rx_tpa_end_cmp_ext { + __le32 rx_tpa_end_cmp_dup_acks; + #define RX_TPA_END_CMP_TPA_DUP_ACKS (0xf << 0) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5 (0xff << 16) + #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5 16 + #define RX_TPA_END_CMP_AGG_BUFS_P5 (0xff << 24) + #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5 24 + + __le32 rx_tpa_end_cmp_seg_len; + #define RX_TPA_END_CMP_TPA_SEG_LEN (0xffff << 0) + + __le32 rx_tpa_end_cmp_errors_v2; + #define RX_TPA_END_CMP_V2 (0x1 << 0) + #define RX_TPA_END_CMP_ERRORS (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_P5 (0x7 << 1) + #define RX_TPA_END_CMPL_ERRORS_SHIFT 1 + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR (0x4 << 1) + #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH (0x5 << 1) + + u32 rx_tpa_end_cmp_start_opaque; +}; + +#define TPA_END_ERRORS(rx_tpa_end_ext) \ + ((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 & \ + cpu_to_le32(RX_TPA_END_CMP_ERRORS)) + +#define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >> \ + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5) + +#define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext) \ + ((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) & \ + 
RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5) + +#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL) + +#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION) + +#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK) + +#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \ + !!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC) + +#define EVENT_DATA1_RECOVERY_ENABLED(data1) \ + !!((data1) & \ + ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED) + +#define EVENT_DATA2_VF_CFG_CHNG_VF_ID(data2) \ + ((data2) & \ + ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK) + +#define EVENT_DATA1_VNIC_CHNG_PF_ID(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK) >>\ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT) + +#define EVENT_DATA1_VNIC_CHNG_VF_ID(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK) >>\ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT) + +#define EVENT_DATA1_VNIC_CHNG_VNIC_STATE(data1) \ + ((data1) & \ + ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK) + +#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT) + +#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT) + +#define 
EVENT_DATA2_NVM_ERR_ADDR(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK) >> \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT) + +#define EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE) + +#define EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \ + ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE) + +#define EVENT_DATA1_VNIC_CHNG_VNIC_STATE_ALLOC 1 +#define EVENT_DATA1_VNIC_CHNG_VNIC_STATE_FREE 2 + +struct nqe_cn { + __le16 type; + #define NQ_CN_TYPE_MASK 0x3fUL + #define NQ_CN_TYPE_SFT 0 + #define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL + #define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION + #define NQ_CN_TOGGLE_MASK 0xc0UL + #define NQ_CN_TOGGLE_SFT 6 + __le16 reserved16; + __le32 cq_handle_low; + __le32 v; + #define NQ_CN_V 0x1UL + __le32 cq_handle_high; +}; + +#define BNXT_NQ_HDL_IDX_MASK 0x00ffffff +#define BNXT_NQ_HDL_TYPE_MASK 0xff000000 +#define BNXT_NQ_HDL_TYPE_SHIFT 24 +#define BNXT_NQ_HDL_TYPE_RX 0x00 +#define BNXT_NQ_HDL_TYPE_TX 0x01 +#define BNXT_NQ_HDL_TYPE_MP 0x02 + +#define BNXT_NQ_HDL_IDX(hdl) ((hdl) & BNXT_NQ_HDL_IDX_MASK) +#define BNXT_NQ_HDL_TYPE(hdl) (((hdl) & BNXT_NQ_HDL_TYPE_MASK) >> \ + BNXT_NQ_HDL_TYPE_SHIFT) + +#define BNXT_SET_NQ_HDL(cpr) \ + (((cpr)->cp_ring_type << BNXT_NQ_HDL_TYPE_SHIFT) | (cpr)->cp_idx) + +#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK) +#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \ + NQ_CN_TOGGLE_SFT) + +#define DB_IDX_MASK 0xffffff +#define DB_IDX_VALID (0x1 << 26) +#define DB_IRQ_DIS (0x1 << 27) +#define DB_KEY_TX (0x0 << 28) +#define DB_KEY_RX (0x1 << 28) +#define DB_KEY_CP (0x2 << 28) +#define DB_KEY_ST (0x3 << 28) +#define DB_KEY_TX_PUSH (0x4 << 28) +#define DB_LONG_TX_PUSH (0x2 << 24) + +/* 64-bit doorbell 
*/ +#define DBR_INDEX_MASK 0x0000000000ffffffULL +#define DBR_PI_LO_MASK 0xff000000UL +#define DBR_PI_LO_SFT 24 +#define DBR_EPOCH_MASK 0x01000000UL +#define DBR_EPOCH_SFT 24 +#define DBR_TOGGLE_MASK 0x06000000UL +#define DBR_TOGGLE_SFT 25 +#define DBR_XID_MASK 0x000fffff00000000ULL +#define DBR_XID_SFT 32 +#define DBR_PI_HI_MASK 0xf0000000000000ULL +#define DBR_PI_HI_SFT 52 +#define DBR_PATH_L2 (0x1ULL << 56) +#define DBR_VALID (0x1ULL << 58) +#define DBR_TYPE_SQ (0x0ULL << 60) +#define DBR_TYPE_RQ (0x1ULL << 60) +#define DBR_TYPE_SRQ (0x2ULL << 60) +#define DBR_TYPE_SRQ_ARM (0x3ULL << 60) +#define DBR_TYPE_CQ (0x4ULL << 60) +#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60) +#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60) +#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60) +#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60) +#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60) +#define DBR_TYPE_NQ (0xaULL << 60) +#define DBR_TYPE_NQ_ARM (0xbULL << 60) +#define DBR_TYPE_PUSH_START (0xcULL << 60) +#define DBR_TYPE_PUSH_END (0xdULL << 60) +#define DBR_TYPE_NQ_MASK (0xeULL << 60) +#define DBR_TYPE_NULL (0xfULL << 60) + +/* Bit needed by DB copy */ +#define DBC_DEBUG_TRACE_SHIFT 59 +#define DBC_DEBUG_TRACE_MASK (0x1ULL << DBC_DEBUG_TRACE_SHIFT) + +#define DB_PF_OFFSET_P5 0x10000 +#define DB_VF_OFFSET_P5 0x4000 + +#define DB_WCB_FIRST_OFFSET 16 +#define DB_WCB_PER_PAGE 15 +#define DB_WCB_PAGE_SIZE 4096 +#define DB_WCB_BUFFER_SIZE 256 + +#define DB_PPP_SIZE 256 +#define DB_PPP_BD_OFFSET 16 + +#define INVALID_HW_RING_ID ((u16)-1) +#define INVALID_PORT_ID ((u16)-1) +/* The hardware supports certain page sizes. Use the supported page sizes + * to allocate the rings. 
+ */ +#if (PAGE_SHIFT < 12) +#define BNXT_PAGE_SHIFT 12 +#elif (PAGE_SHIFT <= 13) +#define BNXT_PAGE_SHIFT PAGE_SHIFT +#elif (PAGE_SHIFT < 16) +#define BNXT_PAGE_SHIFT 13 +#else +#define BNXT_PAGE_SHIFT 16 +#endif + +#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) + +/* The RXBD length is 16-bit so we can only support page sizes < 64K */ +#if (PAGE_SHIFT > 15) +#define BNXT_RX_PAGE_SHIFT 15 +#else +#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT +#endif + +#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) + +#if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) && !defined(HAVE_PAGE_POOL_PAGE_FRAG) +#undef CONFIG_PAGE_POOL +#endif + +#define BNXT_RX_METADATA_SIZE(bp) \ + ((bp)->ktls_info ? sizeof(struct tls_metadata_resync_msg) + 32 :\ + 0) + +#define BNXT_MAX_MTU 9500 + +/* + * First RX buffer page in XDP multi-buf mode + * + * +-------------------------------------------------------------------------+ + * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size | skb_shared_info| + * | (bp->rx_dma_offset) | | | + * +-------------------------------------------------------------------------+ + */ +#define BNXT_MAX_PAGE_MODE_MTU_SBUF \ + ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \ + XDP_PACKET_HEADROOM) +#define BNXT_MAX_PAGE_MODE_MTU(bp) \ + (BNXT_MAX_PAGE_MODE_MTU_SBUF - \ + SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)) - \ + (unsigned int)BNXT_RX_METADATA_SIZE(bp)) + +#define BNXT_MIN_PKT_SIZE 52 + +#define BNXT_DEFAULT_RX_RING_SIZE 511 +#define BNXT_DEFAULT_TX_RING_SIZE 511 + +#define BNXT_TSO_MAX_SEGS_P5 4096 + +#define MAX_TPA 64 +#define MAX_TPA_P5 256 +#define MAX_TPA_P5_MASK (MAX_TPA_P5 - 1) +#define MAX_TPA_SEGS_P5 0x3f + +#if (BNXT_PAGE_SHIFT == 16) +#define MAX_RX_PAGES_AGG_ENA 1 +#define MAX_RX_PAGES 4 +#define MAX_RX_AGG_PAGES 4 +#define MAX_TX_PAGES 1 +#else +#define MAX_RX_PAGES_AGG_ENA 8 +#define MAX_RX_PAGES 32 +#define MAX_RX_AGG_PAGES 32 +#define MAX_TX_PAGES 8 +#endif + +#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) +#define TX_DESC_CNT 
(BNXT_PAGE_SIZE / sizeof(struct tx_bd)) +#define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp)) + +#define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT) +#define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT) + +#define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT) + +#define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT) +#define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT) + +#define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) + +#define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNXT_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1) +#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) +#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) + +/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra + * BD because the first TX BD is always a long BD. + */ +#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2) + +#define RX_RING(bp, x) (((x) & (bp)->rx_ring_mask) >> (BNXT_PAGE_SHIFT - 4)) +#define RX_AGG_RING(bp, x) (((x) & (bp)->rx_agg_ring_mask) >> \ + (BNXT_PAGE_SHIFT - 4)) +#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1)) + +#define TX_RING(bp, x) (((x) & (bp)->tx_ring_mask) >> (BNXT_PAGE_SHIFT - 4)) +#define TX_IDX(x) ((x) & (TX_DESC_CNT - 1)) + +#define CP_RING(x) (((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4)) +#define CP_IDX(x) ((x) & (CP_DESC_CNT - 1)) + +#define TX_CMP_VALID(txcmp, raw_cons) \ + (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define RX_CMP_VALID(rxcmp1, raw_cons) \ + (!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\ + !((raw_cons) & bp->cp_bit)) + +#define RX_AGG_CMP_VALID(agg, raw_cons) \ + (!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) == \ + !((raw_cons) & bp->cp_bit)) + +#define NQ_CMP_VALID(nqcmp, raw_cons) \ + (!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit)) + +#define 
TX_CMP_TYPE(txcmp) \ + (le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE) + +#define RX_CMP_TYPE(rxcmp) \ + (le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE) + +#define TX_CMP_PUSH(txcmp) \ + (!!((txcmp)->tx_cmp_flags_type & cpu_to_le32(TX_CMP_FLAGS_PUSH))) + +#define TX_CMP_TXTM_ERR(txcmp) \ + (!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_ERRORS_TTX_OVERTIME))) + +#define RING_RX(bp, idx) ((idx) & (bp)->rx_ring_mask) +#define NEXT_RX(idx) ((idx) + 1) + +#define RING_RX_AGG(bp, idx) ((idx) & (bp)->rx_agg_ring_mask) +#define NEXT_RX_AGG(idx) ((idx) + 1) + +#define RING_TX(bp, idx) ((idx) & (bp)->tx_ring_mask) +#define NEXT_TX(idx) ((idx) + 1) + +#define TX_PUSH_LEN(len) \ + ((len) + sizeof(struct tx_bd) + sizeof(struct tx_bd_ext)) + +#define TX_INLINE_BDS(len) (DIV_ROUND_UP(len, sizeof(struct tx_bd))) + +#define ADV_RAW_CMP(idx, n) ((idx) + (n)) +#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) +#define RING_CMP(idx) ((idx) & bp->cp_ring_mask) +#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) + +#define DFLT_HWRM_CMD_TIMEOUT 500 + +#define BNXT_RX_EVENT 1 +#define BNXT_AGG_EVENT 2 +#define BNXT_TX_EVENT 4 +#define BNXT_REDIRECT_EVENT 8 +#define BNXT_TX_CMP_EVENT 0x10 + +struct bnxt_sw_tx_bd { + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; + DEFINE_DMA_UNMAP_ADDR(mapping); + DEFINE_DMA_UNMAP_LEN(len); + struct page *page; + u8 is_ts_pkt; + u8 is_push; + u8 inline_data_bds; + u8 action; + unsigned short nr_frags; + union { + u16 rx_prod; + u16 txts_prod; + }; +}; + +struct bnxt_sw_rx_bd { + void *data; + u8 *data_ptr; + dma_addr_t mapping; +}; + +struct bnxt_sw_rx_agg_bd { + struct page *page; + unsigned int offset; + dma_addr_t mapping; +}; + +struct bnxt_ring_mem_info { + int nr_pages; + int page_size; + u16 flags; +#define BNXT_RMEM_VALID_PTE_FLAG 1 +#define BNXT_RMEM_RING_PTE_FLAG 2 +#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4 + + u16 depth; + struct bnxt_ctx_mem_type *ctx_mem; + + void **pg_arr; + dma_addr_t *dma_arr; + + __le64 
*pg_tbl; + dma_addr_t pg_tbl_map; + + int vmem_size; + void **vmem; +}; + +struct bnxt_ring_struct { + struct bnxt_ring_mem_info ring_mem; + + u16 fw_ring_id; /* Ring id filled by Chimp FW */ + union { + u16 grp_idx; + u16 map_idx; /* Used by cmpl rings */ + }; + u32 handle; + u8 queue_id; +#define BNXT_MPC_QUEUE_ID 0xff + u8 mpc_chnl_type; + u8 push_idx; + u32 seed; /* seed for DBR pacing */ +}; + +struct tx_push_bd { + __le32 doorbell; + __le32 tx_bd_len_flags_type; + u32 tx_bd_opaque; + struct tx_bd_ext txbd2; +}; + +struct tx_push_buffer { + struct tx_push_bd push_bd; + u32 data[25]; +}; + +struct bnxt_db_info { + void __iomem *doorbell; + union { + u64 db_key64; + u32 db_key32; + }; + u32 db_ring_mask; + u32 db_epoch_mask; + u8 db_epoch_shift; + u8 db_cp_debug_trace; + __le64 *db_cp; /* HW DB recovery */ +}; + +#define DB_EPOCH(db, idx) (((idx) & (db)->db_epoch_mask) << \ + ((db)->db_epoch_shift)) + +#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT) + +#define DB_RING_IDX(db, idx) (((idx) & (db)->db_ring_mask) | \ + DB_EPOCH(db, idx)) + +#define DB_PUSH_LEN(len) (DB_PUSH_INFO_PUSH_SIZE_MASK & \ + (((sizeof(struct db_push_info) + \ + sizeof(struct dbc_dbc)) / 8 + \ + (len)) << DB_PUSH_INFO_PUSH_SIZE_SFT)) + +#define DB_PUSH_INFO(db, len, idx) (DB_PUSH_LEN(len) | \ + ((idx) & (db)->db_ring_mask)) + +struct bnxt_tx_ring_info { + struct bnxt_napi *bnapi; + struct bnxt_cp_ring_info *tx_cpr; + u16 tx_prod; + u16 tx_cons; + u16 tx_hw_cons; + u16 txq_index; + u8 tx_napi_idx; + u8 kick_pending; + u8 bd_base_cnt; + u8 etf_enabled; + u16 xdp_tx_pending; + struct bnxt_db_info tx_db; + + struct tx_bd *tx_desc_ring[MAX_TX_PAGES]; + union { + struct bnxt_sw_tx_bd *tx_buf_ring; + struct bnxt_sw_mpc_tx_bd *tx_mpc_buf_ring; + }; + + dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; + + struct bnxt_db_info tx_push_db; + void __iomem *tx_push_wcb; + struct tx_push_buffer *tx_push; + dma_addr_t tx_push_mapping; + __le64 data_mapping; + +#define BNXT_DEV_STATE_CLOSING 0x1 + u32 dev_state; + 
+ struct bnxt_ring_struct tx_ring_struct; + + /* Synchronize simultaneous xdp_xmit on same ring or for MPC ring */ + spinlock_t tx_lock; + struct xsk_buff_pool *xsk_pool; +}; + +#define BNXT_LEGACY_COAL_CMPL_PARAMS \ + (RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \ + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT) + +#define BNXT_COAL_CMPL_ENABLES \ + (RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT) + +#define BNXT_COAL_CMPL_MIN_TMR_ENABLE \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN + +#define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE \ + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT + +struct bnxt_coal_cap { + u32 cmpl_params; + u32 nq_params; + u16 num_cmpl_dma_aggr_max; + u16 num_cmpl_dma_aggr_during_int_max; + u16 cmpl_aggr_dma_tmr_max; + u16 cmpl_aggr_dma_tmr_during_int_max; + u16 int_lat_tmr_min_max; + u16 int_lat_tmr_max_max; + u16 num_cmpl_aggr_int_max; + u16 timer_units; +}; + +struct bnxt_coal { + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + /* RING_IDLE enabled when coal ticks < idle_thresh */ + u16 idle_thresh; + u8 bufs_per_record; + u8 budget; + u16 flags; +}; + +struct bnxt_tpa_info { + void *data; + u8 *data_ptr; + dma_addr_t mapping; + u16 len; + unsigned short gso_type; + u32 flags2; + u32 metadata; + enum pkt_hash_types 
hash_type; + u32 rss_hash; + u32 hdr_info; + +#define BNXT_TPA_L4_SIZE(hdr_info) \ + (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) + +#define BNXT_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNXT_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) + + u16 cfa_code; /* cfa_code in TPA start compl */ + u8 payload_off; + u8 agg_count; + u8 vlan_valid:1; + u8 cfa_code_valid:1; + struct rx_agg_cmp *agg_arr; +}; + +#define BNXT_AGG_IDX_BMAP_SIZE (MAX_TPA_P5 / BITS_PER_LONG) + +struct bnxt_tpa_idx_map { + u16 agg_id_tbl[1024]; + unsigned long agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE]; +}; + +struct bnxt_rx_ring_info { + struct bnxt_napi *bnapi; + struct bnxt_cp_ring_info *rx_cpr; + u16 rx_prod; + u16 rx_agg_prod; + u16 rx_sw_agg_prod; + u16 rx_next_cons; +#ifdef CONFIG_NETMAP + u32 netmap_idx; +#endif + struct bnxt_db_info rx_db; + struct bnxt_db_info rx_agg_db; + + struct bpf_prog *xdp_prog; + + struct rx_bd *rx_desc_ring[MAX_RX_PAGES]; + struct bnxt_sw_rx_bd *rx_buf_ring; + + struct rx_bd *rx_agg_desc_ring[MAX_RX_AGG_PAGES]; + struct bnxt_sw_rx_agg_bd *rx_agg_ring; + + unsigned long *rx_agg_bmap; + u16 rx_agg_bmap_size; + + struct page *rx_page; + unsigned int rx_page_offset; + + dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; + dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; + + struct bnxt_tpa_info *rx_tpa; + struct bnxt_tpa_idx_map *rx_tpa_idx_map; + + struct bnxt_ring_struct rx_ring_struct; + struct bnxt_ring_struct rx_agg_ring_struct; +#ifdef HAVE_XDP_RXQ_INFO + struct xdp_rxq_info xdp_rxq; +#endif +#ifdef CONFIG_PAGE_POOL + struct page_pool *page_pool; +#endif + struct xsk_buff_pool *xsk_pool; + u32 flags; +#define BNXT_RING_FLAG_AF_XDP_ZC 0x00000001 +#define BNXT_RING_RX_ZC_MODE(rxr) ((rxr)->flags & BNXT_RING_FLAG_AF_XDP_ZC) +}; + +struct bnxt_rx_sw_stats { + u64 rx_hds; + u64 rx_tpa_hds; + u64 rx_l4_csum_errors; + u64 rx_resets; + u64 rx_buf_errors; + u64 
rx_oom_discards; + u64 rx_netpoll_discards; +}; + +struct bnxt_tx_sw_push_stats { + u64 tx_push_xmit; + u64 tx_push_cmpl; +}; + +struct bnxt_txtime_sw_stats { + u64 txtime_xmit; + u64 txtime_cmpl_err; +}; + +struct bnxt_cmn_sw_stats { + u64 missed_irqs; +}; + +struct bnxt_xsk_stats { + u64 xsk_rx_success; + u64 xsk_rx_redirect_fail; + u64 xsk_rx_alloc_fail; + u64 xsk_rx_no_room; + u64 xsk_tx_ring_full; + u64 xsk_wakeup; + u64 xsk_tx_completed; + u64 xsk_tx_sent_pkts; +}; + +struct bnxt_sw_stats { + struct bnxt_rx_sw_stats rx; + struct bnxt_tx_sw_push_stats tx; + struct bnxt_txtime_sw_stats txtime; + struct bnxt_cmn_sw_stats cmn; + struct bnxt_xsk_stats xsk_stats; +}; + +struct bnxt_total_ring_err_stats { + u64 rx_total_l4_csum_errors; + u64 rx_total_resets; + u64 rx_total_buf_errors; + u64 rx_total_oom_discards; + u64 rx_total_netpoll_discards; + u64 rx_total_ring_discards; + u64 tx_total_ring_discards; + u64 total_missed_irqs; +}; + +struct bnxt_stats_mem { + u64 *sw_stats; + u64 *hw_masks; + void *hw_stats; + dma_addr_t hw_stats_map; + int len; +}; + +struct bnxt_cp_ring_info { + struct bnxt_napi *bnapi; + u32 cp_raw_cons; + struct bnxt_db_info cp_db; + + u8 had_work_done:1; + u8 has_more_work:1; + u8 had_nqe_notify:1; + u8 toggle; + u8 cp_ring_type; + u8 cp_idx; + + u32 last_cp_raw_cons; + + struct bnxt_coal rx_ring_coal; + u64 rx_packets; + u64 rx_bytes; + u64 event_ctr; + + struct dim dim; + + union { + struct tx_cmp **cp_desc_ring; + struct nqe_cn **nq_desc_ring; + }; + + dma_addr_t *cp_desc_mapping; + + struct bnxt_stats_mem stats; + u32 hw_stats_ctx_id; + + struct bnxt_sw_stats *sw_stats; + + struct bnxt_ring_struct cp_ring_struct; + + int cp_ring_count; + struct bnxt_cp_ring_info *cp_ring_arr; +#ifdef CONFIG_NETMAP + u8 netmapped; +#endif +}; + +#define BNXT_MAX_QUEUE 8 +#define BNXT_MAX_TXR_PER_NAPI BNXT_MAX_QUEUE +#define BNXT_MAX_XSK_RINGS 2048 + +#define bnxt_for_each_napi_tx(iter, bnapi, txr) \ + for (iter = 0, txr = (bnapi)->tx_ring[0]; txr; \ + txr 
= (iter < BNXT_MAX_TXR_PER_NAPI - 1) ? \ + (bnapi)->tx_ring[++iter] : NULL) + +struct bnxt_napi { + struct napi_struct napi; + struct bnxt *bp; + + int index; + struct bnxt_cp_ring_info cp_ring; + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring[BNXT_MAX_TXR_PER_NAPI]; + struct bnxt_tx_ring_info **tx_mpc_ring; + + void (*tx_int)(struct bnxt *bp, + struct bnxt_napi *bnapi, + int budget); + u8 events; + u8 tx_fault:1; + + u32 flags; +#define BNXT_NAPI_FLAG_XDP 0x1 + +#ifdef BNXT_PRIV_RX_BUSY_POLL + atomic_t poll_state; +#endif + bool in_reset; +}; + +#ifdef BNXT_PRIV_RX_BUSY_POLL +enum bnxt_poll_state_t { + BNXT_STATE_IDLE = 0, + BNXT_STATE_NAPI, + BNXT_STATE_POLL, + BNXT_STATE_DISABLE, +}; +#endif + +struct bnxt_irq { + irq_handler_t handler; + unsigned int vector; + u8 requested:1; + u8 have_cpumask:1; + char name[IFNAMSIZ + 17]; + cpumask_var_t cpu_mask; +}; + +#define HWRM_RING_ALLOC_TX 0x1 +#define HWRM_RING_ALLOC_RX 0x2 +#define HWRM_RING_ALLOC_AGG 0x4 +#define HWRM_RING_ALLOC_CMPL 0x8 +#define HWRM_RING_ALLOC_NQ 0x10 + +#define INVALID_STATS_CTX_ID -1 + +struct bnxt_ring_grp_info { + u16 fw_stats_ctx; + u16 fw_grp_id; + u16 rx_fw_ring_id; + u16 agg_fw_ring_id; + u16 cp_fw_ring_id; +}; + +#define BNXT_VNIC_DEFAULT 0 +#define BNXT_VNIC_NTUPLE 1 + +struct bnxt_vnic_info { + u16 fw_vnic_id; /* returned by Chimp during alloc */ +#define BNXT_MAX_CTX_PER_VNIC 8 + u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; + u16 fw_l2_ctx_id; + u16 mru; +#define BNXT_MAX_UC_ADDRS 4 + struct bnxt_l2_filter *l2_filters[BNXT_MAX_UC_ADDRS]; + /* index 0 always dev_addr */ + u16 uc_filter_count; + u8 *uc_list; + + u16 *fw_grp_ids; + dma_addr_t rss_table_dma_addr; + __le16 *rss_table; + dma_addr_t rss_hash_key_dma_addr; + u64 *rss_hash_key; + int rss_table_size; +#define BNXT_RSS_TABLE_ENTRIES_P5 64 +#define BNXT_RSS_TABLE_SIZE_P5 (BNXT_RSS_TABLE_ENTRIES_P5 * 4) +#define BNXT_RSS_TABLE_MAX_TBL_P5 8 +#define BNXT_MAX_RSS_TABLE_SIZE_P5 \ + (BNXT_RSS_TABLE_SIZE_P5 * 
BNXT_RSS_TABLE_MAX_TBL_P5) + +#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \ + (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) + + u32 rx_mask; + + u8 *mc_list; + int mc_list_size; + int mc_list_count; + dma_addr_t mc_list_mapping; +#define BNXT_MAX_MC_ADDRS 16 + + u8 metadata_format; + u8 state; + u32 flags; +#define BNXT_VNIC_RSS_FLAG 1 +#define BNXT_VNIC_RFS_FLAG 2 +#define BNXT_VNIC_MCAST_FLAG 4 +#define BNXT_VNIC_UCAST_FLAG 8 +#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 +#define BNXT_VNIC_ALL_MCAST_FLAG 0x20 +#define BNXT_VNIC_NTUPLE_FLAG 0x40 +#define BNXT_VNIC_RSSCTX_FLAG 0x80 + struct bnxt_rss_ctx *rss_ctx; +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + u16 ref_cnt; + u16 q_index; + struct vnic_info_meta *vnic_meta; +#endif + u32 vnic_id; +}; + +struct bnxt_rss_ctx { + struct list_head list; + struct bnxt_vnic_info vnic; + u16 *rss_indir_tbl; + u8 index; +}; + +#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) (BNXT_PF(bp) && \ + (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3)) + +#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \ + (BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \ + ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX)) + +#define BNXT_MAX_ETH_RSS_CTX 32 +#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) +#define BNXT_VNIC_ID_INVALID 0xffffffff + +struct bnxt_hw_rings { + int tx; + int rx; + int grp; + int cp; + int cp_p5; + int stat; + int vnic; + int rss_ctx; +}; + +struct bnxt_hw_resc { + u16 min_rsscos_ctxs; + u16 max_rsscos_ctxs; + u16 resv_rsscos_ctxs; + u16 min_cp_rings; + u16 max_cp_rings; + u16 resv_cp_rings; + u16 min_tx_rings; + u16 max_tx_rings; + u16 resv_tx_rings; + u16 max_tx_sch_inputs; + u16 min_rx_rings; + u16 max_rx_rings; + u16 resv_rx_rings; + u16 min_hw_ring_grps; + u16 max_hw_ring_grps; + u16 resv_hw_ring_grps; + u16 min_l2_ctxs; + u16 max_l2_ctxs; + u16 min_vnics; + u16 max_vnics; + u16 resv_vnics; + u16 min_stat_ctxs; + u16 max_stat_ctxs; + u16 resv_stat_ctxs; + u16 max_nqs; + u16 max_irqs; + u16 resv_irqs; + 
u32 max_encap_records; + u32 max_decap_records; + u32 max_tx_em_flows; + u32 max_tx_wm_flows; + u32 max_rx_em_flows; + u32 max_rx_wm_flows; + + u32 min_tx_key_ctxs; + u32 max_tx_key_ctxs; + u32 resv_tx_key_ctxs; + u32 min_rx_key_ctxs; + u32 max_rx_key_ctxs; + u32 resv_rx_key_ctxs; +}; + +#if defined(CONFIG_BNXT_SRIOV) +struct bnxt_vf_info { + u16 fw_fid; + u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */ + u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only + * stored by PF. + */ + u8 vnic_state_pending; + u8 vnic_state; + u8 cfg_change; + u16 vlan; + u16 func_qcfg_flags; + u32 flags; +#define BNXT_VF_QOS 0x1 +#define BNXT_VF_SPOOFCHK 0x2 +#define BNXT_VF_LINK_FORCED 0x4 +#define BNXT_VF_LINK_UP 0x8 +#define BNXT_VF_TRUST 0x10 + u32 min_tx_rate; + u32 max_tx_rate; + u16 min_tx_rings; + u16 max_tx_rings; + u16 min_rx_rings; + u16 max_rx_rings; + u16 min_cp_rings; + u16 min_stat_ctxs; + u16 min_ring_grps; + u16 min_vnics; + void *hwrm_cmd_req_addr; + dma_addr_t hwrm_cmd_req_dma_addr; + unsigned long police_id; + struct bnxt_stats_mem stats; +}; + +struct bnxt_vf_sysfs_obj { + u16 fw_fid; + struct bnxt_stats_mem stats; + struct bnxt *parent_pf_bp; + struct kobject kobj; +}; +#endif + +struct bnxt_pf_info { +#define BNXT_FIRST_PF_FID 1 +#define BNXT_FIRST_VF_FID 128 + u16 fw_fid; + u16 port_id; + u16 dflt_vnic_id; + u8 mac_addr[ETH_ALEN]; + u32 first_vf_id; + u16 active_vfs; + u16 registered_vfs; + u16 max_vfs; + u16 max_msix_vfs; + u16 vf_hwrm_cmd_req_page_shift; + unsigned long *vf_event_bmap; + u16 hwrm_cmd_req_pages; + u8 vf_resv_strategy; +#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0 +#define BNXT_VF_RESV_STRATEGY_MINIMAL 1 +#define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC 2 + +#define BNXT_MAX_VF_CMD_FWD_PAGES 4 + void *hwrm_cmd_req_addr[BNXT_MAX_VF_CMD_FWD_PAGES]; + dma_addr_t hwrm_cmd_req_dma_addr[BNXT_MAX_VF_CMD_FWD_PAGES]; + struct bnxt_vf_info __rcu *vf; +}; + +struct bnxt_filter_base { + struct hlist_node hash; + struct list_head list; + __le64 filter_id; 
+ u8 type; +#define BNXT_FLTR_TYPE_NTUPLE 1 +#define BNXT_FLTR_TYPE_L2 2 + u8 flags; +#define BNXT_ACT_DROP BIT(0) +#define BNXT_ACT_RING_DST BIT(1) +#define BNXT_ACT_FUNC_DST BIT(2) +#define BNXT_ACT_NO_AGING BIT(3) +#define BNXT_ACT_NUMA_DIRECT BIT(4) +#define BNXT_ACT_RSS_CTX BIT(5) + u16 sw_id; + u16 rxq; + u16 fw_vnic_id; + u16 vf_idx; + unsigned long state; +#define BNXT_FLTR_VALID 0 +#define BNXT_FLTR_INSERTED 1 +#define BNXT_FLTR_FW_DELETED 2 + + struct rcu_head rcu; +}; + +struct bnxt_flow_masks { + struct flow_dissector_key_ports ports; + struct flow_dissector_key_addrs addrs; +}; + +extern const struct bnxt_flow_masks BNXT_FLOW_MASK_NONE; +extern const struct bnxt_flow_masks BNXT_FLOW_IPV6_MASK_ALL; +extern const struct bnxt_flow_masks BNXT_FLOW_IPV4_MASK_ALL; + +struct bnxt_ntuple_filter { + /* base filter must be the first member */ + struct bnxt_filter_base base; + struct flow_keys fkeys; + struct bnxt_flow_masks fmasks; + struct bnxt_l2_filter *l2_fltr; + u32 flow_id; +}; + +struct bnxt_l2_key { + union { + struct { + u8 dst_mac_addr[ETH_ALEN]; + u16 vlan; + }; + u32 filter_key; + }; +}; + +struct bnxt_ipv4_tuple { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ports ports; +}; + +struct bnxt_ipv6_tuple { + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_ports ports; +}; + +#define BNXT_L2_KEY_SIZE (sizeof(struct bnxt_l2_key) / 4) +#define BNXT_NTUPLE_KEY_SIZE ((sizeof(struct flow_keys) - \ + FLOW_KEYS_HASH_OFFSET) / 4) +#define BNXT_NTUPLE_COOKIE_NUMA_DIRECT -9999 + +struct bnxt_l2_filter { + /* base filter must be the first member */ + struct bnxt_filter_base base; + struct bnxt_l2_key l2_key; + atomic_t refcnt; +}; + +/* hwrm_port_phy_qcfg_output (size:96 bytes) */ +struct hwrm_port_phy_qcfg_output_compat { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + u8 active_fec_signal_mode; + __le16 link_speed; + u8 duplex_cfg; + u8 pause; + __le16 
support_speeds; + __le16 force_link_speed; + u8 auto_mode; + u8 auto_pause; + __le16 auto_link_speed; + __le16 auto_link_speed_mask; + u8 wirespeed; + u8 lpbk; + u8 force_pause; + u8 module_status; + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + u8 media_type; + u8 xcvr_pkg_type; + u8 eee_config_phy_addr; + u8 parallel_detect; + __le16 link_partner_adv_speeds; + u8 link_partner_adv_auto_mode; + u8 link_partner_adv_pause; + __le16 adv_eee_link_speed_mask; + __le16 link_partner_adv_eee_link_speed_mask; + __le32 xcvr_identifier_type_tx_lpi_timer; + __le16 fec_cfg; + u8 duplex_state; + u8 option_flags; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + __le16 force_pam4_link_speed; + __le16 auto_pam4_link_speed_mask; + u8 link_partner_pam4_adv_speeds; + u8 valid; +}; + +struct bnxt_link_info { + u8 phy_type; + u8 media_type; + u8 transceiver; + u8 phy_addr; + u8 phy_link_status; +#define BNXT_LINK_NO_LINK PORT_PHY_QCFG_RESP_LINK_NO_LINK +#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL +#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK + u8 wire_speed; + u8 phy_state; +#define BNXT_PHY_STATE_ENABLED 0 +#define BNXT_PHY_STATE_DISABLED 1 + + u8 link_state; +#define BNXT_LINK_STATE_UNKNOWN 0 +#define BNXT_LINK_STATE_DOWN 1 +#define BNXT_LINK_STATE_UP 2 +#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP) + u8 active_lanes; + u8 duplex; +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 pause; +#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX +#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX +#define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \ + PORT_PHY_QCFG_RESP_PAUSE_TX) + u8 lp_pause; + u8 auto_pause_setting; + u8 force_pause_setting; + u8 duplex_setting; + u8 auto_mode; +#define BNXT_AUTO_MODE(mode) ((mode) > BNXT_LINK_AUTO_NONE && \ + (mode) <= 
BNXT_LINK_AUTO_MSK) +#define BNXT_LINK_AUTO_NONE PORT_PHY_QCFG_RESP_AUTO_MODE_NONE +#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS +#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED +#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW +#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK +#define PHY_VER_LEN 3 + u8 phy_ver[PHY_VER_LEN]; + u16 link_speed; +#define BNXT_LINK_SPEED_100MB PORT_PHY_QCFG_RESP_LINK_SPEED_100MB +#define BNXT_LINK_SPEED_1GB PORT_PHY_QCFG_RESP_LINK_SPEED_1GB +#define BNXT_LINK_SPEED_2GB PORT_PHY_QCFG_RESP_LINK_SPEED_2GB +#define BNXT_LINK_SPEED_2_5GB PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB +#define BNXT_LINK_SPEED_10GB PORT_PHY_QCFG_RESP_LINK_SPEED_10GB +#define BNXT_LINK_SPEED_20GB PORT_PHY_QCFG_RESP_LINK_SPEED_20GB +#define BNXT_LINK_SPEED_25GB PORT_PHY_QCFG_RESP_LINK_SPEED_25GB +#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB +#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB +#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB +#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB +#define BNXT_LINK_SPEED_400GB PORT_PHY_QCFG_RESP_LINK_SPEED_400GB + u16 support_speeds; + u16 support_pam4_speeds; + u16 support_speeds2; + + u16 auto_link_speeds; /* fw adv setting */ +#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB +#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB +#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB +#define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB +#define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB +#define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB +#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB +#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB +#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 
+#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB + u16 auto_pam4_link_speeds; +#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G +#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G +#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G + u16 auto_link_speeds2; +#define BNXT_LINK_SPEEDS2_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB +#define BNXT_LINK_SPEEDS2_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB +#define BNXT_LINK_SPEEDS2_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB +#define BNXT_LINK_SPEEDS2_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB +#define BNXT_LINK_SPEEDS2_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB +#define BNXT_LINK_SPEEDS2_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB +#define BNXT_LINK_SPEEDS2_MSK_50GB_PAM4 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 +#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 +#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 +#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 +#define BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 +#define BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 +#define BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112 \ + PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 + + u16 support_auto_speeds; + u16 support_pam4_auto_speeds; + u16 support_auto_speeds2; + + u16 lp_auto_link_speeds; + u16 lp_auto_pam4_link_speeds; + u16 force_link_speed; + u16 force_pam4_link_speed; + u16 force_link_speed2; +#define BNXT_LINK_SPEED_50GB_PAM4 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 +#define BNXT_LINK_SPEED_100GB_PAM4 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 +#define BNXT_LINK_SPEED_200GB_PAM4 \ + 
PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 +#define BNXT_LINK_SPEED_400GB_PAM4 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 +#define BNXT_LINK_SPEED_100GB_PAM4_112 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 +#define BNXT_LINK_SPEED_200GB_PAM4_112 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 +#define BNXT_LINK_SPEED_400GB_PAM4_112 \ + PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 + + u32 preemphasis; + u8 module_status; + u8 active_fec_sig_mode; + u16 fec_cfg; +#define BNXT_FEC_NONE PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED +#define BNXT_FEC_AUTONEG_CAP PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED +#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED +#define BNXT_FEC_ENC_BASE_R_CAP \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED +#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED +#define BNXT_FEC_ENC_RS_CAP \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED +#define BNXT_FEC_ENC_LLRS_CAP \ + (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED | \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED) +#define BNXT_FEC_ENC_RS \ + (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED | \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED | \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED) +#define BNXT_FEC_ENC_LLRS \ + (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED | \ + PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED) + + /* copy of requested setting from ethtool cmd */ + u8 autoneg; +#define BNXT_AUTONEG_SPEED 1 +#define BNXT_AUTONEG_FLOW_CTRL 2 + u8 req_signal_mode; +#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ +#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 +#define BNXT_SIG_MODE_PAM4_112 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 +#define BNXT_SIG_MODE_MAX (BNXT_SIG_MODE_PAM4_112 + 1) + u8 req_duplex; + u8 req_flow_ctrl; + u16 req_link_speed; + u16 advertising; /* user adv setting */ + u16 advertising_pam4; + bool 
force_link_chng; + + bool phy_retry; + unsigned long phy_retry_expires; + + /* a copy of phy_qcfg output used to report link + * info to VF + */ + struct hwrm_port_phy_qcfg_output phy_qcfg_resp; +}; + +#define BNXT_FEC_RS544_ON \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE) + +#define BNXT_FEC_RS544_OFF \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE) + +#define BNXT_FEC_RS272_ON \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE) + +#define BNXT_FEC_RS272_OFF \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE) + +#define BNXT_PAM4_SUPPORTED(link_info) \ + ((link_info)->support_pam4_speeds) + +#define BNXT_FEC_RS_ON(link_info) \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \ + (BNXT_PAM4_SUPPORTED(link_info) ? \ + (BNXT_FEC_RS544_ON | BNXT_FEC_RS272_OFF) : 0)) + +#define BNXT_FEC_LLRS_ON \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \ + PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \ + BNXT_FEC_RS272_ON | BNXT_FEC_RS544_OFF) + +#define BNXT_FEC_RS_OFF(link_info) \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE | \ + (BNXT_PAM4_SUPPORTED(link_info) ? 
\ + (BNXT_FEC_RS544_OFF | BNXT_FEC_RS272_OFF) : 0)) + +#define BNXT_FEC_BASE_R_ON(link_info) \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE | \ + BNXT_FEC_RS_OFF(link_info)) + +#define BNXT_FEC_ALL_OFF(link_info) \ + (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \ + BNXT_FEC_RS_OFF(link_info)) + +#define BNXT_MAX_COSQ_NAME_LEN 16 +#define MAX_COS_PER_PORT 10 +#define BNXT_COSQ_NAME_ARR_SIZE (BNXT_MAX_QUEUE * 2 * BNXT_MAX_COSQ_NAME_LEN) + +struct bnxt_queue_info { + u8 queue_id; + u8 queue_profile; +}; + +#define BNXT_MAX_LED 4 + +struct bnxt_led_info { + u8 led_id; + u8 led_type; + u8 led_group_id; + u8 unused; + __le16 led_state_caps; +#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \ + cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED)) + + __le16 led_color_caps; +}; + +#define BNXT_MAX_TEST 8 + +struct bnxt_test_info { + u8 offline_mask; + u16 timeout; + char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; +}; + +#define CHIMP_REG_VIEW_ADDR \ + ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 
0x80000000 : 0xb1000000) + +#define BNXT_GRCPF_REG_CHIMP_COMM 0x0 +#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 +#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ 0x488 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK 0xffffffUL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_SFT 0 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_MSK 0x1f000000UL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT 24 +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_MSK 0x20000000UL +#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT 29 + +#define BNXT_GRC_REG_STATUS_P5 0x520 + +#define BNXT_GRCPF_REG_KONG_COMM 0xA00 +#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00 + +#define BNXT_GRC_REG_CHIP_NUM 0x48 +#define BNXT_GRC_REG_BASE 0x260000 + +#define BNXT_TS_REG_TIMESYNC_TS0_LOWER 0x640180c +#define BNXT_TS_REG_TIMESYNC_TS0_UPPER 0x6401810 + +#define BNXT_GRC_BASE_MASK 0xfffff000 +#define BNXT_GRC_OFFSET_MASK 0x00000ffc + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +struct bnxt_tc_flow_stats { + u64 packets; + u64 bytes; +}; + +struct bnxt_flower_indr_block_cb_priv { + struct net_device *tunnel_netdev; + struct bnxt *bp; + struct list_head list; +}; + +struct bnxt_tc_info { + bool enabled; + + /* hash table to store TC offloaded flows */ + struct rhashtable flow_table; + struct rhashtable_params flow_ht_params; + + struct rhashtable tf_flow_table; + struct rhashtable_params tf_flow_ht_params; + + /* hash table to store L2 keys of TC flows */ + struct rhashtable l2_table; + struct rhashtable_params l2_ht_params; + /* hash table to store L2 keys for TC tunnel decap */ + struct rhashtable decap_l2_table; + struct rhashtable_params decap_l2_ht_params; + /* hash table to store tunnel decap entries */ + struct rhashtable decap_table; + struct rhashtable_params decap_ht_params; + /* hash table to store tunnel encap entries */ + struct rhashtable encap_table; + struct rhashtable_params encap_ht_params; + /* hash table to store neighbour */ + struct rhashtable neigh_table; + struct rhashtable_params 
neigh_ht_params; + /* hash table to store v6 subnets */ + struct rhashtable v6_subnet_table; + struct rhashtable_params v6_subnet_ht_params; +#define BNXT_ULP_MAX_V6_SUBNETS 4096 + struct bitalloc v6_subnet_pool; + + /* lock to atomically add/del an l2 node when a flow is + * added or deleted. + */ + struct mutex lock; + + /* Fields used for batching stats query */ + struct rhashtable_iter iter; +#define BNXT_FLOW_STATS_BATCH_MAX 10 + struct bnxt_tc_stats_batch { + void *flow_node; + struct bnxt_tc_flow_stats hw_stats; + } stats_batch[BNXT_FLOW_STATS_BATCH_MAX]; + + /* Stat counter mask (width) */ + u64 bytes_mask; + u64 packets_mask; +}; + +struct bnxt_tc_neigh_update { + struct work_struct work; + struct notifier_block netevent_nb; + struct neighbour *neigh; + /* Lock to protect neigh variable between neigh event handler and work + * queue handler. + */ + spinlock_t lock; +}; +#endif + +#ifdef CONFIG_VF_REPS +struct bnxt_vf_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct bnxt_vf_rep { + struct bnxt *bp; + struct net_device *dev; + struct metadata_dst *dst; + u16 vf_idx; + u32 tx_cfa_action; + u16 rx_cfa_code; + + struct bnxt_vf_rep_stats rx_stats; + struct bnxt_vf_rep_stats tx_stats; +}; +#endif + +#define PTU_PTE_VALID 0x1UL +#define PTU_PTE_LAST 0x2UL +#define PTU_PTE_NEXT_TO_LAST 0x4UL + +#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) +#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES) + +struct bnxt_ctx_pg_info { + u32 entries; + u32 nr_pages; + void *ctx_pg_arr[MAX_CTX_PAGES]; + dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; + struct bnxt_ring_mem_info ring_mem; + struct bnxt_ctx_pg_info **ctx_pg_tbl; +}; + +#define BNXT_MAX_TQM_SP_RINGS 1 +#define BNXT_MAX_TQM_FP_LEGACY_RINGS 8 +#define BNXT_MAX_TQM_FP_RINGS 9 +#ifdef BNXT_FPGA +#define BNXT_NUM_DFLT_RINGS 8 +#else +#define BNXT_NUM_DFLT_RINGS 64 +#endif +#define BNXT_NUM_DFLT_RINGS_NPAR_ROCE 16 + +#define BNXT_MIN_NUM_DFLT_RINGS 8 + +#define BNXT_MAX_TQM_LEGACY_RINGS \ + 
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_LEGACY_RINGS) +#define BNXT_MAX_TQM_RINGS \ + (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS) + +#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256 +#define BNXT_BACKING_STORE_CFG_LEN \ + sizeof(struct hwrm_func_backing_store_cfg_input) + +#define BNXT_SET_CTX_PAGE_ATTR(attr) \ +do { \ + if (BNXT_PAGE_SIZE == 0x2000) \ + attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \ + else if (BNXT_PAGE_SIZE == 0x10000) \ + attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \ + else \ + attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \ +} while (0) + +struct bnxt_ctx_mem_type { + u16 type; + u16 entry_size; + u32 flags; +#define BNXT_CTX_MEM_TYPE_VALID FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID + u32 instance_bmap; + u8 init_value; + u8 entry_multiple; + u16 init_offset; +#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff + u32 max_entries; + u32 min_entries; + u8 last:1; + u8 mem_valid:1; + u8 split_entry_cnt; +#define BNXT_MAX_SPLIT_ENTRY 4 + union { + struct { + u32 qp_l2_entries; + u32 qp_qp1_entries; + u32 qp_fast_qpmd_entries; + }; + u32 srq_l2_entries; + u32 cq_l2_entries; + u32 vnic_entries; + struct { + u32 mrav_av_entries; + u32 mrav_num_entries_units; + }; + u32 split[BNXT_MAX_SPLIT_ENTRY]; + }; + struct bnxt_ctx_pg_info *pg_info; +}; + +#define BNXT_CTX_MRAV_AV_SPLIT_ENTRY 0 + +#define BNXT_CTX_QP FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP +#define BNXT_CTX_SRQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ +#define BNXT_CTX_CQ FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ +#define BNXT_CTX_VNIC FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC +#define BNXT_CTX_STAT FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT +#define BNXT_CTX_STQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING +#define BNXT_CTX_FTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING +#define BNXT_CTX_MRAV FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV +#define BNXT_CTX_TIM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM +#define BNXT_CTX_TCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 
+#define BNXT_CTX_RCK FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK +#define BNXT_CTX_MTQM FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING +#define BNXT_CTX_SQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW +#define BNXT_CTX_RQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW +#define BNXT_CTX_SRQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW +#define BNXT_CTX_CQDBS FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW +#define BNXT_CTX_SRT_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE +#define BNXT_CTX_SRT2_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE +#define BNXT_CTX_CRT_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE +#define BNXT_CTX_CRT2_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE +#define BNXT_CTX_RIGP0_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE +#define BNXT_CTX_L2_HWRM_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE +#define BNXT_CTX_ROCE_HWRM_TRACE FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE +#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1) +#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1) +#define BNXT_CTX_INV ((u16)-1) + +#define BNXT_CTX_V2_MAX (FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE + 1) + +struct bnxt_ctx_mem_info { + u8 tqm_fp_rings_count; + + u32 flags; + #define BNXT_CTX_FLAG_INITED 0x01 + struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX]; +}; + +enum bnxt_health_severity { + SEVERITY_NORMAL = 0, + SEVERITY_WARNING, + SEVERITY_RECOVERABLE, + SEVERITY_FATAL, +}; + +enum bnxt_health_remedy { + REMEDY_DEVLINK_RECOVER, + REMEDY_POWER_CYCLE_DEVICE, + REMEDY_POWER_CYCLE_HOST, + REMEDY_FW_UPDATE, + REMEDY_HW_REPLACE, +}; + +struct bnxt_fw_health { + u32 flags; + u32 polling_dsecs; + u32 master_func_wait_dsecs; + u32 normal_func_wait_dsecs; + u32 post_reset_wait_dsecs; + u32 post_reset_max_wait_dsecs; + u32 regs[4]; + u32 mapped_regs[4]; +#define BNXT_FW_HEALTH_REG 0 +#define BNXT_FW_HEARTBEAT_REG 1 +#define BNXT_FW_RESET_CNT_REG 2 +#define BNXT_FW_RESET_INPROG_REG 3 + u32 
fw_reset_inprog_reg_mask; + u32 last_fw_heartbeat; + u32 last_fw_reset_cnt; + u8 enabled:1; + u8 primary:1; + u8 status_reliable:1; + u8 resets_reliable:1; + u8 tmr_multiplier; + u8 tmr_counter; + u8 fw_reset_seq_cnt; + u32 fw_reset_seq_regs[16]; + u32 fw_reset_seq_vals[16]; + u32 fw_reset_seq_delay_msec[16]; + u32 echo_req_data1; + u32 echo_req_data2; + struct devlink_health_reporter *fw_reporter; + struct mutex lock; + enum bnxt_health_severity severity; + enum bnxt_health_remedy remedy; + u32 arrests; + u32 discoveries; + u32 survivals; + u32 fatalities; + u32 diagnoses; +}; + +#define BNXT_FW_HEALTH_REG_TYPE_MASK 3 +#define BNXT_FW_HEALTH_REG_TYPE_CFG 0 +#define BNXT_FW_HEALTH_REG_TYPE_GRC 1 +#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2 +#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3 + +#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK) +#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK) + +#define BNXT_FW_HEALTH_WIN_BASE 0x3000 +#define BNXT_FW_HEALTH_WIN_MAP_OFF 8 + +#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \ + ((reg) & BNXT_GRC_OFFSET_MASK)) + +#define BNXT_FW_STATUS_HEALTH_MSK 0xffff +#define BNXT_FW_STATUS_HEALTHY 0x8000 +#define BNXT_FW_STATUS_SHUTDOWN 0x100000 +#define BNXT_FW_STATUS_RECOVERING 0x400000 + +#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\ + BNXT_FW_STATUS_HEALTHY) + +#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \ + BNXT_FW_STATUS_HEALTHY) + +#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \ + BNXT_FW_STATUS_HEALTHY) + +#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \ + ((sts) & BNXT_FW_STATUS_RECOVERING)) + +#define BNXT_FW_RETRY 5 +#define BNXT_FW_IF_RETRY 10 +#define BNXT_FW_SLOT_RESET_RETRY 4 + +enum bnxt_push_mode { + BNXT_PUSH_MODE_NONE = 0, + BNXT_PUSH_MODE_LEGACY, /* legacy silicon operation mode */ + BNXT_PUSH_MODE_WCB, /* write combining supported on P5 silicon */ + BNXT_PUSH_MODE_PPP, /* 
double buffered mode supported on Thor2 */ +}; + +struct bnxt_aux_priv { + struct auxiliary_device aux_dev; + struct bnxt_en_dev *edev; + int id; +}; + +/* Bit needed by DB copy */ +#define DBC_DEBUG_TRACE_SHIFT 59 +#define DBC_DEBUG_TRACE_MASK (0x1ULL << DBC_DEBUG_TRACE_SHIFT) + +#define DBC_GROUP_SQ 0 +#define DBC_GROUP_RQ 1 +#define DBC_GROUP_SRQ 2 +#define DBC_GROUP_CQ 3 +#define DBC_GROUP_MAX (DBC_GROUP_CQ + 1) + +struct bnxt_hdbr_info { + u8 hdbr_enabled; + u8 debug_trace; + void *ktbl[DBC_GROUP_MAX]; +}; + +enum board_idx { + BCM57301, + BCM57302, + BCM57304, + BCM57417_NPAR, + BCM58700, + BCM57311, + BCM57312, + BCM57402, + BCM57404, + BCM57406, + BCM57402_NPAR, + BCM57407, + BCM57412, + BCM57414, + BCM57416, + BCM57417, + BCM57412_NPAR, + BCM57314, + BCM57417_SFP, + BCM57416_SFP, + BCM57404_NPAR, + BCM57406_NPAR, + BCM57407_SFP, + BCM57407_NPAR, + BCM57414_NPAR, + BCM57416_NPAR, + BCM57452, + BCM57454, + BCM5745x_NPAR, + BCM57508, + BCM57504, + BCM57502, + BCM57508_NPAR, + BCM57504_NPAR, + BCM57502_NPAR, + BCM57608, + BCM57604, + BCM57602, + BCM57601, + BCM58802, + BCM58804, + BCM58808, + #ifdef BNXT_FPGA + BCM58812, + BCM58814, + BCM58818, + #endif + NETXTREME_E_VF, + NETXTREME_C_VF, + NETXTREME_S_VF, + NETXTREME_C_VF_HV, + NETXTREME_E_VF_HV, + NETXTREME_E_P5_VF, + NETXTREME_E_P5_VF_HV, + NETXTREME_E_P7_VF, +}; + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +struct vnic_info_meta { + int32_t meta_valid; + int32_t vnic_idx; + uint16_t fw_vnic_id; +}; +#endif + +#define BNXT_DIR_MAX 2 +struct backingstore_debug_data_t { + uint32_t tsid; + uint32_t dir; +}; + +#define BNXT_PORTS_MAX 2 +struct bnxt_bond_info { + struct net_device *p_netdev[BNXT_PORTS_MAX]; + struct notifier_block notif_blk; + unsigned long active_port_map; + unsigned long member_port_map; + unsigned long peers; + struct bnxt *bp; + u8 bond_active:1; + u8 primary:1; + u8 aggr_mode; + u8 fw_lag_id; +}; + +#define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xBC) 
+#define BNXT_TRACE_BUF_COUNT (BNXT_CTX_ROCE_HWRM_TRACE - BNXT_CTX_SRT_TRACE + 1) +struct bnxt_bs_trace_info { + u8 *magic_byte; + u32 last_offset; + u8 wrapped:1; +}; + +static inline void bnxt_bs_trace_check_wrapping(struct bnxt_bs_trace_info *bs_trace, + u32 offset) +{ + if (!bs_trace->wrapped && *bs_trace->magic_byte != BNXT_TRACE_BUF_MAGIC_BYTE) + bs_trace->wrapped = 1; + bs_trace->last_offset = offset; +} + +struct bnxt { + void __iomem *bar0; + void __iomem *bar1; + void __iomem *bar2; + + u32 reg_base; + u16 chip_num; +#define CHIP_NUM_57301 0x16c8 +#define CHIP_NUM_57302 0x16c9 +#define CHIP_NUM_57304 0x16ca +#define CHIP_NUM_58700 0x16cd +#define CHIP_NUM_57402 0x16d0 +#define CHIP_NUM_57404 0x16d1 +#define CHIP_NUM_57406 0x16d2 +#define CHIP_NUM_57407 0x16d5 + +#define CHIP_NUM_57311 0x16ce +#define CHIP_NUM_57312 0x16cf +#define CHIP_NUM_57314 0x16df +#define CHIP_NUM_57317 0x16e0 +#define CHIP_NUM_57412 0x16d6 +#define CHIP_NUM_57414 0x16d7 +#define CHIP_NUM_57416 0x16d8 +#define CHIP_NUM_57417 0x16d9 +#define CHIP_NUM_57412L 0x16da +#define CHIP_NUM_57414L 0x16db + +#define CHIP_NUM_5745X 0xd730 +#define CHIP_NUM_57452 0xc452 +#define CHIP_NUM_57454 0xc454 + +#define CHIP_NUM_57508 0x1750 +#define CHIP_NUM_57504 0x1751 +#define CHIP_NUM_57502 0x1752 + +#define CHIP_NUM_57608 0x1760 + +#define CHIP_NUM_58802 0xd802 +#define CHIP_NUM_58804 0xd804 +#define CHIP_NUM_58808 0xd808 + +#define CHIP_NUM_58818 0xd818 + +#define BNXT_CHIP_NUM_5730X(chip_num) \ + ((chip_num) >= CHIP_NUM_57301 && \ + (chip_num) <= CHIP_NUM_57304) + +#define BNXT_CHIP_NUM_5740X(chip_num) \ + (((chip_num) >= CHIP_NUM_57402 && \ + (chip_num) <= CHIP_NUM_57406) || \ + (chip_num) == CHIP_NUM_57407) + +#define BNXT_CHIP_NUM_5731X(chip_num) \ + ((chip_num) == CHIP_NUM_57311 || \ + (chip_num) == CHIP_NUM_57312 || \ + (chip_num) == CHIP_NUM_57314 || \ + (chip_num) == CHIP_NUM_57317) + +#define BNXT_CHIP_NUM_5741X(chip_num) \ + ((chip_num) >= CHIP_NUM_57412 && \ + (chip_num) <= 
CHIP_NUM_57414L) + +#define BNXT_CHIP_NUM_58700(chip_num) \ + ((chip_num) == CHIP_NUM_58700) + +#define BNXT_CHIP_NUM_5745X(chip_num) \ + ((chip_num) == CHIP_NUM_5745X || \ + (chip_num) == CHIP_NUM_57452 || \ + (chip_num) == CHIP_NUM_57454) + + +#define BNXT_CHIP_NUM_57X0X(chip_num) \ + (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num)) + +#define BNXT_CHIP_NUM_57X1X(chip_num) \ + (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) + +#define BNXT_CHIP_NUM_588XX(chip_num) \ + ((chip_num) == CHIP_NUM_58802 || \ + (chip_num) == CHIP_NUM_58804 || \ + (chip_num) == CHIP_NUM_58808) + + u8 chip_rev; +#ifdef BNXT_FPGA + u8 chip_platform_type; + +#define BNXT_ASIC(bp) \ + ((bp)->chip_platform_type == VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC) +#define BNXT_ZEBU(bp) \ + ((bp)->chip_platform_type == VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM) +#else +#define BNXT_ASIC(bp) true +#endif +#define BNXT_VPD_FLD_LEN 32 + char board_partno[BNXT_VPD_FLD_LEN]; + + /* Must remain NULL for old bnxt_re upstream/ + * inbox driver. 
+ */ + void *reserved_ulp_kabi_0; + + char board_serialno[BNXT_VPD_FLD_LEN]; + + struct net_device *dev; + struct pci_dev *pdev; + + atomic_t intr_sem; + + u32 flags; + #define BNXT_FLAG_CHIP_P5_PLUS 0x1 + #define BNXT_FLAG_VF 0x2 + #define BNXT_FLAG_LRO 0x4 +#ifdef CONFIG_INET + #define BNXT_FLAG_GRO 0x8 +#else + /* Cannot support hardware GRO if CONFIG_INET is not set */ + #define BNXT_FLAG_GRO 0x0 +#endif + #define BNXT_FLAG_TPA (BNXT_FLAG_LRO | BNXT_FLAG_GRO) + #define BNXT_FLAG_JUMBO 0x10 + #define BNXT_FLAG_STRIP_VLAN 0x20 + #define BNXT_FLAG_AGG_RINGS (BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \ + BNXT_FLAG_LRO) + #define BNXT_FLAG_RFS 0x100 + #define BNXT_FLAG_SHARED_RINGS 0x200 + #define BNXT_FLAG_PORT_STATS 0x400 + #define BNXT_FLAG_MULTI_ROOT 0x1000 + #define BNXT_FLAG_WOL_CAP 0x4000 + #define BNXT_FLAG_ROCEV1_CAP 0x8000 + #define BNXT_FLAG_ROCEV2_CAP 0x10000 + #define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \ + BNXT_FLAG_ROCEV2_CAP) + #define BNXT_FLAG_NO_AGG_RINGS 0x20000 + #define BNXT_FLAG_RX_PAGE_MODE 0x40000 + #define BNXT_FLAG_CHIP_P7 0x80000 + #define BNXT_FLAG_MULTI_HOST 0x100000 + #define BNXT_FLAG_DSN_VALID 0x200000 + #define BNXT_FLAG_DOUBLE_DB 0x400000 + #define BNXT_FLAG_UDP_GSO_CAP 0x800000 + #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 + #define BNXT_FLAG_TX_COAL_CMPL 0x2000000 + + #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 + #define BNXT_FLAG_ECN_STATS 0x8000000 + #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 + #define BNXT_FLAG_DIM 0x20000000 + #define BNXT_FLAG_NUMA_DIRECT 0x40000000 + #define BNXT_FLAG_CORE_RESET_TX_TIMEOUT 0x80000000 + #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ + BNXT_FLAG_RFS | \ + BNXT_FLAG_STRIP_VLAN) + +#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) +#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) +#define BNXT_VF_IS_TRUSTED(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUSTED_VF) +#define BNXT_NPAR(bp) ((bp)->port_partition_type) +#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST) +#define BNXT_MR(bp) 
((bp)->flags & BNXT_FLAG_MULTI_ROOT) +#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && \ + !BNXT_MH(bp) && !BNXT_MR(bp)) +#define BNXT_SH_PORT_CFG_OK(bp) (BNXT_PF(bp) && \ + ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG)) +#define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \ + BNXT_SH_PORT_CFG_OK(bp)) && \ + (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED) +#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) +#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE) +#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \ + (!((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) || \ + (bp)->max_tpa_v2) && !is_kdump_kernel()) +#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO) + +#define BNXT_CHIP_P7(bp) \ + ((bp)->chip_num == CHIP_NUM_58818 || \ + (bp)->chip_num == CHIP_NUM_57608) + +#define BNXT_CHIP_P5(bp) \ + ((bp)->chip_num == CHIP_NUM_57508 || \ + (bp)->chip_num == CHIP_NUM_57504 || \ + (bp)->chip_num == CHIP_NUM_57502) + +/* Chip class phase 5 plus */ +#define BNXT_CHIP_P5_PLUS(bp) \ + (BNXT_CHIP_P5(bp) || BNXT_CHIP_P7(bp)) + +/* Chip class phase 4.x */ +#define BNXT_CHIP_P4(bp) \ + (BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \ + BNXT_CHIP_NUM_5745X((bp)->chip_num) || \ + BNXT_CHIP_NUM_588XX((bp)->chip_num) || \ + (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ + !BNXT_CHIP_TYPE_NITRO_A0(bp))) + +#define BNXT_CHIP_P4_PLUS(bp) \ + (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp)) + +/* Chip class phase 3.x */ +#define BNXT_CHIP_P3(bp) \ + (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \ + BNXT_CHIP_TYPE_NITRO_A0(bp)) + +#define BNXT_CHIP_THOR BNXT_CHIP_P5 +#define BNXT_STINGRAY BNXT_CHIP_P5 + +#define BNXT_CHIP_P5_MINUS(bp) \ + (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp)) + +#define BNXT_TPA_MTU_OK(bp) \ + ((!BNXT_CHIP_P3(bp) && !BNXT_CHIP_P4(bp)) || (bp)->dev->mtu <= 4096) + +#define BNXT_CHIP_SUPPORTS_PHY(bp) (BNXT_ASIC(bp) || BNXT_CHIP_P7(bp)) + /* Must remain NULL for new bnxt_re upstream/ + * inbox 
driver. + */ + void *reserved_ulp_kabi_1; + + struct bnxt_en_dev *edev; + /* The following 2 fields are for + * OOT compatibility checking only. + */ + void *reserved; /* Old bnxt_re sees that the + * original ulp_probe pointer + * is NULL and will not call. + */ + struct bnxt_en_dev * (*ulp_probe)(struct net_device *); + /* Do not add any fields before + * ulp_probe in both OOT and + * upstream/inbox drivers. + */ + + struct bnxt_napi **bnapi; + +#ifdef OLD_VLAN + struct vlan_group *vlgrp; +#endif + + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_tx_ring_info *tx_ring; + u16 *tx_ring_map; + + struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, + struct sk_buff *); + + struct sk_buff * (*rx_skb_func)(struct bnxt *, + struct bnxt_rx_ring_info *, + u16, void *, u8 *, dma_addr_t, + unsigned int); + + u16 max_tpa_v2; + u16 max_tpa; + u32 rx_buf_size; + u32 rx_buf_use_size; /* useable size */ + u16 rx_offset; + u16 rx_dma_offset; + enum dma_data_direction rx_dir; + u32 rx_ring_size; + u32 rx_agg_ring_size; + u32 rx_copy_thresh; + u32 rx_ring_mask; + u32 rx_agg_ring_mask; + int rx_nr_pages; + int rx_agg_nr_pages; + int rx_nr_rings; + int rsscos_nr_ctxs; + + u32 tx_ring_size; + u32 tx_ring_mask; + int tx_nr_pages; + int tx_nr_rings; + int tx_nr_rings_per_tc; + int tx_nr_rings_xdp; + + int tx_wake_thresh; + enum bnxt_push_mode tx_push_mode; + int tx_push_thresh; + int tx_push_size; +#if defined(HAVE_ETF_QOPT_OFFLOAD) + unsigned long *etf_tx_ring_map; +#endif + + u32 cp_ring_size; + u32 cp_ring_mask; + u32 cp_bit; + int cp_nr_pages; + int cp_nr_rings; + + /* grp_info indexed by completion ring index */ + struct bnxt_ring_grp_info *grp_info; + struct bnxt_vnic_info *vnic_info; + struct list_head rss_ctx_list; + unsigned long *rss_ctx_bmap; + u32 num_rss_ctx; + int nr_vnics; + u32 rss_hash_cfg; + u32 rss_hash_delta; + u16 *rss_indir_tbl; + u16 rss_indir_tbl_entries; +#define HW_HASH_KEY_SIZE 40 + u8 rss_hash_key[HW_HASH_KEY_SIZE]; + u8 rss_hash_key_valid:1; + u8 
rss_hash_key_updated:1; + u32 rss_cap; +#define BNXT_RSS_CAP_AH_V4_RSS_CAP BIT(0) +#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(1) +#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(2) +#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(3) +#define BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA BIT(4) +#define BNXT_RSS_CAP_RSS_TCAM BIT(5) +#define BNXT_RSS_CAP_UDP_RSS_CAP BIT(6) +#define BNXT_RSS_CAP_NEW_RSS_CAP BIT(7) +#define BNXT_RSS_CAP_TOEPLITZ_CAP BIT(8) +#define BNXT_RSS_CAP_XOR_CAP BIT(9) +#define BNXT_RSS_CAP_IPV6_FLOW_LABEL_CAP BIT(10) +#define BNXT_RSS_CAP_TOEPLITZ_CHKSM_CAP BIT(11) +#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(12) + + u16 max_mtu; + u16 fw_dflt_mtu; + u16 tso_max_segs; + u8 max_tc; + u8 max_lltc; /* lossless TCs */ + struct bnxt_queue_info tx_q_info[BNXT_MAX_QUEUE]; + struct bnxt_queue_info rx_q_info[BNXT_MAX_QUEUE]; + u8 tc_to_qidx[BNXT_MAX_QUEUE]; + u8 tx_q_ids[BNXT_MAX_QUEUE]; + u8 rx_q_ids[BNXT_MAX_QUEUE]; + u8 tx_max_q; + u8 rx_max_q; + u8 is_asym_q; + u8 num_tc; + + struct bnxt_mpc_info *mpc_info; + struct bnxt_ktls_info *ktls_info; + + struct bnxt_udcc_info *udcc_info; + unsigned int current_interval; +#define BNXT_TIMER_INTERVAL HZ + + struct timer_list timer; + + unsigned long state; +#define BNXT_STATE_OPEN 0 +#define BNXT_STATE_IN_SP_TASK 1 +#define BNXT_STATE_READ_STATS 2 +#define BNXT_STATE_FW_RESET_DET 3 +#define BNXT_STATE_IN_FW_RESET 4 +#define BNXT_STATE_ABORT_ERR 5 +#define BNXT_STATE_FW_FATAL_COND 6 +#define BNXT_STATE_DRV_REGISTERED 7 +#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8 +#define BNXT_STATE_NAPI_DISABLED 9 +#define BNXT_STATE_L2_FILTER_RETRY 10 +#define BNXT_STATE_FW_ACTIVATE 11 +#define BNXT_STATE_RECOVER 12 +#define BNXT_STATE_FW_NON_FATAL_COND 13 +#define BNXT_STATE_FW_ACTIVATE_RESET 14 +#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */ +#define BNXT_STATE_IN_UDCC_TASK 16 + +#define BNXT_NO_FW_ACCESS(bp) \ + (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ + pci_channel_offline(bp->pdev)) + + struct bnxt_irq *irq_tbl; + int 
total_irqs; + u8 mac_addr[ETH_ALEN]; + +#ifdef CONFIG_BNXT_DCB + struct ieee_pfc *ieee_pfc; + struct ieee_ets *ieee_ets; + u8 dcbx_cap; + u8 default_pri; + u8 max_dscp_value; +#endif /* CONFIG_BNXT_DCB */ + + u32 msg_enable; + + u64 fw_cap; + #define BNXT_FW_CAP_SHORT_CMD BIT_ULL(0) + #define BNXT_FW_CAP_LLDP_AGENT BIT_ULL(1) + #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2) + #define BNXT_FW_CAP_NEW_RM BIT_ULL(3) + #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4) + #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(5) + #define BNXT_FW_CAP_VF_RES_MIN_GUARANTEED BIT_ULL(6) + #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7) + #define BNXT_FW_CAP_ADMIN_MTU BIT_ULL(8) + #define BNXT_FW_CAP_ADMIN_PF BIT_ULL(9) + #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10) + #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11) + #define BNXT_FW_CAP_VF_VNIC_NOTIFY BIT_ULL(12) + #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13) + #define BNXT_FW_CAP_PKG_VER BIT_ULL(14) + #define BNXT_FW_CAP_CFA_ADV_FLOW BIT_ULL(15) + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16) + #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17) + #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18) + #define BNXT_FW_CAP_SECURE_MODE BIT_ULL(19) + #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20) + #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21) + #define BNXT_FW_CAP_CQ_OVERFLOW_DETECT_DISABLE BIT_ULL(22) + #define BNXT_FW_CAP_CRASHDUMP BIT_ULL(23) + #define BNXT_FW_CAP_VLAN_RX_STRIP BIT_ULL(24) + #define BNXT_FW_CAP_VLAN_TX_INSERT BIT_ULL(25) + #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED BIT_ULL(26) + #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(27) + #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(29) + #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30) + #define BNXT_FW_CAP_ECN_STATS BIT_ULL(31) + #define BNXT_FW_CAP_TRUFLOW BIT_ULL(32) + #define BNXT_FW_CAP_VF_CFG_FOR_PF BIT_ULL(33) + #define BNXT_FW_CAP_PTP_PPS BIT_ULL(34) + #define BNXT_FW_CAP_HOT_RESET_IF BIT_ULL(35) + #define BNXT_FW_CAP_LIVEPATCH BIT_ULL(36) + #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(37) + #define 
BNXT_FW_CAP_PTP_RTC BIT_ULL(39) + #define BNXT_FW_CAP_TRUFLOW_EN BIT_ULL(40) + #define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN) + #define BNXT_FW_CAP_RX_ALL_PKT_TS BIT_ULL(41) + #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(42) + #define BNXT_FW_CAP_DBR_SUPPORTED BIT_ULL(43) + #define BNXT_FW_CAP_GENERIC_STATS BIT_ULL(44) + #define BNXT_FW_CAP_DBR_PACING_SUPPORTED BIT_ULL(45) + #define BNXT_FW_CAP_PTP_PTM BIT_ULL(46) + #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(47) + #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(48) + #define BNXT_RDMA_SRIOV_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_ENABLE_RDMA_SRIOV) + #define BNXT_FW_CAP_PTP BIT_ULL(50) + #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(51) + #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(52) + #define BNXT_FW_CAP_VNIC_TUNNEL_TPA BIT_ULL(53) + #define BNXT_FW_CAP_HW_LAG_SUPPORTED BIT_ULL(54) + #define BNXT_FW_CAP_VF_SCALE_SUPPORTED BIT_ULL(55) + #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(56) + #define BNXT_FW_CAP_VF_RESV_VNICS_MAXVFS BIT_ULL(57) + #define BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED BIT_ULL(58) + #define BNXT_ROCE_VF_RESC_CAP(bp) ((bp)->fw_cap & \ + BNXT_FW_CAP_ROCE_VF_RESC_MGMT_SUPPORTED) + #define BNXT_FW_CAP_TIMED_TX_SO_TXTIME BIT_ULL(59) + #define BNXT_SUPPORTS_ETF(bp) ((bp)->fw_cap & BNXT_FW_CAP_TIMED_TX_SO_TXTIME) + #define BNXT_FW_CAP_UDCC_SUPPORTED BIT_ULL(60) + #define BNXT_UDCC_CAP(bp) ((bp)->fw_cap & \ + BNXT_FW_CAP_UDCC_SUPPORTED) + #define BNXT_FW_CAP_TF_RX_NIC_FLOW_SUPPORTED BIT_ULL(61) + #define BNXT_TF_RX_NIC_FLOW_CAP(bp) ((bp)->fw_cap & \ + BNXT_FW_CAP_TF_RX_NIC_FLOW_SUPPORTED) + #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(62) + #define BNXT_SW_RES_LMT(bp) ((bp)->fw_cap & BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS) + #define BNXT_FW_CAP_LPBK_STATS BIT_ULL(63) + + u32 fw_dbg_cap; + #define BNXT_FW_DBG_CAP_CRASHDUMP_SOC 0x00000001 + #define BNXT_FW_DBG_CAP_CRASHDUMP_HOST 0x00000002 + +#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) +#define 
BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ + ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) + u32 hwrm_spec_code; + u16 hwrm_cmd_seq; + u16 hwrm_cmd_kong_seq; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; + +#ifdef NETDEV_GET_STATS64 + struct rtnl_link_stats64 net_stats_prev; +#endif + struct bnxt_stats_mem port_stats; + struct bnxt_stats_mem rx_port_stats_ext; + struct bnxt_stats_mem tx_port_stats_ext; + struct bnxt_stats_mem ecn_marked_stats; + struct bnxt_stats_mem generic_stats; + struct bnxt_stats_mem lpbk_stats; + u16 fw_rx_stats_ext_size; + u16 fw_tx_stats_ext_size; + u16 hw_ring_stats_size; + u8 tx_pri2cos_idx[8]; + u8 rx_pri2cos_idx[8]; + bool pri2cos_valid; + + struct bnxt_total_ring_err_stats ring_err_stats_prev; + + u16 hwrm_max_req_len; + u16 hwrm_max_ext_req_len; + unsigned int hwrm_cmd_timeout; + unsigned int hwrm_cmd_max_timeout; + struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ + struct hwrm_ver_get_output ver_resp; +#define FW_VER_STR_LEN 32 +#define BC_HWRM_STR_LEN 21 +#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN) + char fw_ver_str[FW_VER_STR_LEN]; + char hwrm_ver_supp[FW_VER_STR_LEN]; + char nvm_cfg_ver[FW_VER_STR_LEN]; + u64 fw_ver_code; +#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \ + ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) +#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) +#define BNXT_FW_MIN(bp) (((bp)->fw_ver_code >> 32) & 0xffff) +#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff) +#define BNXT_FW_RSV(bp) (((bp)->fw_ver_code) & 0xffff) + + __le16 vxlan_fw_dst_port_id; + __le16 nge_fw_dst_port_id; + __le16 vxlan_gpe_fw_dst_port_id; + __be16 vxlan_port; + __be16 nge_port; + __be16 vxlan_gpe_port; +#ifndef HAVE_UDP_TUNNEL_NIC + __be16 vxlan_port_pending; + __be16 nge_port_pending; + atomic_t vxlan_port_cnt; + atomic_t nge_port_cnt; +#endif + u8 port_partition_type; + u8 port_count; + u16 br_mode; + + struct bnxt_coal_cap coal_cap; + struct bnxt_coal rx_coal; + struct 
bnxt_coal tx_coal; + + u32 stats_coal_ticks; +#define BNXT_DEF_STATS_COAL_TICKS 1000000 +#define BNXT_MIN_STATS_COAL_TICKS 250000 +#define BNXT_MAX_STATS_COAL_TICKS 1000000 + + struct work_struct sp_task; + unsigned long sp_event; +#define BNXT_RX_MASK_SP_EVENT 0 +#define BNXT_RX_NTP_FLTR_SP_EVENT 1 +#define BNXT_LINK_CHNG_SP_EVENT 2 +#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3 +#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4 +#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5 +#define BNXT_RESET_TASK_SP_EVENT 6 +#define BNXT_RST_RING_SP_EVENT 7 +#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 +#define BNXT_PERIODIC_STATS_SP_EVENT 9 +#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 +#define BNXT_RESET_TASK_SILENT_SP_EVENT 11 +#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 +#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 +#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 +#define BNXT_FLOW_STATS_SP_EVENT 15 +#define BNXT_UPDATE_PHY_SP_EVENT 16 +#define BNXT_RING_COAL_NOW_SP_EVENT 17 +#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18 +#define BNXT_FW_EXCEPTION_SP_EVENT 19 +#define BNXT_VF_VNIC_CHANGE_SP_EVENT 20 +#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21 +#define BNXT_PTP_CURRENT_TIME_EVENT 22 +#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23 +#define BNXT_VF_CFG_CHNG_SP_EVENT 24 +#define BNXT_RESET_TASK_CORE_RESET_SP_EVENT 25 +#define BNXT_THERMAL_THRESHOLD_SP_EVENT 26 +#define BNXT_RESTART_ULP_SP_EVENT 27 + + struct delayed_work fw_reset_task; + int fw_reset_state; +#define BNXT_FW_RESET_STATE_POLL_VF 1 +#define BNXT_FW_RESET_STATE_RESET_FW 2 +#define BNXT_FW_RESET_STATE_ENABLE_DEV 3 +#define BNXT_FW_RESET_STATE_POLL_FW 4 +#define BNXT_FW_RESET_STATE_OPENING 5 +#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6 + u16 fw_reset_min_dsecs; +#define BNXT_DFLT_FW_RST_MIN_DSECS 20 + u16 fw_reset_max_dsecs; +#define BNXT_DFLT_FW_RST_MAX_DSECS 60 + unsigned long fw_reset_timestamp; + + struct bnxt_fw_health *fw_health; + struct bnxt_aux_priv *aux_priv; + + struct bnxt_dbr dbr; + + struct bnxt_hw_resc hw_resc; + struct bnxt_pf_info pf; + struct 
bnxt_ctx_mem_info *ctx; +#ifdef CONFIG_BNXT_SRIOV + int nr_vfs; + struct bnxt_vf_info vf; + struct bnxt_vf_sysfs_obj *vf_sysfs_objs; + struct kobject *sriov_sysfs_config; + struct hwrm_func_vf_resource_cfg_input vf_resc_cfg_input; + wait_queue_head_t sriov_cfg_wait; + bool sriov_cfg; +#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) + + /* lock to protect VF-rep creation/cleanup via + * multiple paths such as ->sriov_configure() and + * devlink ->eswitch_mode_set() + */ + struct mutex vf_rep_lock; + struct mutex sriov_lock; +#endif + +#if BITS_PER_LONG == 32 + /* ensure atomic 64-bit doorbell writes on 32-bit systems. */ + spinlock_t db_lock; +#endif + int db_offset; + int db_size; + int db_size_nc; + void __iomem *db_base_wc; + +#define BNXT_NTP_FLTR_MAX_FLTR 8192 +#define BNXT_NTP_FLTR_HASH_SIZE 512 +#define BNXT_NTP_FLTR_HASH_MASK (BNXT_NTP_FLTR_HASH_SIZE - 1) + struct hlist_head ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE]; + spinlock_t ntp_fltr_lock; /* for hash table add, del */ + + unsigned long *ntp_fltr_bmap; + int ntp_fltr_count; + int max_fltr; + +#define BNXT_L2_FLTR_MAX_FLTR 1024 +#define BNXT_MAX_FLTR (BNXT_NTP_FLTR_MAX_FLTR + BNXT_L2_FLTR_MAX_FLTR) +#define BNXT_L2_FLTR_HASH_SIZE 32 +#define BNXT_L2_FLTR_HASH_MASK (BNXT_L2_FLTR_HASH_SIZE - 1) + struct hlist_head l2_fltr_hash_tbl[BNXT_L2_FLTR_HASH_SIZE]; + + u32 hash_seed; + u64 toeplitz_prefix; + + struct list_head usr_fltr_list; + + struct mutex link_lock; + struct bnxt_link_info link_info; + struct ethtool_keee eee; + u32 lpi_tmr_lo; + u32 lpi_tmr_hi; + + /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */ + u16 phy_flags; +#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED +#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED +#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED +#define BNXT_PHY_FL_SHARED_PORT_CFG PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED +#define BNXT_PHY_FL_PORT_STATS_NO_RESET 
PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET +#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED +#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN +#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS +#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8) +#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8) +#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8) +#define BNXT_PHY_FL_SPEEDS2 (PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED << 8) + + /* copied from flags in hwrm_port_mac_qcaps_output */ + u8 mac_flags; +#define BNXT_MAC_FL_NO_MAC_LPBK PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED + + u8 num_tests; + struct bnxt_test_info *test_info; + + u8 wol_filter_id; + u8 wol; + + u8 num_leds; + struct bnxt_led_info leds[BNXT_MAX_LED]; + u16 dump_flag; +#define BNXT_DUMP_LIVE 0 +#define BNXT_DUMP_CRASH 1 +#define BNXT_DUMP_DRIVER 2 +#define BNXT_DUMP_DRIVER_WITH_CTX_MEM 3 + + struct bpf_prog *xdp_prog; + + struct bnxt_ptp_cfg *ptp_cfg; + u8 ptp_all_rx_tstamp; + +#ifndef PCIE_SRIOV_CONFIGURE + int req_vfs; + struct work_struct iov_task; +#endif + + struct devlink *dl; +#ifdef CONFIG_VF_REPS + /* devlink interface and vf-rep structs */ +#ifdef HAVE_DEVLINK_PORT_ATTRS + struct devlink_port dl_port; +#endif + enum devlink_eswitch_mode eswitch_mode; + struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ + u16 *cfa_code_map; /* cfa_code -> vf_idx map */ +#endif + /* Flag to stop eswitch mode transitions (e.g, during + * PCI device removal). + * TBD: Change this to a bitmask of flag bits to track + * various TC hw-offload events (TF-init, VF-Rep creation + * etc). 
+ */ + bool eswitch_disabled; + u8 dsn[8]; + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + struct bnxt_tc_info *tc_info; + struct bnxt_tc_neigh_update neigh_update; + struct list_head tc_indr_block_list; +#if defined(HAVE_FLOW_INDR_BLOCK_CB) && !defined(HAVE_FLOW_INDR_DEV_RGTR) + struct notifier_block tc_netdev_nb; +#endif +#endif + struct dentry *debugfs_pdev; + struct dentry *debugfs_dim; + struct backingstore_debug_data_t bs_data[BNXT_DIR_MAX]; +#ifdef CONFIG_BNXT_HWMON + struct device *hwmon_dev; + u8 warn_thresh_temp; + u8 crit_thresh_temp; + u8 fatal_thresh_temp; + u8 shutdown_thresh_temp; +#endif + u32 thermal_threshold_type; + char *tx_cosq_names; + char *rx_cosq_names; + enum board_idx board_idx; + + struct bnxt_ctx_pg_info *fw_crash_mem; + u32 fw_crash_len; +#define BNXT_SET_CRASHDUMP_PAGE_ATTR(attr) \ +do { \ + if (BNXT_PAGE_SIZE == 0x2000) \ + attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K; \ + else if (BNXT_PAGE_SIZE == 0x10000) \ + attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K; \ + else \ + attr = DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K; \ +} while (0) + + struct net_device * (*get_pkt_dev)(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info); + /* Truflow Related: START */ + u16 tf_flags; + #define BNXT_TF_FLAG_NONE 0 + #define BNXT_TF_FLAG_INITIALIZED BIT(0) + #define BNXT_TF_FLAG_SWITCHDEV BIT(1) + #define BNXT_TF_FLAG_NICFLOW BIT(2) + #define BNXT_TF_FLAG_DEVLINK BIT(3) + #define BNXT_TF_FLAG_GFID_ENABLE BIT(8) +#define BNXT_GFID_ENABLED(bp) ((bp)->tf_flags & BNXT_TF_FLAG_GFID_ENABLE) +#define BNXT_SVIF_INVALID 0xFFFF + u16 port_svif; + u16 func_svif; + void *ulp_ctx; + void *tfp; + void *nic_flow_info; + u32 tx_cfa_action; + u16 max_num_kflows; +#define BNXT_ULP_APP_ID_SET_CONFIGURED 0x80 + u8 app_id; +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + struct vnic_info_meta *vnic_meta; +#endif + bool dl_param_truflow; + /* Truflow MPC info */ + void *tfc_info; + /* Truflow Related: 
END */ + +#define BNXT_FW_CAP_UDP_TNL_OFFLOAD_DISABLED \ + (FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN | \ + FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE) +#define BNXT_FW_CAP_GRE_TNL_OFFLOAD_DISABLED \ + (FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE | \ + FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE | \ + FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE) + + u16 tunnel_disable_flag; + + struct bnxt_hdbr_info hdbr_info; + void *hdbr_pgs[DBC_GROUP_MAX]; + u8 rss_hfunc; + u8 ipv6_flow_lbl_rss_en; + + int ulp_num_msix_want; + + struct list_head loggers_list; + void *debug_buf; + struct mutex log_lock; /* logging ops lock */ + unsigned long *af_xdp_zc_qs; + struct bnxt_bond_info *bond_info; + struct bnxt_bs_trace_info bs_trace[BNXT_TRACE_BUF_COUNT]; +}; + +#define BNXT_NUM_RX_RING_STATS 8 +#define BNXT_NUM_TX_RING_STATS 8 +#define BNXT_NUM_TPA_RING_STATS 4 +#define BNXT_NUM_TPA_RING_STATS_P5 5 +#define BNXT_NUM_TPA_RING_STATS_P7 6 +#define BNXT_NUM_RX_PFC_DURATION_STATS 8 +#define BNXT_NUM_TX_PFC_DURATION_STATS 8 +#define BNXT_NUM_PFC_DURATION_STATS 16 + +#define BNXT_RING_STATS_SIZE_P5 \ + ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \ + BNXT_NUM_TPA_RING_STATS_P5) * 8) + +#define BNXT_RING_STATS_SIZE_P7 \ + ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \ + BNXT_NUM_TPA_RING_STATS_P7) * 8) + +#define BNXT_GET_RING_STATS64(sw, counter) \ + (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8)) + +#define BNXT_GET_RX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct rx_port_stats, counter) / 8)) + +#define BNXT_GET_TX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct tx_port_stats, counter) / 8)) + +#define BNXT_PORT_STATS_SIZE \ + (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024) + +#define BNXT_TX_PORT_STATS_BYTE_OFFSET \ + (sizeof(struct rx_port_stats) + 512) + +#define BNXT_RX_STATS_OFFSET(counter) \ + (offsetof(struct rx_port_stats, counter) / 8) + +#define BNXT_TX_STATS_OFFSET(counter) \ + 
((offsetof(struct tx_port_stats, counter) + \ + BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8) + +#define BNXT_RX_STATS_EXT_OFFSET(counter) \ + (offsetof(struct rx_port_stats_ext, counter) / 8) + +#define BNXT_RX_STATS_EXT_NUM_LEGACY \ + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks) + +#define BNXT_TX_STATS_EXT_OFFSET(counter) \ + (offsetof(struct tx_port_stats_ext, counter) / 8) + +#define BNXT_HW_FEATURE_VLAN_ALL_RX \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) +#define BNXT_HW_FEATURE_VLAN_ALL_TX \ + (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX) + +#define BNXT_TF_RESET_IS_NEEDED(bp) (BNXT_PF(bp) && \ + BNXT_TRUFLOW_EN(bp) && \ + bnxt_tc_is_switchdev_mode(bp)) + +#ifdef BNXT_PRIV_RX_BUSY_POLL +static inline void bnxt_enable_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the NAPI poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_NAPI); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +/* called from the busy poll routine to get ownership of a bnapi */ +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_POLL); + + return rc == BNXT_STATE_IDLE; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ + atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE); +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ + int old; + + while (1) { + old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE, + BNXT_STATE_DISABLE); + if (old == BNXT_STATE_IDLE) + break; + usleep_range(500, 5000); + } +} + +#else + +static inline void 
bnxt_enable_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi) +{ + return true; +} + +static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi) +{ +} + +static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi) +{ + return false; +} + +static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) +{ +} + +#endif + +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 +#define SFF_DIAG_SUPPORT_OFFSET 0x5c +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 +#define BNXT_MAX_PHY_I2C_RESP_SIZE 64 + +#define BDETBD_REG_BD_PRODUCER_IDX 0x90000UL +#define BDETBD_REG_BD_REQ_CONSUMER_IDX 0x91000UL +#define BDETBD_REG_BD_CMPL_CONSUMER_IDX 0x92000UL +#define BDERBD_REG_BD_PRODUCER_IDX 0x410000UL +#define BDERBD_REG_BD_REQ_CONSUMER_IDX 0x411000UL +#define BDERBD_REG_BD_CMPL_CONSUMER_IDX 0x412000UL +#define CAG_REG_CAG_VECTOR_CTRL_ADDR_OFFSET 0x30003cUL +#define CAG_REG_CAG_PRODUCER_INDEX_REG_ADDR_OFFSET 0x300040UL +#define CAG_REG_CAG_CONSUMER_INDEX_REG_ADDR_OFFSET 0x300044UL +#define CAG_REG_CAG_PRODUCER_INDEX_REG 0x302000UL +#define CAG_REG_CAG_CONSUMER_INDEX_REG 0x303000UL +#define CAG_REG_CAG_VECTOR_CTRL 0x301000UL +#define TDC_REG_INT_STS_0 0x180020UL +#define TDC_REG_TDC_DEBUG_CNTL 0x180014UL +#define TDC_REG_TDC_DEBUG_STATUS 0x180018UL +#define TDI_REG_DBG_DWORD_ENABLE 0x100104UL +#define TDI_REG_DBG_OUT_DATA 0x100120UL +#define TDI_REG_DBG_SELECT 0x100100UL +#define TE_DEC_REG_PORT_CURRENT_CREDIT_REG 0x2401300UL +#define RDI_REG_RDI_DEBUG_CONTROL_REG 0x27001cUL +#define RDI_REG_RDI_DEBUG_STATUS_REG 0x270020UL + +static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) +{ + u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons); + + return 
bp->tx_ring_size - (used & bp->tx_ring_mask); +} + +static inline void bnxt_writeq(struct bnxt *bp, u64 val, + volatile void __iomem *addr) +{ +#ifdef DBR_DBG_DROP_ENABLE + struct bnxt_dbr_debug *debug = &bp->dbr.debug; + + if (debug->drop_enable) { + if (++debug->drop_cnt >= debug->drop_ratio) { + debug->drop_cnt = 0; + return; + } + } +#endif + +#if BITS_PER_LONG == 32 + spin_lock(&bp->db_lock); + lo_hi_writeq(val, addr); + spin_unlock(&bp->db_lock); +#else + writeq(val, addr); +#endif +} + +static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val, + volatile void __iomem *addr) +{ +#ifdef DBR_DBG_DROP_ENABLE + struct bnxt_dbr_debug *debug = &bp->dbr.debug; + + if (debug->drop_enable) { + if (++debug->drop_cnt >= debug->drop_ratio) { + debug->drop_cnt = 0; + return; + } + } +#endif + +#if BITS_PER_LONG == 32 + spin_lock(&bp->db_lock); + lo_hi_writeq_relaxed(val, addr); + spin_unlock(&bp->db_lock); +#else + writeq_relaxed(val, addr); +#endif +} + +/* + * Save the db value into db copy memory region. Set debug_trace bit if it is + * configured. + * This function is called before each DB written to chip. Memory barrier is + * used to make sure, that memory copy is written before DB reach chip. 
+ */ +static inline void bnxt_hdbr_cp_db(u64 *db_cp, u64 db_val, bool dt, int offset) +{ + if (db_cp) { + if (dt) + db_val |= DBC_DEBUG_TRACE_MASK; + *(db_cp + offset) = cpu_to_le64(db_val); + wmb(); /* Sync db copy before db written into HW */ + } +} + +/* For TX and RX ring doorbells with no ordering guarantee*/ +static inline void bnxt_db_write_relaxed(struct bnxt *bp, + struct bnxt_db_info *db, u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + u64 db_val; + + db_val = db->db_key64 | DB_RING_IDX(db, idx); + bnxt_hdbr_cp_db(db->db_cp, db_val, db->db_cp_debug_trace, 0); + bnxt_writeq_relaxed(bp, db_val, db->doorbell); + } else { + u32 db_val = db->db_key32 | DB_RING_IDX(db, idx); + + writel_relaxed(db_val, db->doorbell); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel_relaxed(db_val, db->doorbell); + } +} + +/* For TX and RX ring doorbells */ +static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, + u32 idx) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + u64 db_val; + + db_val = db->db_key64 | DB_RING_IDX(db, idx); + bnxt_hdbr_cp_db(db->db_cp, db_val, db->db_cp_debug_trace, 0); + bnxt_writeq(bp, db_val, db->doorbell); + } else { + u32 db_val = db->db_key32 | DB_RING_IDX(db, idx); + + writel(db_val, db->doorbell); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel(db_val, db->doorbell); + } +} + +static inline void bnxt_do_pacing_default(struct bnxt *bp, u32 *seed) +{ + bnxt_do_pacing(bp->bar0, &bp->dbr, seed, BNXT_DB_PACING_ALGO_THRESHOLD, + BNXT_DEFAULT_PACING_PROBABILITY); +} + +extern const u16 bnxt_lhint_arr[]; +extern const struct pci_device_id bnxt_pci_tbl[]; + +netdev_tx_t __bnxt_start_xmit(struct bnxt *bp, struct netdev_queue *txq, + struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, __le32 lflags, u32 kid); +int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 prod, gfp_t gfp); +void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data); +void bnxt_free_ring(struct bnxt *bp, struct 
bnxt_ring_mem_info *rmem); +int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem); +void bnxt_set_tpa_flags(struct bnxt *bp); +void bnxt_set_ring_params(struct bnxt *); +int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); +int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, + int bmap_size, bool async_only); +int bnxt_hwrm_func_qcaps(struct bnxt *bp, bool init); +void bnxt_del_l2_filter(struct bnxt *bp, struct bnxt_l2_filter *fltr); +struct bnxt_l2_filter *bnxt_alloc_new_l2_filter(struct bnxt *bp, + struct bnxt_l2_key *key, + u16 flags); +int bnxt_hwrm_l2_filter_free(struct bnxt *bp, struct bnxt_l2_filter *fltr); +int bnxt_hwrm_l2_filter_alloc(struct bnxt *bp, struct bnxt_l2_filter *fltr); +int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr); +int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr); +void bnxt_fill_ipv6_mask(__be32 mask[4]); +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 q_index); +int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, struct bnxt_cp_ring_info *cpr); +int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u32 tx_idx); +int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, struct bnxt_rx_ring_info *txr, + u32 rx_idx); +void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + bool close_path); +void bnxt_hwrm_rx_ring_free(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + bool close_path); +int bnxt_total_tx_rings(struct bnxt *bp); +int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); +int bnxt_nq_rings_in_use(struct bnxt *bp); +int bnxt_min_nq_rings_in_use(struct bnxt *bp); +int bnxt_hwrm_set_coal(struct bnxt *); +int bnxt_num_tx_to_cp(struct bnxt *bp, int tx); +unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp); +unsigned int 
bnxt_get_max_func_cp_rings(struct bnxt *bp); +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp); +int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); +void bnxt_tx_disable(struct bnxt *bp); +void bnxt_tx_enable(struct bnxt *bp); +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx); +int bnxt_update_link(struct bnxt *bp, bool chng_link_state); +int bnxt_hwrm_set_pause(struct bnxt *); +int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); +int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); +int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); +int bnxt_hwrm_fw_set_time(struct bnxt *); +int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + bool all); +struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); +void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); +int bnxt_open_nic(struct bnxt *, bool, bool); +int bnxt_half_open_nic(struct bnxt *bp); +void bnxt_half_close_nic(struct bnxt *bp); +void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init); +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf); +void bnxt_fw_exception(struct bnxt *bp); +void bnxt_fw_reset(struct bnxt *bp); +int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp); +u16 bnxt_vf_target_id(struct bnxt_pf_info *pf, u16 vf_idx); +#if defined(HAVE_SETUP_TC) || defined(CONFIG_BNXT_DCB) +int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); +#endif +struct bnxt_ntuple_filter *bnxt_lookup_ntp_filter_from_idx(struct bnxt *bp, + struct bnxt_ntuple_filter *fltr, u32 idx); +u32 bnxt_get_ntp_filter_idx(struct bnxt *bp, struct flow_keys *fkeys, const struct sk_buff *skb); +int bnxt_insert_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter *fltr, + u32 idx); +void bnxt_del_ntp_filter(struct bnxt *bp, struct bnxt_ntuple_filter 
*fltr); +int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); +int bnxt_restore_pf_fw_resources(struct bnxt *bp); + +#ifdef CONFIG_VF_REPS +#ifdef HAVE_NDO_GET_PORT_PARENT_ID +int bnxt_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); +#else +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +#endif +#endif +void bnxt_dim_work(struct work_struct *work); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); +u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); +int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, bool alloc_masks); +void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats); +void bnxt_get_func_stats_ext_mask(struct bnxt *bp, + struct bnxt_stats_mem *stats); +void bnxt_add_ring_stats(struct rtnl_link_stats64 *stats, u64 *sw); +u64 bnxt_add_ring_rx_pkts(u64 *sw); +u64 bnxt_add_ring_tx_pkts(u64 *sw); +u64 bnxt_add_ring_rx_bytes(u64 *sw); +u64 bnxt_add_ring_tx_bytes(u64 *sw); +#ifdef NETDEV_GET_STATS64 +void bnxt_get_vf_stats(struct bnxt *bp, u16 vf_idx, + struct rtnl_link_stats64 *stats); +#endif +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats); +int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp); +int bnxt_hwrm_get_dflt_roce_vnic(struct bnxt *bp, u16 fid, u16 *vnic_id); +void bnxt_print_device_info(struct bnxt *bp); +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset); +void bnxt_report_link(struct bnxt *bp); +void bnxt_free_ctx_mem(struct bnxt *bp); +int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp); +int bnxt_fw_init_one(struct bnxt *bp); +void bnxt_reenable_sriov(struct bnxt *bp); +bool bnxt_hwrm_reset_permitted(struct bnxt *bp); + +int bnxt_dbr_init(struct bnxt *bp); +void bnxt_dbr_exit(struct bnxt *bp); +void bnxt_dbr_recovery_done(struct bnxt *bp, u32 epoch, int ulp_id); +void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, + u32 vlan, struct sk_buff *skb); +void bnxt_txr_db_kick(struct 
bnxt *bp, struct bnxt_tx_ring_info *txr, + u16 prod); +int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u8 agg_bufs, u32 *raw_cons); +struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u16 cp_cons, u16 curr); +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, + unsigned int start_rx_ring_idx, unsigned int nr_rings); +void bnxt_hwrm_vnic_free_one(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic, bool set_rss); +int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, u32 tpa_flags); +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 ctx_idx); +void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, struct bnxt_vnic_info *vnic, u16 ctx_idx); +void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); +void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr); +void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all); +int bnxt_hwrm_vnic_update(struct bnxt *bp, struct bnxt_vnic_info *vnic, u8 valid); +int bnxt_hwrm_func_qstats(struct bnxt *bp, struct bnxt_stats_mem *stats, + u16 fid, u8 flags); +int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); +bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx); +int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_logger_ulp_live_data(void *d, u32 seg_id); +void bnxt_free_one_rx_buf_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); +u32 bnxt_get_rxfh_indir_size(struct net_device *dev); +int bnxt_copy_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem, void *buf, size_t offset); +int bnxt_copy_ctx_mem(struct bnxt *bp, struct bnxt_ctx_mem_type *ctxm, void *buf, size_t offset); +#endif diff --git 
a/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.c new file mode 100644 index 000000000000..f57154a8bde4 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.c @@ -0,0 +1,178 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#if !defined(CONFIG_AUXILIARY_BUS) +#undef HAVE_AUXILIARY_DRIVER +#endif + +#ifndef HAVE_AUXILIARY_DRIVER + +#include +#include +#include +#include + +#include "bnxt_auxbus_compat.h" + +static struct list_head bnxt_aux_bus_dev_list = LIST_HEAD_INIT(bnxt_aux_bus_dev_list); +static struct list_head bnxt_aux_bus_drv_list = LIST_HEAD_INIT(bnxt_aux_bus_drv_list); +static DEFINE_MUTEX(bnxt_auxbus_lock); + +static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, + const struct auxiliary_device *auxdev) +{ + for (; id->name[0]; id++) { + const char *p = strrchr(dev_name(&auxdev->dev), '.'); + int match_size; + + if (!p) + continue; + match_size = p - dev_name(&auxdev->dev); + + /* use dev_name(&auxdev->dev) prefix before last '.' 
char to match to */ + if (strlen(id->name) == match_size && + !strncmp(dev_name(&auxdev->dev), id->name, match_size)) + return id; + } + return NULL; +} + +int auxiliary_device_init(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + char *modname = KBUILD_MODNAME; + int ret; + + if (!dev->parent) { + pr_err("auxiliary_device has a NULL dev->parent\n"); + return -EINVAL; + } + + if (!auxdev->name) { + pr_err("auxiliary_device has a NULL name\n"); + return -EINVAL; + } + + ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id); + if (ret) { + dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret); + return ret; + } + + return 0; +} + +int auxiliary_device_add(struct auxiliary_device *auxdev) +{ + const struct auxiliary_device_id *id; + struct auxiliary_driver *auxdrv; + bool found = true; + int ret = 0; + + mutex_lock(&bnxt_auxbus_lock); + list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) { + id = auxiliary_match_id(auxdrv->id_table, auxdev); + if (id) { + ret = auxdrv->probe(auxdev, id); + if (!ret) + auxdev->dev.driver = &auxdrv->driver; + else + found = false; + break; + } + } + if (found) + list_add_tail(&auxdev->list, &bnxt_aux_bus_dev_list); + mutex_unlock(&bnxt_auxbus_lock); + + return ret; +} + +void auxiliary_device_uninit(struct auxiliary_device *auxdev) +{ + struct device *dev = &auxdev->dev; + + dev->release(dev); +} + +void auxiliary_device_delete(struct auxiliary_device *auxdev) +{ + struct auxiliary_driver *auxdrv; + + mutex_lock(&bnxt_auxbus_lock); + list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) { + if (auxdev->dev.driver != &auxdrv->driver) + continue; + if (auxdrv->remove) + auxdrv->remove(auxdev); + auxdev->dev.driver = NULL; + } + list_del(&auxdev->list); + mutex_unlock(&bnxt_auxbus_lock); +} + +int bnxt_auxiliary_driver_register(struct auxiliary_driver *auxdrv) +{ + const struct auxiliary_device_id *id; + struct auxiliary_device *auxdev; + int ret = 0; + + if 
(WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table)) + return -EINVAL; + + if (auxdrv->name) + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", KBUILD_MODNAME, + auxdrv->name); + else + auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", KBUILD_MODNAME); + if (!auxdrv->driver.name) + return -ENOMEM; + + mutex_lock(&bnxt_auxbus_lock); + list_for_each_entry(auxdev, &bnxt_aux_bus_dev_list, list) { + if (auxdev->dev.driver) + continue; + + id = auxiliary_match_id(auxdrv->id_table, auxdev); + if (id) { + ret = auxdrv->probe(auxdev, id); + if (ret) + continue; + auxdev->dev.driver = &auxdrv->driver; + } + } + list_add_tail(&auxdrv->list, &bnxt_aux_bus_drv_list); + mutex_unlock(&bnxt_auxbus_lock); + return 0; +} +EXPORT_SYMBOL(bnxt_auxiliary_driver_register); + +void bnxt_auxiliary_driver_unregister(struct auxiliary_driver *auxdrv) +{ + struct auxiliary_device *auxdev; + + /* PF auxiliary devices are added to the list first and then VF devices. + * If we remove PF aux device driver first, it causes failures while + * removing VF driver. + * We need to remove VF auxiliary drivers first, so walk backwards. + */ + mutex_lock(&bnxt_auxbus_lock); + list_for_each_entry_reverse(auxdev, &bnxt_aux_bus_dev_list, list) { + if (auxdev->dev.driver != &auxdrv->driver) + continue; + if (auxdrv->remove) + auxdrv->remove(auxdev); + auxdev->dev.driver = NULL; + } + kfree(auxdrv->driver.name); + list_del(&auxdrv->list); + mutex_unlock(&bnxt_auxbus_lock); +} +EXPORT_SYMBOL(bnxt_auxiliary_driver_unregister); +#endif /* HAVE_AUXILIARY_DRIVER */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.h new file mode 100644 index 000000000000..066cd9c761dc --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_auxbus_compat.h @@ -0,0 +1,111 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _BNXT_AUXILIARY_COMPAT_H_ +#define _BNXT_AUXILIARY_COMPAT_H_ + +#if !defined(CONFIG_AUXILIARY_BUS) +#undef HAVE_AUXILIARY_DRIVER +#endif + +#ifdef HAVE_AUXILIARY_DRIVER +#include +#endif + +#if defined(HAVE_AUXILIARY_DRIVER) && !defined(HAVE_AUX_GET_DRVDATA) +static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev) +{ + return dev_get_drvdata(&auxdev->dev); +} + +static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data) +{ + dev_set_drvdata(&auxdev->dev, data); +} +#endif + +#ifndef HAVE_AUXILIARY_DRIVER + +#ifndef AUXILIARY_NAME_SIZE +#define AUXILIARY_NAME_SIZE 32 +#endif + +#ifndef HAVE_AUX_DEVICE_ID +#include + +struct auxiliary_device_id { + char name[AUXILIARY_NAME_SIZE]; + kernel_ulong_t driver_data; +}; +#endif + +#include +#include +#ifndef HAVE_IDA_ALLOC +#include +#endif + +struct auxiliary_device { + struct device dev; + const char *name; + u32 id; + struct list_head list; +}; + +struct auxiliary_driver { + int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id); + void (*remove)(struct auxiliary_device *auxdev); + void (*shutdown)(struct auxiliary_device *auxdev); + int (*suspend)(struct auxiliary_device *auxdev, pm_message_t state); + int (*resume)(struct auxiliary_device *auxdev); + const char *name; + struct device_driver driver; + const struct auxiliary_device_id *id_table; + struct list_head list; +}; + +int auxiliary_device_init(struct auxiliary_device *auxdev); +int auxiliary_device_add(struct auxiliary_device *auxdev); +void auxiliary_device_uninit(struct auxiliary_device *auxdev); +void auxiliary_device_delete(struct auxiliary_device *auxdev); +int bnxt_auxiliary_driver_register(struct auxiliary_driver *auxdrv); +void bnxt_auxiliary_driver_unregister(struct auxiliary_driver 
*auxdrv); + +#define auxiliary_driver_register bnxt_auxiliary_driver_register +#define auxiliary_driver_unregister bnxt_auxiliary_driver_unregister + +static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev) +{ + return dev_get_drvdata(&auxdev->dev); +} + +static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data) +{ + dev_set_drvdata(&auxdev->dev, data); +} + +static inline struct auxiliary_driver *to_auxiliary_drv(struct device_driver *drv) +{ + return container_of(drv, struct auxiliary_driver, driver); +} + +#endif /* HAVE_AUXILIARY_DRIVER */ + +#ifndef HAVE_IDA_ALLOC +static inline int ida_alloc(struct ida *ida, gfp_t gfp) +{ + return ida_simple_get(ida, 0, 0, gfp); +} + +static inline void ida_free(struct ida *ida, unsigned int id) +{ + ida_simple_remove(ida, id); +} +#endif /* HAVE_IDA_ALLOC */ +#endif /* _BNXT_AUXILIARY_COMPAT_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_compat.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat.h new file mode 100644 index 000000000000..3a8402980fc2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat.h @@ -0,0 +1,2835 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ +#ifndef _BNXT_COMPAT_H_ +#define _BNXT_COMPAT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_IEEE1588_SUPPORT +#include +#include +#endif +#if defined(HAVE_TC_FLOW_CLS_OFFLOAD) || defined(HAVE_TC_CLS_FLOWER_OFFLOAD) +#include +#endif +#ifdef HAVE_DIM +#include +#endif +#ifdef HAVE_DEVLINK +#include +#endif +#if defined(HAVE_SWITCHDEV) +#include +#endif +#ifdef HAVE_NDO_XDP +#include +#endif +#include +#ifdef HAVE_XDP_RXQ_INFO +#include +#endif + +#ifndef IS_ENABLED +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) +#endif + +#if !IS_ENABLED(CONFIG_NET_DEVLINK) +#undef HAVE_DEVLINK +#endif + +#ifndef HAVE_DEVLINK +#undef HAVE_DEVLINK_INFO +#undef HAVE_DEVLINK_PARAM +#undef HAVE_NDO_DEVLINK_PORT +#undef HAVE_DEVLINK_FLASH_UPDATE +#undef HAVE_DEVLINK_HEALTH_REPORT +#undef HAVE_DEVLINK_RELOAD_ACTION +#endif + +/* Reconcile all dependencies for VF reps: + * SRIOV, Devlink, Switchdev and HW port info in metadata_dst + */ +#if defined(CONFIG_BNXT_SRIOV) && defined(HAVE_DEVLINK) && \ + defined(CONFIG_NET_SWITCHDEV) && defined(HAVE_METADATA_HW_PORT_MUX) && \ + (LINUX_VERSION_CODE >= 0x030a00) +#define CONFIG_VF_REPS 1 +#endif +/* DEVLINK code has dependencies on VF reps */ +#ifdef HAVE_DEVLINK_PARAM +#define CONFIG_VF_REPS 1 +#endif +#ifdef CONFIG_VF_REPS +#ifndef SWITCHDEV_SET_OPS +#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops)) +#endif +#endif + +/* Reconcile all dependencies for TC Flower offload + * Need the following to be defined to build TC flower offload + * HAVE_TC_FLOW_CLS_OFFLOAD OR HAVE_TC_CLS_FLOWER_OFFLOAD + * HAVE_RHASHTABLE + * 
HAVE_FLOW_DISSECTOR_KEY_ICMP + * HAVE_FLOW_DISSECTOR_KEY_ENC_IP + * CONFIG_NET_SWITCHDEV + * HAVE_TCF_EXTS_TO_LIST (its possible to do without this but + * the code gets a bit complicated. So, for now depend on this.) + * HAVE_TCF_TUNNEL + * Instead of checking for all of the above defines, enable one + * define when all are enabled. + */ +#if (defined(HAVE_TC_FLOW_CLS_OFFLOAD) || \ + defined(HAVE_TC_CLS_FLOWER_OFFLOAD)) && \ + (defined(HAVE_TCF_EXTS_TO_LIST) || \ + defined(HAVE_TC_EXTS_FOR_ACTION)) && \ + defined(HAVE_RHASHTABLE) && defined(HAVE_FLOW_DISSECTOR_KEY_ICMP) && \ + defined(HAVE_FLOW_DISSECTOR_KEY_ENC_IP) && \ + defined(HAVE_TCF_TUNNEL) && defined(CONFIG_NET_SWITCHDEV) && \ + (LINUX_VERSION_CODE >= 0x030a00) +#define CONFIG_BNXT_FLOWER_OFFLOAD 1 +#ifndef HAVE_NDO_GET_PORT_PARENT_ID +#define netdev_port_same_parent_id(a, b) switchdev_port_same_parent_id(a, b) +#endif +#else +#undef CONFIG_BNXT_FLOWER_OFFLOAD +#endif + +/* With upstream kernels >= v5.2.0, struct tc_cls_flower_offload has been + * replaced by struct flow_cls_offload. For older kernels(< v5.2.0), rename + * the respective definitions here. 
+ */ +#ifndef HAVE_TC_FLOW_CLS_OFFLOAD +#ifdef HAVE_TC_CLS_FLOWER_OFFLOAD +#define flow_cls_offload tc_cls_flower_offload +#define flow_cls_offload_flow_rule tc_cls_flower_offload_flow_rule +#define FLOW_CLS_REPLACE TC_CLSFLOWER_REPLACE +#define FLOW_CLS_DESTROY TC_CLSFLOWER_DESTROY +#define FLOW_CLS_STATS TC_CLSFLOWER_STATS +#endif +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) +#ifndef NUM_FLOW_ACTIONS +#define NUM_FLOW_ACTIONS 64 +#endif +#endif + +#if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE) +#if defined(HAVE_NEW_HWMON_API) +#define CONFIG_BNXT_HWMON 1 +#endif +#endif + +#ifndef SPEED_2500 +#define SPEED_2500 2500 +#endif + +#ifndef SPEED_5000 +#define SPEED_5000 5000 +#endif + +#ifndef SPEED_14000 +#define SPEED_14000 14000 +#endif + +#ifndef SPEED_20000 +#define SPEED_20000 20000 +#endif + +#ifndef SPEED_25000 +#define SPEED_25000 25000 +#endif + +#ifndef SPEED_40000 +#define SPEED_40000 40000 +#endif + +#ifndef SPEED_50000 +#define SPEED_50000 50000 +#endif + +#ifndef SPEED_56000 +#define SPEED_56000 56000 +#endif + +#ifndef SPEED_100000 +#define SPEED_100000 100000 +#endif + +#ifndef SPEED_200000 +#define SPEED_200000 200000 +#endif + +#ifndef SPEED_400000 +#define SPEED_400000 400000 +#endif + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif + +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif + +#ifndef PORT_DA +#define PORT_DA 0x05 +#endif + +#ifndef PORT_NONE +#define PORT_NONE 0xef +#endif + +#if !defined(SUPPORTED_40000baseCR4_Full) +#define SUPPORTED_40000baseCR4_Full (1 << 24) + +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#endif + +#if !defined(ETHTOOL_FEC_LLRS) +#define ETHTOOL_FEC_LLRS (1 << 5) +#else +#define HAVE_ETHTOOL_FEC_LLRS +#endif + +#if !defined(HAVE_ETH_TEST_FL_EXTERNAL_LB) +#define ETH_TEST_FL_EXTERNAL_LB 0 +#define ETH_TEST_FL_EXTERNAL_LB_DONE 0 +#endif + +#if !defined(IPV4_FLOW) +#define IPV4_FLOW 0x10 +#endif + +#if !defined(IPV6_FLOW) +#define IPV6_FLOW 0x11 +#endif + +#if 
defined(HAVE_ETH_GET_HEADLEN) || (LINUX_VERSION_CODE > 0x040900) +#define BNXT_RX_PAGE_MODE_SUPPORT 1 +#endif + +#if !defined(ETH_P_8021AD) +#define ETH_P_8021AD 0x88A8 +#endif + +#if !defined(ETH_P_ROCE) +#define ETH_P_ROCE 0x8915 +#endif + +#if !defined(ROCE_V2_UDP_PORT) +#define ROCE_V2_UDP_DPORT 4791 +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL +#define NETIF_F_GSO_UDP_TUNNEL 0 +#endif + +#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM +#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 +#endif + +#ifndef NETIF_F_GSO_GRE +#define NETIF_F_GSO_GRE 0 +#endif + +#ifndef NETIF_F_GSO_GRE_CSUM +#define NETIF_F_GSO_GRE_CSUM 0 +#endif + +#ifndef NETIF_F_GSO_IPIP +#define NETIF_F_GSO_IPIP 0 +#endif + +#ifndef NETIF_F_GSO_SIT +#define NETIF_F_GSO_SIT 0 +#endif + +#ifndef NETIF_F_GSO_IPXIP4 +#define NETIF_F_GSO_IPXIP4 (NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT) +#endif + +#ifndef NETIF_F_GSO_PARTIAL +#define NETIF_F_GSO_PARTIAL 0 +#else +#define HAVE_GSO_PARTIAL_FEATURES 1 +#endif + +#ifndef NETIF_F_GSO_UDP_L4 +#define NETIF_F_GSO_UDP_L4 0 +#define SKB_GSO_UDP_L4 0 +#endif + +/* Tie rx checksum offload to tx checksum offload for older kernels. 
*/ +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM NETIF_F_IP_CSUM +#endif + +#ifndef NETIF_F_CSUM_MASK +#define NETIF_F_CSUM_MASK (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_HW_CSUM) +#endif + +#ifndef NETIF_F_NTUPLE +#define NETIF_F_NTUPLE 0 +#endif + +#ifndef NETIF_F_RXHASH +#define NETIF_F_RXHASH 0 +#else +#define HAVE_NETIF_F_RXHASH +#endif + +#ifdef NETIF_F_GRO_HW +#define HAVE_NETIF_F_GRO_HW 1 +#else +#define NETIF_F_GRO_HW 0 +#endif + +#ifndef NETIF_F_HW_TLS_RX +#define NETIF_F_HW_TLS_RX 0 +#endif + +#ifndef NETIF_F_HW_TLS_TX +#define NETIF_F_HW_TLS_TX 0 +#endif + +#ifndef HAVE_SKB_GSO_UDP_TUNNEL_CSUM +#ifndef HAVE_SKB_GSO_UDP_TUNNEL +#define SKB_GSO_UDP_TUNNEL 0 +#endif +#define SKB_GSO_UDP_TUNNEL_CSUM SKB_GSO_UDP_TUNNEL +#endif + +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 +#endif + +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 +#endif + +#ifndef BRIDGE_MODE_UNDEF +#define BRIDGE_MODE_UNDEF 0xffff +#endif + +#ifndef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(mapping) DECLARE_PCI_UNMAP_ADDR(mapping) +#endif + +#ifndef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(len) DECLARE_PCI_UNMAP_LEN(len) +#endif + +#ifndef dma_unmap_addr_set +#define dma_unmap_addr_set pci_unmap_addr_set +#endif + +#ifndef dma_unmap_addr +#define dma_unmap_addr pci_unmap_addr +#endif + +#ifndef dma_unmap_len +#define dma_unmap_len pci_unmap_len +#endif + +#ifdef HAVE_DMA_ATTRS_H +#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ + dma_map_single_attrs(dev, cpu_addr, size, dir, NULL) + +#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_single_attrs(dev, dma_addr, size, dir, NULL) + +#ifdef HAVE_DMA_MAP_PAGE_ATTRS +#define dma_map_page_attrs(dev, page, offset, size, dir, attrs) \ + dma_map_page_attrs(dev, page, offset, size, dir, NULL) + +#define dma_unmap_page_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_page_attrs(dev, dma_addr, size, dir, NULL) +#endif +#endif + +#ifndef HAVE_DMA_MAP_PAGE_ATTRS 
+#define dma_map_page_attrs(dev, page, offset, size, dir, attrs) \ + dma_map_page(dev, page, offset, size, dir) + +#define dma_unmap_page_attrs(dev, dma_addr, size, dir, attrs) \ + dma_unmap_page(dev, dma_addr, size, dir) +#endif + +#ifndef RHEL_RELEASE_VERSION +#define RHEL_RELEASE_VERSION(a, b) 0 +#endif + +#if defined(RHEL_RELEASE_CODE) && (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,3)) +#if defined(CONFIG_X86_64) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif +#endif + +#ifdef HAVE_NDO_SET_VF_VLAN_RH73 +#define ndo_set_vf_vlan ndo_set_vf_vlan_rh73 +#endif + +#ifdef HAVE_NDO_CHANGE_MTU_RH74 +#define ndo_change_mtu ndo_change_mtu_rh74 +#undef HAVE_MIN_MTU +#endif + +#ifdef HAVE_NDO_SETUP_TC_RH72 +#define ndo_setup_tc ndo_setup_tc_rh72 +#endif + +#ifdef HAVE_NDO_SET_VF_TRUST_RH +#define ndo_set_vf_trust extended.ndo_set_vf_trust +#endif + +#ifdef HAVE_NDO_UDP_TUNNEL_RH +#define ndo_udp_tunnel_add extended.ndo_udp_tunnel_add +#define ndo_udp_tunnel_del extended.ndo_udp_tunnel_del +#endif + +#ifndef HAVE_TC_SETUP_QDISC_MQPRIO +#define TC_SETUP_QDISC_MQPRIO TC_SETUP_MQPRIO +#endif + +#if defined(HAVE_NDO_SETUP_TC_RH) || defined(HAVE_EXT_GET_PHYS_PORT_NAME) || defined(HAVE_NDO_SET_VF_TRUST_RH) +#define HAVE_NDO_SIZE 1 +#endif + +#ifndef FLOW_RSS +#define FLOW_RSS 0x20000000 +#endif + +#ifndef FLOW_MAC_EXT +#define FLOW_MAC_EXT 0x40000000 +#endif + +#ifndef ETHTOOL_RX_FLOW_SPEC_RING +#define ETHTOOL_RX_FLOW_SPEC_RING 
0x00000000FFFFFFFFLL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL +#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 +static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) +{ + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; +}; + +static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) +{ + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; +}; +#endif + +#ifndef ETHTOOL_GEEE +struct ethtool_eee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; +#endif + +#ifndef HAVE_ETHTOOL_KEEE +/* ethtool_keee must be compatible with ethtool_eee. Do not follow + * the upstream structure. + */ +struct ethtool_keee { + __u32 cmd; + __u32 supported; + __u32 advertised; + __u32 lp_advertised; + __u32 eee_active; + __u32 eee_enabled; + __u32 tx_lpi_enabled; + __u32 tx_lpi_timer; + __u32 reserved[2]; +}; + +#define _bnxt_fw_to_linkmode(mode, fw_speeds) \ + mode = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0) + +#endif + +#ifndef HAVE_ETHTOOL_RESET_CRASHDUMP +enum compat_ethtool_reset_flags { ETH_RESET_CRASHDUMP = 1 << 9 }; +#endif + +#ifndef HAVE_CQE_ETHTOOL_COALESCE +#define bnxt_get_coalesce(dev, coal, kernel_coal, extack) \ + bnxt_get_coalesce(dev, coal) + +#define bnxt_set_coalesce(dev, coal, kernel_coal, extack) \ + bnxt_set_coalesce(dev, coal) + +#define ETHTOOL_COALESCE_USE_CQE 0 +#endif + +#ifndef HAVE_ETHTOOL_GET_RING_EXT +#define bnxt_get_ringparam(dev, ering, kernel_ering, extack) \ + bnxt_get_ringparam(dev, ering) + +#define bnxt_set_ringparam(dev, ering, kernel_ering, extack) \ + bnxt_set_ringparam(dev, ering) +#endif + +#ifndef HAVE_SKB_FRAG_PAGE +static inline struct page *skb_frag_page(const skb_frag_t *frag) +{ + return frag->page; +} + +static inline void *skb_frag_address_safe(const skb_frag_t *frag) +{ + void *ptr = page_address(skb_frag_page(frag)); + if 
(unlikely(!ptr)) + return NULL; + + return ptr + frag->page_offset; +} + +static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page) +{ + frag->page = page; +} + +#define skb_frag_dma_map(x, frag, y, len, z) \ + pci_map_page(bp->pdev, (frag)->page, \ + (frag)->page_offset, (len), PCI_DMA_TODEVICE) +#endif /* HAVE_SKB_FRAG_PAGE */ + +#ifndef HAVE_SKB_FRAG_FILL_PAGE_DESC +#ifdef SKB_FRAG_USES_BIO +static inline void skb_frag_fill_page_desc(skb_frag_t *frag, + struct page *page, + int off, int size) +{ + frag->bv_page = page; + frag->bv_offset = off; + skb_frag_size_set(frag, size); +} + +#else + +static inline void skb_frag_fill_page_desc(skb_frag_t *frag, + struct page *page, + int off, int size) +{ + frag->page_offset = off; + skb_frag_size_set(frag, size); + __skb_frag_set_page(frag, page); +} +#endif +#endif /* HAVE_SKB_FRAG_FILL_PAGE_DESC */ + +#ifndef HAVE_SKB_FRAG_ACCESSORS +static inline void skb_frag_off_add(skb_frag_t *frag, int delta) +{ + frag->page_offset += delta; +} +#endif + +#ifndef HAVE_SKB_FREE_FRAG +static inline void skb_free_frag(void *addr) +{ + put_page(virt_to_head_page(addr)); +} +#endif + +#ifndef HAVE_PCI_VFS_ASSIGNED +static inline int pci_vfs_assigned(struct pci_dev *dev) +{ + return 0; +} +#endif + +#ifndef HAVE_PCI_NUM_VF +#include <../drivers/pci/pci.h> + +static inline int pci_num_vf(struct pci_dev *dev) +{ + if (!dev->is_physfn) + return 0; + + return dev->sriov->nr_virtfn; +} +#endif + +#ifndef SKB_ALLOC_NAPI +static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, + unsigned int length) +{ + struct sk_buff *skb; + + length += NET_SKB_PAD + NET_IP_ALIGN; + skb = netdev_alloc_skb(napi->dev, length); + + if (likely(skb)) + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + return skb; +} +#endif + +#ifndef HAVE_SKB_HASH_TYPE + +enum pkt_hash_types { + PKT_HASH_TYPE_NONE, /* Undefined type */ + PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ + PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */ + 
PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */ +}; + +static inline void +skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) +{ +#ifdef HAVE_NETIF_F_RXHASH + skb->rxhash = hash; +#endif +} + +#endif + +#define GET_NET_STATS(x) (unsigned long)le64_to_cpu(x) + +#if !defined(NETDEV_RX_FLOW_STEER) || (LINUX_VERSION_CODE < 0x030300) || \ + defined(NO_NETDEV_CPU_RMAP) +#undef CONFIG_RFS_ACCEL +#endif + +#if !defined(IEEE_8021QAZ_APP_SEL_DGRAM) || !defined(CONFIG_DCB) || !defined(HAVE_IEEE_DELAPP) +#undef CONFIG_BNXT_DCB +#endif + +#ifdef CONFIG_BNXT_DCB +#ifndef IEEE_8021QAZ_APP_SEL_DSCP +#define IEEE_8021QAZ_APP_SEL_DSCP 5 +#endif +#endif + +#ifndef NETDEV_HW_FEATURES +#define hw_features features +#endif + +#ifndef HAVE_NETDEV_FEATURES_T +#ifdef HAVE_NDO_FIX_FEATURES +typedef u32 netdev_features_t; +#else +typedef unsigned long netdev_features_t; +#endif +#endif + +#if !defined(IFF_UNICAST_FLT) +#define IFF_UNICAST_FLT 0 +#endif + +#if !defined(IFF_SUPP_NOFCS) +#define IFF_SUPP_NOFCS 0 +#else +#define HAVE_NOFCS 1 +#endif + +#ifndef HAVE_NEW_BUILD_SKB +#define build_skb(data, frag) build_skb(data) +#endif + +#ifndef HAVE_NAPI_ALLOC_FRAG +#define napi_alloc_frag(fragsz) netdev_alloc_frag(fragsz) +#endif + +#ifndef HAVE_NAPI_BUILD_SKB +#define napi_build_skb(data, frag_size) build_skb(data, frag_size) +#endif + +#ifndef __rcu +#define __rcu +#endif + +#ifndef rcu_dereference_protected +#define rcu_dereference_protected(p, c) \ + rcu_dereference((p)) +#endif + +#ifndef rcu_access_pointer +#define rcu_access_pointer rcu_dereference +#endif + +#ifndef rtnl_dereference +#define rtnl_dereference(p) \ + rcu_dereference_protected(p, lockdep_rtnl_is_held()) +#endif + +#ifndef RCU_INIT_POINTER +#define RCU_INIT_POINTER(p, v) \ + p = (typeof(*v) __force __rcu *)(v) +#endif + +#ifdef HAVE_OLD_HLIST +#define __hlist_for_each_entry_rcu(f, n, h, m) \ + hlist_for_each_entry_rcu(f, n, h, m) +#define __hlist_for_each_entry_safe(f, n, t, h, m) \ + 
hlist_for_each_entry_safe(f, n, t, h, m) +#else +#define __hlist_for_each_entry_rcu(f, n, h, m) \ + hlist_for_each_entry_rcu(f, h, m) +#define __hlist_for_each_entry_safe(f, n, t, h, m) \ + hlist_for_each_entry_safe(f, t, h, m) +#endif + +#ifndef VLAN_PRIO_SHIFT +#define VLAN_PRIO_SHIFT 13 +#endif + +#ifndef IEEE_8021Q_MAX_PRIORITIES +#define IEEE_8021Q_MAX_PRIORITIES 8 +#endif + +#ifndef NETIF_F_HW_VLAN_CTAG_TX +#define NETIF_F_HW_VLAN_CTAG_TX NETIF_F_HW_VLAN_TX +#define NETIF_F_HW_VLAN_CTAG_RX NETIF_F_HW_VLAN_RX +/* 802.1AD not supported on older kernels */ +#define NETIF_F_HW_VLAN_STAG_TX 0 +#define NETIF_F_HW_VLAN_STAG_RX 0 + +#define __vlan_hwaccel_put_tag(skb, proto, tag) \ +do { \ + if (proto == ntohs(ETH_P_8021Q)) \ + __vlan_hwaccel_put_tag(skb, tag);\ +} while (0) + +#define vlan_proto protocol + +#if defined(HAVE_VLAN_RX_REGISTER) +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define OLD_VLAN 1 +#define OLD_VLAN_VALID (1 << 31) +#endif +#endif + +#endif + +#ifndef HAVE_ETH_TYPE_VLAN + +static inline bool eth_type_vlan(__be16 ethertype) +{ + switch (ethertype) { + case htons(ETH_P_8021Q): + case htons(ETH_P_8021AD): + return true; + default: + return false; + } +} +#endif + +#ifndef HAVE_NETDEV_NOTIFIER_INFO_TO_DEV +#ifndef netdev_notifier_info_to_dev +static inline struct net_device * +netdev_notifier_info_to_dev(void *ptr) +{ + return (struct net_device *)ptr; +} +#endif +#endif + +static inline int bnxt_en_register_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = register_netdevice_notifier_rh(nb); +#else + rc = register_netdevice_notifier(nb); +#endif + return rc; +} + +static inline int bnxt_en_unregister_netdevice_notifier(struct notifier_block *nb) +{ + int rc; +#ifdef HAVE_REGISTER_NETDEVICE_NOTIFIER_RH + rc = unregister_netdevice_notifier_rh(nb); +#else + rc = unregister_netdevice_notifier(nb); +#endif + return rc; +} + +#ifndef HAVE_NETDEV_UPDATE_FEATURES +static 
inline void netdev_update_features(struct net_device *dev) +{ + /* Do nothing, since we can't set default VLAN on these old kernels. */ +} +#endif + +#if !defined(netdev_printk) && (LINUX_VERSION_CODE < 0x020624) + +#ifndef HAVE_NETDEV_NAME +static inline const char *netdev_name(const struct net_device *dev) +{ + if (dev->reg_state != NETREG_REGISTERED) + return "(unregistered net_device)"; + return dev->name; +} +#endif + +#define NET_PARENT_DEV(netdev) ((netdev)->dev.parent) + +#define netdev_printk(level, netdev, format, args...) \ + dev_printk(level, NET_PARENT_DEV(netdev), \ + "%s: " format, \ + netdev_name(netdev), ##args) + +#endif + +#ifndef netdev_err +#define netdev_err(dev, format, args...) \ + netdev_printk(KERN_ERR, dev, format, ##args) +#endif + +#ifndef netdev_info +#define netdev_info(dev, format, args...) \ + netdev_printk(KERN_INFO, dev, format, ##args) +#endif + +#ifndef netdev_warn +#define netdev_warn(dev, format, args...) \ + netdev_printk(KERN_WARNING, dev, format, ##args) +#endif + +#ifndef dev_warn_ratelimited +#define dev_warn_ratelimited(dev, format, args...) \ + dev_warn(dev, format, ##args) +#endif + +#ifndef netdev_level_once +#define netdev_level_once(level, dev, fmt, ...) \ +do { \ + static bool __print_once __read_mostly; \ + \ + if (!__print_once) { \ + __print_once = true; \ + netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ + } \ +} while (0) + +#define netdev_info_once(dev, fmt, ...) \ + netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) +#define netdev_warn_once(dev, fmt, ...) \ + netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) +#endif + +#ifndef netif_level +#define netif_level(level, priv, type, dev, fmt, args...) \ +do { \ + if (netif_msg_##type(priv)) \ + netdev_##level(dev, fmt, ##args); \ +} while (0) +#endif + +#ifndef netif_warn +#define netif_warn(priv, type, dev, fmt, args...) 
\ + netif_level(warn, priv, type, dev, fmt, ##args) +#endif + +#ifndef netif_notice +#define netif_notice(priv, type, dev, fmt, args...) \ + netif_level(notice, priv, type, dev, fmt, ##args) +#endif + +#ifndef netif_info +#define netif_info(priv, type, dev, fmt, args...) \ + netif_level(info, priv, type, dev, fmt, ##args) +#endif + +#ifndef pci_warn +#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) +#endif + +#ifndef netdev_uc_count +#define netdev_uc_count(dev) ((dev)->uc.count) +#endif + +#ifndef netdev_for_each_uc_addr +#define netdev_for_each_uc_addr(ha, dev) \ + list_for_each_entry(ha, &dev->uc.list, list) +#endif + +#ifndef netdev_for_each_mc_addr +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() smp_mb() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() smp_mb() +#endif + +#ifndef dma_rmb +#define dma_rmb() rmb() +#endif + +#ifndef writel_relaxed +#define writel_relaxed(v, a) writel(v, a) +#endif + +#ifndef writeq_relaxed +#define writeq_relaxed(v, a) writeq(v, a) +#endif + +#ifndef HAVE_LO_HI_WRITEQ +static inline void lo_hi_writeq(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr + 4); +} + +static inline void lo_hi_writeq_relaxed(u64 val, volatile void __iomem *addr) +{ + writel_relaxed(val, addr); + writel_relaxed(val >> 32, addr + 4); +} +#endif + +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) (x = val) +#endif + +#ifndef READ_ONCE +#define READ_ONCE(val) (val) +#endif + +#ifdef CONFIG_NET_RX_BUSY_POLL +#include +#if defined(HAVE_NAPI_HASH_ADD) && defined(NETDEV_BUSY_POLL) +#define BNXT_PRIV_RX_BUSY_POLL 1 +#endif +#endif + +#if defined(HAVE_NETPOLL_POLL_DEV) +#undef CONFIG_NET_POLL_CONTROLLER +#endif + +#if !defined(CONFIG_PTP_1588_CLOCK) && !defined(CONFIG_PTP_1588_CLOCK_MODULE) +#undef HAVE_IEEE1588_SUPPORT +#endif + +#ifdef 
HAVE_IEEE1588_SUPPORT + +#if !defined(HAVE_PTP_HEADER) +struct clock_identity { + u8 id[8]; +} __packed; + +struct port_identity { + struct clock_identity clock_identity; + __be16 port_number; +} __packed; + +struct ptp_header { + u8 tsmt; /* transportSpecific | messageType */ + u8 ver; /* reserved | versionPTP */ + __be16 message_length; + u8 domain_number; + u8 reserved1; + u8 flag_field[2]; + __be64 correction; + __be32 reserved2; + struct port_identity source_port_identity; + __be16 sequence_id; + u8 control; + u8 log_message_interval; +} __packed; +#endif + +#if !defined(HAVE_PTP_CLASSES) +#define PTP_CLASS_V2 0x02 /* protocol version 2 */ +#define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ +#define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ +#define PTP_CLASS_L2 0x30 /* event in a L2 packet */ +#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged L2 packet */ +#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */ +#define OFF_IHL 14 +#define IPV4_HLEN(data) (((struct iphdr *)((data) + OFF_IHL))->ihl << 2) +#define IP6_HLEN 40 +#define UDP_HLEN 8 +#endif + +#if !defined(HAVE_PTP_CLASSIFY_RAW) +static inline unsigned int ptp_classify_raw(const struct sk_buff *skb) +{ + u32 ptp_class = PTP_CLASS_V2; + + if (skb_vlan_tag_present(skb)) + ptp_class |= PTP_CLASS_VLAN; + if (skb->protocol == htons(ETH_P_IP)) + ptp_class |= PTP_CLASS_IPV4; + if (skb->protocol == htons(ETH_P_IPV6)) + ptp_class |= PTP_CLASS_IPV6; + if (skb->protocol == htons(ETH_P_1588)) + ptp_class |= PTP_CLASS_L2; + + return ptp_class; +} +#endif + +#if !defined(HAVE_PTP_PARSE_HEADER) +static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb, + unsigned int type) +{ + u8 *ptr = skb_mac_header(skb); + + if (type & PTP_CLASS_VLAN) + ptr += VLAN_HLEN; + + switch (type & PTP_CLASS_PMASK) { + case PTP_CLASS_IPV4: + ptr += IPV4_HLEN(ptr) + UDP_HLEN; + break; + case PTP_CLASS_IPV6: + ptr += IP6_HLEN + UDP_HLEN; + break; + case PTP_CLASS_L2: + break; + default: 
+ return NULL; + } + + ptr += ETH_HLEN; + + /* Ensure that the entire header is present in this packet. */ + if (ptr + sizeof(struct ptp_header) > skb->data + skb->len) + return NULL; + + return (struct ptp_header *)ptr; +} +#endif +#endif /* HAVE_IEEE1588_SUPPORT */ + +#if !defined(HAVE_PTP_GETTIMEX64) +#if !defined(HAVE_TIMESPEC64) +struct timespec64 { + __signed__ long tv_sec; + long tv_nsec; +}; +#endif + +#ifndef HAVE_PTP_SYS_TIMESTAMP +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; +#endif + +static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) +{ +} + +static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) +{ +} +#endif + +#if defined(RHEL_RELEASE_CODE) && (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(7,0)) +#undef CONFIG_CRASH_DUMP +#endif + +#ifndef skb_vlan_tag_present +#define skb_vlan_tag_present(skb) vlan_tx_tag_present(skb) +#define skb_vlan_tag_get(skb) vlan_tx_tag_get(skb) +#endif + +#if !defined(HAVE_NAPI_HASH_DEL) +static inline void napi_hash_del(struct napi_struct *napi) +{ +} +#endif + +#if !defined(LL_FLUSH_FAILED) || !defined(HAVE_NAPI_HASH_ADD) +static inline void napi_hash_add(struct napi_struct *napi) +{ +} +#endif + +#ifndef HAVE_SET_COHERENT_MASK +static inline int dma_set_coherent_mask(struct device *dev, u64 mask) +{ + struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); + + return pci_set_consistent_dma_mask(pdev, mask); +} +#endif + +#ifndef HAVE_SET_MASK_AND_COHERENT +static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask) +{ + int rc = dma_set_mask(dev, mask); + if (rc == 0) + dma_set_coherent_mask(dev, mask); + return rc; +} +#endif + +#ifndef HAVE_DMA_ZALLOC_COHERENT +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, + flag | __GFP_ZERO); + return ret; +} +#endif + +#ifndef HAVE_IFLA_TX_RATE +#define 
ndo_set_vf_rate ndo_set_vf_tx_rate +#endif + +#ifndef HAVE_NDO_ETH_IOCTL +#define ndo_eth_ioctl ndo_do_ioctl +#endif + +#ifndef HAVE_PRANDOM_BYTES +#define prandom_bytes get_random_bytes +#endif + +#ifndef rounddown +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) +#endif + +#ifdef NO_SKB_FRAG_SIZE +static inline unsigned int skb_frag_size(const skb_frag_t *frag) +{ + return frag->size; +} +#endif + +#ifdef NO_ETH_RESET_AP +#define ETH_RESET_AP (1<<8) +#endif + +#ifndef HAVE_SKB_CHECKSUM_NONE_ASSERT +static inline void skb_checksum_none_assert(struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; +} +#endif + +#ifndef HAVE_ETHER_ADDR_EQUAL +static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#endif + +#ifndef HAVE_ETHER_ADDR_COPY +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ + memcpy(dst, src, ETH_ALEN); +} +#endif + +#ifndef HAVE_ETH_BROADCAST_ADDR +static inline void eth_broadcast_addr(u8 *addr) +{ + memset(addr, 0xff, ETH_ALEN); +} +#endif + +#ifndef HAVE_ETH_HW_ADDR_RANDOM +static inline void eth_hw_addr_random(struct net_device *dev) +{ +#if defined(NET_ADDR_RANDOM) + dev->addr_assign_type = NET_ADDR_RANDOM; +#endif + random_ether_addr(dev->dev_addr); +} +#endif + +#ifndef HAVE_NETDEV_TX_QUEUE_CTRL +static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes) +{ +} + +static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, + unsigned int pkts, unsigned int bytes) +{ +} + +static inline void netdev_tx_reset_queue(struct netdev_queue *q) +{ +} +#endif + +#ifndef HAVE_NETIF_SET_REAL_NUM_RX +static inline int netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#endif + +#ifndef HAVE_NETIF_SET_REAL_NUM_TX +static inline void netif_set_real_num_tx_queues(struct net_device *dev, + unsigned int txq) +{ + dev->real_num_tx_queues = txq; +} +#endif + 
+#ifndef TSO_MAX_SEGS +static inline void netif_set_tso_max_segs(struct net_device *dev, + unsigned int segs) +{ + dev->gso_max_segs = segs; +} +#endif + +#ifndef HAVE_NETIF_GET_DEFAULT_RSS +static inline int netif_get_num_default_rss_queues(void) +{ + return min_t(int, 8, num_online_cpus()); +} +#endif + +#ifndef IFF_RXFH_CONFIGURED +#define IFF_RXFH_CONFIGURED 0 +#undef HAVE_SET_RXFH +static inline bool netif_is_rxfh_configured(const struct net_device *dev) +{ + return false; +} +#endif + +#if defined(HAVE_NETDEV_TX_DROPPED) +#if !defined(HAVE_NETDEV_TX_DROPPED_CORE_STATS) +#if defined(HAVE_NETDEV_RH_TX_DROPPED) +#define dev_core_stats_tx_dropped_inc(dev) atomic_long_inc(&(dev)->rh_tx_dropped) +#else +#define dev_core_stats_tx_dropped_inc(dev) atomic_long_inc(&(dev)->tx_dropped) +#endif +#endif +#else +#define dev_core_stats_tx_dropped_inc(dev) +#endif + +#if !defined(HAVE_TCP_V6_CHECK) +static __inline__ __sum16 tcp_v6_check(int len, + const struct in6_addr *saddr, + const struct in6_addr *daddr, + __wsum base) +{ + return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); +} +#endif + +#if !defined(HAVE_SKB_TCP_ALL_HEADERS) +#include + +static inline int skb_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_transport_offset(skb) + tcp_hdrlen(skb); +} + +static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_inner_network_offset(skb) + skb_inner_network_header_len(skb) + + inner_tcp_hdrlen(skb); +} +#endif + +#ifndef ipv6_authlen +#define ipv6_authlen(p) (((p)->hdrlen+2) << 2) +#endif + +#ifdef HAVE_NDO_FEATURES_CHECK +#if defined(HAVE_INNER_NETWORK_OFFSET) && !defined(HAVE_INNER_ETH_HDR) +static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)(skb->head + skb->inner_mac_header); +} +#endif +#endif + +#ifndef HAVE_USLEEP_RANGE +static inline void usleep_range(unsigned long min, unsigned long max) +{ + if (min < 1000) + udelay(min); + else + msleep(min / 1000); +} +#endif + 
+#ifndef HAVE_GET_NUM_TC +static inline int netdev_get_num_tc(struct net_device *dev) +{ + return 0; +} + +static inline void netdev_reset_tc(struct net_device *dev) +{ +} + +static inline int netdev_set_tc_queue(struct net_device *devi, u8 tc, + u16 count, u16 offset) +{ + return 0; +} +#endif + +#ifndef HAVE_VZALLOC +static inline void *vzalloc(size_t size) +{ + void *ret = vmalloc(size); + if (ret) + memset(ret, 0, size); + return ret; +} +#endif + +#ifndef HAVE_KMALLOC_ARRAY +static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) +{ + return kmalloc(n * s, gfp); +} +#endif + +#ifndef ETH_MODULE_SFF_8436 +#define ETH_MODULE_SFF_8436 0x4 +#endif + +#ifndef ETH_MODULE_SFF_8436_LEN +#define ETH_MODULE_SFF_8436_LEN 256 +#endif + +#ifndef ETH_MODULE_SFF_8636 +#define ETH_MODULE_SFF_8636 0x3 +#endif + +#ifndef ETH_MODULE_SFF_8636_LEN +#define ETH_MODULE_SFF_8636_LEN 256 +#endif + +#ifndef HAVE_MSIX_RANGE +static inline int +pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, + int minvec, int maxvec) +{ + int rc = -ERANGE; + + while (maxvec >= minvec) { + rc = pci_enable_msix(dev, entries, maxvec); + if (!rc) + return maxvec; + if (rc < 0) + return rc; + if (rc < minvec) + return -ENOSPC; + maxvec = rc; + } + + return rc; +} +#endif /* HAVE_MSIX_RANGE */ + +#ifndef HAVE_PCI_PHYSFN +static inline struct pci_dev *pci_physfn(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_IOV + if (dev->is_virtfn) + dev = dev->physfn; +#endif + + return dev; +} +#endif + +#ifndef HAVE_PCI_PRINT_LINK_STATUS +#ifndef HAVE_PCI_LINK_WIDTH +enum pcie_link_width { + PCIE_LNK_WIDTH_UNKNOWN = 0xFF, +}; +#endif + +#ifndef HAVE_PCIE_BUS_SPEED +enum pci_bus_speed { + PCIE_SPEED_2_5GT = 0x14, + PCIE_SPEED_5_0GT = 0x15, + PCIE_SPEED_8_0GT = 0x16, +#ifndef PCIE_SPEED_16_0GT + PCIE_SPEED_16_0GT = 0x17, +#endif + PCI_SPEED_UNKNOWN = 0xFF, +}; +#endif + +#ifndef PCIE_SPEED_16_0GT +#define PCIE_SPEED_16_0GT 0x17 +#endif + +static const unsigned char pcie_link_speed[] = { + 
PCI_SPEED_UNKNOWN, /* 0 */ + PCIE_SPEED_2_5GT, /* 1 */ + PCIE_SPEED_5_0GT, /* 2 */ + PCIE_SPEED_8_0GT, /* 3 */ + PCIE_SPEED_16_0GT, /* 4 */ + PCI_SPEED_UNKNOWN, /* 5 */ + PCI_SPEED_UNKNOWN, /* 6 */ + PCI_SPEED_UNKNOWN, /* 7 */ + PCI_SPEED_UNKNOWN, /* 8 */ + PCI_SPEED_UNKNOWN, /* 9 */ + PCI_SPEED_UNKNOWN, /* A */ + PCI_SPEED_UNKNOWN, /* B */ + PCI_SPEED_UNKNOWN, /* C */ + PCI_SPEED_UNKNOWN, /* D */ + PCI_SPEED_UNKNOWN, /* E */ + PCI_SPEED_UNKNOWN /* F */ +}; + +#ifndef PCI_EXP_LNKSTA_NLW_SHIFT +#define PCI_EXP_LNKSTA_NLW_SHIFT 4 +#endif + +#ifndef PCI_EXP_LNKCAP2 +#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */ +#endif + +#ifndef PCI_EXP_LNKCAP2_SLS_2_5GB +#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ +#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ +#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ +#endif + +#ifndef PCI_EXP_LNKCAP2_SLS_16_0GB +#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_2_5GB +#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0*/ +#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1*/ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_8_0GB +#define PCI_EXP_LNKCAP_SLS_8_0GB 0x00000003 /* LNKCAP2 SLS Vector bit2 */ +#endif + +#ifndef PCI_EXP_LNKCAP_SLS_16_0GB +#define PCI_EXP_LNKCAP_SLS_16_0GB 0x00000004 /* LNKCAP2 SLS Vector bit 3 */ +#endif + +#ifndef PCIE_SPEED2STR +/* PCIe link information */ +#define PCIE_SPEED2STR(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? "16 GT/s" : \ + (speed) == PCIE_SPEED_8_0GT ? "8 GT/s" : \ + (speed) == PCIE_SPEED_5_0GT ? "5 GT/s" : \ + (speed) == PCIE_SPEED_2_5GT ? "2.5 GT/s" : \ + "Unknown speed") + +/* PCIe speed to Mb/s reduced by encoding overhead */ +#define PCIE_SPEED2MBS_ENC(speed) \ + ((speed) == PCIE_SPEED_16_0GT ? 16000 * 128 / 130 : \ + (speed) == PCIE_SPEED_8_0GT ? 8000 * 128 / 130 : \ + (speed) == PCIE_SPEED_5_0GT ? 
5000 * 8 / 10 : \ + (speed) == PCIE_SPEED_2_5GT ? 2500 * 8 / 10 : \ + 0) +#endif /* PCIE_SPEED2STR */ + +#define BNXT_PCIE_CAP 0xAC +#ifndef HAVE_PCI_UPSTREAM_BRIDGE +static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) +{ + dev = pci_physfn(dev); + if (pci_is_root_bus(dev->bus)) + return NULL; + + return dev->bus->self; +} +#endif + +static u32 +pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, + enum pci_bus_speed *speed, enum pcie_link_width *width) +{ + enum pcie_link_width next_width; + enum pci_bus_speed next_speed; + u32 bw, next_bw; + u16 lnksta; + + if (speed) + *speed = PCI_SPEED_UNKNOWN; + if (width) + *width = PCIE_LNK_WIDTH_UNKNOWN; + + bw = 0; +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + while (dev) { + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> + PCI_EXP_LNKSTA_NLW_SHIFT; + + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + /* Check if current device limits the total bandwidth */ + if (!bw || next_bw <= bw) { + bw = next_bw; + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; + } + + dev = pci_upstream_bridge(dev); + } +#else + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKSTA, &lnksta); + next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; + next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; + next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); + + if (limiting_dev) + *limiting_dev = dev; + if (speed) + *speed = next_speed; + if (width) + *width = next_width; +#endif /* HAVE_PCIE_CAPABILITY_READ_WORD */ + + return bw; +} + +static enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) +{ + /* + * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. 
The + * implementation note there recommends using the Supported Link + * Speeds Vector in Link Capabilities 2 when supported. + * + * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software + * should use the Supported Link Speeds field in Link Capabilities, + * where only 2.5 GT/s and 5.0 GT/s speeds were defined. + */ +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + u32 lnkcap2, lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); +#else + u16 lnkcap2, lnkcap; + + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP2, &lnkcap2); +#endif + if (lnkcap2) { /* PCIe r3.0-compliant */ + if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB) + return PCIE_SPEED_16_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) + return PCIE_SPEED_8_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + return PCI_SPEED_UNKNOWN; + } + +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); +#else + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP, &lnkcap); +#endif + if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) + return PCIE_SPEED_5_0GT; + else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) + return PCIE_SPEED_2_5GT; + + return PCI_SPEED_UNKNOWN; +} + +static enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) +{ +#ifdef HAVE_PCIE_CAPABILITY_READ_WORD + u32 lnkcap; + + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); +#else + u16 lnkcap; + + pci_read_config_word(dev, BNXT_PCIE_CAP + PCI_EXP_LNKCAP, &lnkcap); +#endif + if (lnkcap) + return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; + + return PCIE_LNK_WIDTH_UNKNOWN; +} + +static u32 +pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, + enum pcie_link_width *width) +{ + *speed = pcie_get_speed_cap(dev); + *width = pcie_get_width_cap(dev); + + if (*speed == PCI_SPEED_UNKNOWN || *width == 
PCIE_LNK_WIDTH_UNKNOWN) + return 0; + + return *width * PCIE_SPEED2MBS_ENC(*speed); +} + +static inline void pcie_print_link_status(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + enum pcie_link_width width, width_cap; + enum pci_bus_speed speed, speed_cap; + struct pci_dev *limiting_dev = NULL; + u32 bw_avail, bw_cap; + + bw_cap = pcie_bandwidth_capable(pdev, &speed_cap, &width_cap); + bw_avail = pcie_bandwidth_available(pdev, &limiting_dev, &speed, + &width); + + if (bw_avail >= bw_cap) + netdev_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); + else + netdev_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", + bw_avail / 1000, bw_avail % 1000, + PCIE_SPEED2STR(speed), width, + limiting_dev ? pci_name(limiting_dev) : "", + bw_cap / 1000, bw_cap % 1000, + PCIE_SPEED2STR(speed_cap), width_cap); +} +#endif /* HAVE_PCI_PRINT_LINK_STATUS */ + +#ifndef HAVE_PCI_IS_BRIDGE +static inline bool pci_is_bridge(struct pci_dev *dev) +{ + return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || + dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; +} +#endif + +#ifndef HAVE_PCI_GET_DSN +static inline u64 pci_get_dsn(struct pci_dev *dev) +{ + u32 dword; + u64 dsn; + int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); + if (!pos) + return 0; + + /* + * The Device Serial Number is two dwords offset 4 bytes from the + * capability position. The specification says that the first dword is + * the lower half, and the second dword is the upper half. 
+ */ + pos += 4; + pci_read_config_dword(dev, pos, &dword); + dsn = (u64)dword; + pci_read_config_dword(dev, pos + 4, &dword); + dsn |= ((u64)dword) << 32; + + return dsn; +} +#endif + +#ifndef PCI_VPD_RO_KEYWORD_SERIALNO +#define PCI_VPD_RO_KEYWORD_SERIALNO "SN" +#endif + +#ifdef HAVE_OLD_VPD_FIND_TAG +#define pci_vpd_find_tag(buf, len, rdt) pci_vpd_find_tag(buf, 0, len, rdt) +#endif + +#ifndef HAVE_PCI_VPD_ALLOC + +#define BNXT_VPD_LEN 512 +static inline void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size) +{ + unsigned int len = BNXT_VPD_LEN; + void *buf; + int cnt; + + buf = kmalloc(len, GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + cnt = pci_read_vpd(dev, 0, len, buf); + if (cnt <= 0) { + kfree(buf); + return ERR_PTR(-EIO); + } + + if (size) + *size = cnt; + + return buf; +} + +static inline +int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, + const char *kw, unsigned int *size) +{ + int ro_start, infokw_start; + unsigned int ro_len, infokw_size; + + ro_start = pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_RO_DATA); + if (ro_start < 0) + return ro_start; + + ro_len = pci_vpd_lrdt_size(buf + ro_start); + ro_start += PCI_VPD_LRDT_TAG_SIZE; + + if (ro_start + ro_len > len) + return -EINVAL; + + infokw_start = pci_vpd_find_info_keyword(buf, ro_start, ro_len, kw); + if (infokw_start < 0) + return infokw_start; + + infokw_size = pci_vpd_info_field_size(buf + infokw_start); + infokw_start += PCI_VPD_INFO_FLD_HDR_SIZE; + + if (infokw_start + infokw_size > len) + return -EINVAL; + + if (size) + *size = infokw_size; + + return infokw_start; +} +#endif + +#ifndef HAVE_NDO_XDP +struct netdev_bpf; +#ifndef HAVE_EXT_NDO_XDP_XMIT +struct xdp_buff { + void *data; +}; +#endif +#elif !defined(HAVE_NDO_BPF) +#define ndo_bpf ndo_xdp +#define netdev_bpf netdev_xdp +#endif + +#ifndef XDP_PACKET_HEADROOM +#define XDP_PACKET_HEADROOM 0 +#endif + +#ifndef HAVE_XDP_FRAME +#define xdp_do_flush() +#ifndef HAVE_XDP_REDIRECT +struct bpf_prog; +static inline 
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *prog) +{ + return 0; +} +#endif +#else +#ifndef HAVE_XDP_DO_FLUSH +#define xdp_do_flush xdp_do_flush_map +#endif +#endif + +#ifndef HAVE_XDP_ACTION +enum xdp_action { + XDP_ABORTED = 0, + XDP_DROP, + XDP_PASS, + XDP_TX, +#ifndef HAVE_XDP_REDIRECT + XDP_REDIRECT, +#endif +}; +#else +#ifndef HAVE_XDP_REDIRECT +#define XDP_REDIRECT 4 +#endif +#endif + +#if defined(HAVE_NDO_XDP) && defined(HAVE_LEGACY_RCU_BH) +static inline u32 +bnxt_compat_bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) +{ + u32 act; + + rcu_read_lock(); + act = bpf_prog_run_xdp(prog, xdp); + rcu_read_unlock(); + if (act == XDP_REDIRECT) { + WARN_ONCE(1, "bnxt_en does not support XDP_REDIRECT on this kernel"); + return XDP_ABORTED; + } + return act; +} + +#define bpf_prog_run_xdp(prog, xdp) bnxt_compat_bpf_prog_run_xdp(prog, xdp) +#endif + +#if defined(HAVE_NDO_XDP) +#ifndef HAVE_BPF_TRACE +#define trace_xdp_exception(dev, xdp_prog, act) +#define bpf_warn_invalid_xdp_action(dev, xdp_prog, act) +#elif !defined(HAVE_BPF_WARN_INVALID_XDP_ACTION_EXT) +#define bpf_warn_invalid_xdp_action(dev, xdp_prog, act) \ + bpf_warn_invalid_xdp_action(act) +#endif +#endif + +#ifdef HAVE_XDP_RXQ_INFO +#ifndef HAVE_XDP_RXQ_INFO_IS_REG + +#define REG_STATE_REGISTERED 0x1 + +static inline bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq) +{ + return (xdp_rxq->reg_state == REG_STATE_REGISTERED); +} +#endif +#ifndef HAVE_NEW_XDP_RXQ_INFO_REG +#define xdp_rxq_info_reg(q, dev, qidx, napi_id) xdp_rxq_info_reg(q, dev, qidx) +#endif +#else +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; +}; +#endif + +#ifndef HAVE_XDP_MEM_TYPE +enum xdp_mem_type { + MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ + MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ + MEM_TYPE_PAGE_POOL, + MEM_TYPE_ZERO_COPY, + MEM_TYPE_MAX, +}; +static inline int xdp_rxq_info_reg_mem_model(struct 
xdp_rxq_info *xdp_rxq, + enum xdp_mem_type type, void *allocator) +{ + return 0; +} +#endif + +#if defined(HAVE_NDO_XDP) && !defined(HAVE_XDP_INIT_BUFF) +static __always_inline void +xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) +{ +#ifdef HAVE_XDP_FRAME_SZ + xdp->frame_sz = frame_sz; +#endif +#ifdef HAVE_XDP_RXQ_INFO + xdp->rxq = rxq; +#endif +} + +#ifndef HAVE_XDP_RXQ_INFO +#define xdp_init_buff(xdp, frame_sz, rxq) xdp_init_buff(xdp, frame_sz, NULL) +#endif + +static __always_inline void +xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start, + int headroom, int data_len, const bool meta_valid) +{ + unsigned char *data = hard_start + headroom; + +#if XDP_PACKET_HEADROOM + xdp->data_hard_start = hard_start; +#endif + xdp->data = data; + xdp->data_end = data + data_len; +#ifdef HAVE_XDP_DATA_META + xdp->data_meta = meta_valid ? data : data + 1; +#endif +} +#endif + +#ifndef HAVE_XDP_SHARED_INFO_FROM_BUFF +static inline struct skb_shared_info * +xdp_get_shared_info_from_buff(struct xdp_buff *xdp) +{ + return NULL; +} +#endif + +#ifndef HAVE_XDP_MULTI_BUFF +static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) +{ + return false; +} + +static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp) +{ +} + +static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp) +{ +} + +static inline void +xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags, + unsigned int size, unsigned int truesize, + bool pfmemalloc) +{ +} +#endif /* HAVE_XDP_MULTI_BUFF */ +#if !defined(CONFIG_PAGE_POOL) || !defined(HAVE_PAGE_POOL_RELEASE_PAGE) || defined(HAVE_SKB_MARK_RECYCLE) +#define page_pool_release_page(page_pool, page) +#endif + +#ifndef HAVE_TCF_EXTS_HAS_ACTIONS +#define tcf_exts_has_actions(x) (!tc_no_actions(x)) +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && !defined(HAVE_FLOW_OFFLOAD_H) && !defined(HAVE_TCF_STATS_UPDATE) +static inline void +tcf_exts_stats_update(const struct tcf_exts 
*exts, + u64 bytes, u64 packets, u64 lastuse) +{ +#ifdef CONFIG_NET_CLS_ACT + int i; + + preempt_disable(); + + for (i = 0; i < exts->nr_actions; i++) { + struct tc_action *a = exts->actions[i]; + + tcf_action_stats_update(a, bytes, packets, lastuse); + } + + preempt_enable(); +#endif +} +#endif + +#ifndef HAVE_TC_CB_REG_EXTACK +#define tcf_block_cb_register(block, cb, cb_ident, cb_priv, extack) \ + tcf_block_cb_register(block, cb, cb_ident, cb_priv) +#endif + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#if !defined(HAVE_TC_CLS_CAN_OFFLOAD_AND_CHAIN0) && defined(HAVE_TC_SETUP_BLOCK) +static inline bool +tc_cls_can_offload_and_chain0(const struct net_device *dev, + struct tc_cls_common_offload *common) +{ + if (!tc_can_offload(dev)) + return false; + if (common->chain_index) + return false; + return true; +} +#endif + +#ifdef HAVE_TC_CB_EGDEV + +static inline void bnxt_reg_egdev(const struct net_device *dev, + void *cb, void *cb_priv, int vf_idx) +{ + if (tc_setup_cb_egdev_register(dev, (tc_setup_cb_t *)cb, cb_priv)) + netdev_warn(dev, + "Failed to register egdev for VF-Rep: %d", vf_idx); +} + +static inline void bnxt_unreg_egdev(const struct net_device *dev, + void *cb, void *cb_priv) +{ + tc_setup_cb_egdev_unregister(dev, (tc_setup_cb_t *)cb, cb_priv); +} + +#else + +static inline void bnxt_reg_egdev(const struct net_device *dev, + void *cb, void *cb_priv, int vf_idx) +{ +} + +static inline void bnxt_unreg_egdev(const struct net_device *dev, + void *cb, void *cb_priv) +{ +} + +#endif /* HAVE_TC_CB_EGDEV */ + +#ifdef HAVE_TC_SETUP_BLOCK +#ifndef HAVE_SETUP_TC_BLOCK_HELPER + +static inline int +flow_block_cb_setup_simple(struct tc_block_offload *f, + struct list_head *driver_block_list, + tc_setup_cb_t *cb, void *cb_ident, void *cb_priv, + bool ingress_only) +{ + if (ingress_only && + f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, cb_ident, cb_priv, + 
f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, cb_ident); + return 0; + default: + return -EOPNOTSUPP; + } +} + +#endif /* !HAVE_SETUP_TC_BLOCK_HELPER */ +#endif /* HAVE_TC_SETUP_BLOCK */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +#ifndef BIT_ULL +#define BIT_ULL(nr) (1ULL << (nr)) +#endif + +#ifndef HAVE_SIMPLE_OPEN +static inline int simple_open(struct inode *inode, struct file *file) +{ + if (inode->i_private) + file->private_data = inode->i_private; + return 0; +} +#endif + +#if !defined(HAVE_DEVLINK_PORT_ATTRS_SET_NEW) && defined(HAVE_DEVLINK_PORT_ATTRS) +#define devlink_port_attrs_set(dl_port, attrs) \ + devlink_port_attrs_set(dl_port, (*attrs).flavour, \ + (*attrs).phys.port_number, false, 0, \ + (*attrs).switch_id.id, (*attrs).switch_id.id_len) +#endif /* !HAVE_DEVLINK_PORT_ATTRS_SET_NEW && HAVE_DEVLINK_PORT_ATTRS */ + +#if !defined(HAVE_DEVLINK_PARAM_PUBLISH) && defined(HAVE_DEVLINK_PARAM) +static inline void devlink_params_publish(struct devlink *devlink) +{ +} +#endif + +#ifdef HAVE_DEVLINK_HEALTH_REPORT +#ifndef HAVE_DEVLINK_HEALTH_REPORTER_STATE_UPDATE + +#define DEVLINK_HEALTH_REPORTER_STATE_HEALTHY 0 +#define DEVLINK_HEALTH_REPORTER_STATE_ERROR 1 + +static inline void +devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, + int state) +{ +} +#endif + +#ifndef HAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE +static inline void +devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter) +{ +} +#endif + +#ifndef HAVE_DEVLINK_HEALTH_REPORT_EXTACK +#define bnxt_fw_diagnose(reporter, priv_ctx, extack) \ + bnxt_fw_diagnose(reporter, priv_ctx) +#define bnxt_fw_dump(reporter, fmsg, priv_ctx, extack) \ + bnxt_fw_dump(reporter, fmsg, priv_ctx) +#define bnxt_fw_recover(reporter, priv_ctx, extack) \ + bnxt_fw_recover(reporter, priv_ctx) +#define bnxt_hw_diagnose(reporter, priv_ctx, extack) \ + bnxt_hw_diagnose(reporter, priv_ctx) +#define bnxt_hw_recover(reporter, priv_ctx, extack) \ + 
bnxt_hw_recover(reporter, priv_ctx) +#endif /* HAVE_DEVLINK_HEALTH_REPORT_EXTACK */ +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ + +#ifdef SET_NETDEV_DEVLINK_PORT +#define HAVE_SET_NETDEV_DEVLINK_PORT 1 +#else +#define SET_NETDEV_DEVLINK_PORT(dev, port) +#endif + +#ifndef mmiowb +#define mmiowb() do {} while (0) +#endif + +#ifndef HAVE_ETH_GET_HEADLEN_NEW +#define eth_get_headlen(dev, data, len) eth_get_headlen(data, len) +#endif + +#ifndef HAVE_NETDEV_XMIT_MORE +#ifdef HAVE_SKB_XMIT_MORE +#define netdev_xmit_more() skb->xmit_more +#else +#define netdev_xmit_more() 0 +#endif +#ifndef HAVE_NETIF_XMIT_STOPPED +#define netif_xmit_stopped(q) 0 +#endif +#endif + +#ifndef HAVE_NDO_TX_TIMEOUT_QUEUE +#define bnxt_tx_timeout(dev, queue) bnxt_tx_timeout(dev) +#endif + +#ifdef HAVE_NDO_ADD_VXLAN +#ifndef HAVE_VXLAN_GET_RX_PORT +static inline void vxlan_get_rx_port(struct net_device *netdev) +{ +} +#endif +#endif + +#ifndef HAVE_DEVLINK_VALIDATE_NEW +#define bnxt_dl_msix_validate(dl, id, val, extack) \ + bnxt_dl_msix_validate(dl, id, val) +#endif + +#ifndef kfree_rcu +#define kfree_rcu(ptr, rcu_head) \ + do { \ + synchronize_rcu(); \ + kfree(ptr); \ + } while (0) +#endif + +#ifdef HAVE_DEVLINK_FLASH_UPDATE +#ifndef HAVE_DEVLINK_FLASH_UPDATE_BEGIN +static inline void devlink_flash_update_begin_notify(struct devlink *devlink) +{ +} + +static inline void devlink_flash_update_end_notify(struct devlink *devlink) +{ +} +#endif /* HAVE_DEVLINK_FLASH_UPDATE_BEGIN */ + +#ifndef HAVE_DEVLINK_FLASH_UPDATE_STATUS +static inline void devlink_flash_update_status_notify(struct devlink *devlink, + const char *status_msg, + const char *component, + unsigned long done, + unsigned long total) +{ +} +#endif +#endif /* HAVE_DEVLINK_FLASH_UPDATE */ + +#ifdef HAVE_DEVLINK_INFO +#ifndef DEVLINK_INFO_VERSION_GENERIC_ASIC_ID +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_ID "asic.id" +#define DEVLINK_INFO_VERSION_GENERIC_ASIC_REV "asic.rev" +#define DEVLINK_INFO_VERSION_GENERIC_FW "fw" +#endif +#ifndef 
DEVLINK_INFO_VERSION_GENERIC_FW_PSID +#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid" +#endif +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_ROCE +#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce" +#endif +#ifndef DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API +#define DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API "fw.mgmt.api" +#endif + +#ifndef HAVE_DEVLINK_INFO_BSN_PUT +static inline int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return 0; +} +#endif +#endif /* HAVE_DEVLINK_INFO */ + +#ifdef HAVE_DEVLINK_REGISTER_DEV +static inline struct devlink * +bnxt_compat_devlink_alloc(const struct devlink_ops *ops, size_t size, + struct device *dev) +{ + struct devlink *d = devlink_alloc(ops, size); + + d->dev = dev; + return d; +} + +#define devlink_register(dl) devlink_register(dl, &bp->pdev->dev) +#define devlink_alloc bnxt_compat_devlink_alloc +#endif /* HAVE_DEVLINK_REGISTER_NEW */ + +#ifndef HAVE_PCIE_FLR +static inline int pcie_flr(struct pci_dev *dev) +{ + pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); + + msleep(100); + + return 0; +} +#endif /* pcie_flr */ + +#ifndef fallthrough +#if defined __has_attribute +#ifndef __GCC4_has_attribute___fallthrough__ +#define __GCC4_has_attribute___fallthrough__ 0 +#endif +#if __has_attribute(__fallthrough__) +#define fallthrough __attribute__((__fallthrough__)) +#else +#define fallthrough do {} while (0) /* fall through */ +#endif +#else +#define fallthrough do {} while (0) /* fall through */ +#endif +#endif + +#ifndef __ALIGN_KERNEL_MASK +#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif +#ifndef __ALIGN_KERNEL +#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) +#endif +#ifndef ALIGN_DOWN +#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) +#endif + +struct bnxt_compat_dma_pool { + struct dma_pool *pool; + size_t size; +}; + +static inline struct bnxt_compat_dma_pool* +bnxt_compat_dma_pool_create(const 
char *name, struct device *dev, size_t size, + size_t align, size_t allocation) +{ + struct bnxt_compat_dma_pool *wrapper; + + wrapper = kmalloc_node(sizeof(*wrapper), GFP_KERNEL, dev_to_node(dev)); + if (!wrapper) + return NULL; + + wrapper->pool = dma_pool_create(name, dev, size, align, allocation); + wrapper->size = size; + + return wrapper; +} + +static inline void +bnxt_compat_dma_pool_destroy(struct bnxt_compat_dma_pool *wrapper) +{ + dma_pool_destroy(wrapper->pool); + kfree(wrapper); +} + +static inline void * +bnxt_compat_dma_pool_alloc(struct bnxt_compat_dma_pool *wrapper, + gfp_t mem_flags, dma_addr_t *handle) +{ + void *mem; + + mem = dma_pool_alloc(wrapper->pool, mem_flags & ~__GFP_ZERO, handle); + if (mem_flags & __GFP_ZERO) + memset(mem, 0, wrapper->size); + return mem; +} + +static inline void +bnxt_compat_dma_pool_free(struct bnxt_compat_dma_pool *wrapper, void *vaddr, + dma_addr_t addr) +{ + dma_pool_free(wrapper->pool, vaddr, addr); +} + +#define dma_pool_create bnxt_compat_dma_pool_create +#define dma_pool_destroy bnxt_compat_dma_pool_destroy +#define dma_pool_alloc bnxt_compat_dma_pool_alloc +#define dma_pool_free bnxt_compat_dma_pool_free +#define dma_pool bnxt_compat_dma_pool + +#ifndef HAVE_NETIF_NAPI_DEL_NEW +static inline void __netif_napi_del(struct napi_struct *napi) +{ + napi_hash_del(napi); + netif_napi_del(napi); +} +#endif /* HAVE_NETIF_NAPI_DEL_NEW */ + +#ifdef HAVE_NETIF_NAPI_ADD_WITH_WEIGHT_ARG +#define ___netif_napi_add(ndev, napi, poll) netif_napi_add(ndev, napi, poll, 64) +#else +#define ___netif_napi_add(ndev, napi, poll) netif_napi_add(ndev, napi, poll) +#endif /* HAVE_NETIF_NAPI_ADD_WITH_WEIGHT_ARG */ + +#include "bnxt_compat_link_modes.h" + +#ifndef HAVE_ETHTOOL_LINK_KSETTINGS +struct ethtool_link_settings { + u32 cmd; + u32 speed; + u8 duplex; + u8 port; + u8 phy_address; + u8 autoneg; +}; + +struct ethtool_link_ksettings { + struct ethtool_link_settings base; + struct { + DECLARE_BITMAP(supported, 
__ETHTOOL_LINK_MODE_MASK_NBITS); + DECLARE_BITMAP(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + DECLARE_BITMAP(lp_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + } link_modes; +}; + +#define ethtool_link_ksettings_zero_link_mode(ptr, name) \ + (ptr)->link_modes.name[0] = 0 + +int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); +int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +#endif + +#if !defined(HAVE_ETHTOOL_RXFH_PARAM) +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) +int bnxt_set_rxfh_context(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc, u32 *rss_context, + bool delete); +int bnxt_get_rxfh_context(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc, u32 rss_context); +#endif +int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc); +int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc); +#endif + +#if !defined(HAVE_ETHTOOL_KEEE) +int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata); +int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata); +#endif + +#if defined(HAVE_DEVLINK) && !defined(HAVE_DEVLINK_RELOAD_DISABLE) +#define devlink_reload_enable(x) +#define devlink_reload_disable(x) +#endif + +#if defined(HAVE_DEVLINK) && !defined(HAVE_DEVLINK_SET_FEATURES) +#define devlink_set_features(x, y) +#endif + +#ifndef HAVE_STRSCPY +static inline ssize_t strscpy(char *dest, const char *src, size_t count) +{ + int len = strlcpy(dest, src, count); + + return (!count || count <= len) ? 
-E2BIG : len; +} +#elif defined(HAVE_OLD_STRSCPY) +#define strscpy(d, s, c) strlcpy(d, s, c) +#endif + +#ifndef HAVE_ETHTOOL_PARAMS_FROM_LINK_MODE +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +void +ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, + enum ethtool_link_mode_bit_indices link_mode); +#endif + +static inline void bnxt_compat_linkmode_set_bit(int nr, unsigned long *addr) +{ + if (nr < __ETHTOOL_LINK_MODE_MASK_NBITS) + __set_bit(nr, addr); +} + +static inline int bnxt_compat_linkmode_test_bit(int nr, const unsigned long *addr) +{ + return (nr < __ETHTOOL_LINK_MODE_MASK_NBITS) ? test_bit(nr, addr) : 0; +} +#define linkmode_set_bit bnxt_compat_linkmode_set_bit +#define linkmode_test_bit bnxt_compat_linkmode_test_bit + +#if !defined(HAVE_FLOW_DISSECTOR) || \ + !defined(HAVE_SKB_FLOW_DISSECT_WITH_FLAGS) || \ + !defined(HAVE_FLOW_KEY_CONTROL_FLAGS) + +struct bnxt_compat_key_control { + u32 flags; +}; + +struct bnxt_compat_key_basic { + __be16 n_proto; + u8 ip_proto; +}; + +struct bnxt_compat_key_ports { + __be16 src; + __be16 dst; +}; + +struct bnxt_compat_key_ipv4 { + __be32 src; + __be32 dst; +}; + +struct bnxt_compat_key_ipv6 { + struct in6_addr src; + struct in6_addr dst; +}; + +struct bnxt_compat_key_addrs { + union { + struct bnxt_compat_key_ipv4 v4addrs; + struct bnxt_compat_key_ipv6 v6addrs; + }; +}; + +struct bnxt_compat_flow_keys { + struct bnxt_compat_key_control control; +#define FLOW_KEYS_HASH_START_FIELD basic + struct bnxt_compat_key_basic basic; + struct bnxt_compat_key_ports ports; + struct bnxt_compat_key_addrs addrs; +}; + +#define FLOW_KEYS_HASH_OFFSET \ + offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) + +#ifdef HAVE_FLOW_KEYS +#include +#endif + +static inline bool +skb_compat_flow_dissect_flow_keys(const struct sk_buff *skb, + struct bnxt_compat_flow_keys *flow, + unsigned int flags) +{ +#if defined(HAVE_FLOW_KEYS) + /* this is the legacy structure from flow_keys.h */ + struct 
flow_keys legacy_flow = { 0 }; + + if (skb->protocol != htons(ETH_P_IP)) + return false; + + if (!skb_flow_dissect(skb, &legacy_flow)) + return false; + + flow->addrs.v4addrs.src = legacy_flow.src; + flow->addrs.v4addrs.dst = legacy_flow.dst; + flow->ports.src = legacy_flow.port16[0]; + flow->ports.dst = legacy_flow.port16[1]; + flow->basic.n_proto = htons(ETH_P_IP); + flow->basic.ip_proto = legacy_flow.ip_proto; + flow->control.flags = 0; + + return true; +#elif defined(HAVE_FLOW_DISSECTOR) + /* this is the older version of flow_keys, which excludes flags in + * flow_dissector_key_control, as defined in 4.2's flow_dissector.h + */ + struct flow_keys legacy_flow; + + memset(&legacy_flow, 0, sizeof(legacy_flow)); + if (!skb_flow_dissect_flow_keys(skb, &legacy_flow)) + return false; + + if (legacy_flow.basic.ip_proto == htons(ETH_P_IP)) { + flow->addrs.v4addrs.src = legacy_flow.addrs.v4addrs.src; + flow->addrs.v4addrs.dst = legacy_flow.addrs.v4addrs.dst; + } else if (legacy_flow.basic.ip_proto == htons(ETH_P_IPV6)) { + flow->addrs.v6addrs.src = legacy_flow.addrs.v6addrs.src; + flow->addrs.v6addrs.dst = legacy_flow.addrs.v6addrs.dst; + } else { + return false; + } + + flow->ports.src = legacy_flow.ports.src; + flow->ports.dst = legacy_flow.ports.dst; + flow->basic.n_proto = legacy_flow.basic.n_proto; + flow->basic.ip_proto = legacy_flow.basic.ip_proto; + flow->control.flags = 0; + + return true; +#else + return false; +#endif +} + +#define skb_flow_dissect_flow_keys skb_compat_flow_dissect_flow_keys + +#ifndef HAVE_FLOW_KEY_CONTROL_FLAGS +#define FLOW_DIS_IS_FRAGMENT 1 +#define FLOW_DIS_ENCAPSULATION 4 +#endif +#define flow_dissector_key_ports bnxt_compat_key_ports +#define flow_dissector_key_addrs bnxt_compat_key_addrs +#define flow_keys bnxt_compat_flow_keys +#endif + +#ifndef HAVE_ETH_HW_ADDR_SET +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + memcpy(dev->dev_addr, addr, ETH_ALEN); +} +#endif + +#ifndef HAVE_BITMAP_ZALLOC + +static 
inline unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags) +{ + return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long), + flags); +} + +static inline unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags) +{ + return bitmap_alloc(nbits, flags | __GFP_ZERO); +} + +static inline void bitmap_free(const unsigned long *bitmap) +{ + kfree(bitmap); +} + +#endif + +#ifndef HAVE_DEFINE_STATIC_KEY +#ifndef HAVE_STATIC_KEY_INITIALIZED +#define STATIC_KEY_CHECK_USE() +#endif +#define DEFINE_STATIC_KEY_FALSE(name) \ + struct static_key name = STATIC_KEY_INIT_FALSE +#define DECLARE_STATIC_KEY_FALSE(name) \ + extern struct static_key name +#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key)) +static inline void static_branch_enable(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(); + + if (atomic_read(&key->enabled) != 0) { + WARN_ON_ONCE(atomic_read(&key->enabled) != 1); + return; + } + atomic_set(&key->enabled, 1); +} + +static inline void static_branch_disable(struct static_key *key) +{ + STATIC_KEY_CHECK_USE(); + + if (atomic_read(&key->enabled) != 1) { + WARN_ON_ONCE(atomic_read(&key->enabled) != 0); + return; + } + atomic_set(&key->enabled, 0); +} +#else +#if !defined(HAVE_DECLARE_STATIC_KEY) +#define DECLARE_STATIC_KEY_FALSE(name) \ + extern struct static_key_false name +#endif +#endif /* !defined(HAVE_DEFINE_STATIC_KEY) */ + +#ifdef HAVE_ARTNS_TO_TSC +#ifndef CONFIG_X86 +static inline struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) +{ + WARN_ONCE(1, "%s is only supported on X86", __func__); + return (struct system_counterval_t){}; +} +#endif /* CONFIG_X86 */ +#endif /* HAVE_ARTNS_TO_TSC */ + +#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) && \ + (defined(UEK_KABI_UNIQUE_ID))) +#define UEK_KERNEL_WITH_NS_DMA_BUF +#endif + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)) || \ + (defined(RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_VERSION(9, 0) <= RHEL_RELEASE_CODE)) || \ + (defined(CONFIG_SUSE_KERNEL) && 
\ + ((CONFIG_SUSE_VERSION == 15) && (CONFIG_SUSE_PATCHLEVEL >= 5))) || \ + (defined(UEK_KERNEL_WITH_NS_DMA_BUF)) +#define HAVE_MODULE_IMPORT_NS_DMA_BUF +#endif + +#ifndef NL_SET_ERR_MSG_MOD +#define NL_SET_ERR_MSG_MOD(extack, msg) +#endif + +#ifndef HAVE_NETLINK_EXT_ACK +struct netlink_ext_ack { +}; +#endif + +#ifndef __struct_group +#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ + union { \ + struct { MEMBERS } ATTRS; \ + struct TAG { MEMBERS } ATTRS NAME; \ + } +#endif /* __struct_group */ +#ifndef struct_group_attr +#define struct_group_attr(NAME, ATTRS, MEMBERS...) \ + __struct_group(/* no tag */, NAME, ATTRS, MEMBERS) +#endif /* struct_group_attr */ + +#ifndef HAVE_SKB_MARK_RECYCLE +#define skb_mark_for_recycle(skb) +#endif +#ifdef HAVE_OLD_SKB_MARK_RECYCLE +#define skb_mark_for_recycle(skb) skb_mark_for_recycle(skb, page, rxr->page_pool) +#endif + +#ifdef CONFIG_BNXT_HWMON +#include +#ifndef HWMON_CHANNEL_INFO +#define HWMON_CHANNEL_INFO(stype, ...) \ + (&(struct hwmon_channel_info) { \ + .type = hwmon_##stype, \ + .config = (u32 []) { \ + __VA_ARGS__, 0 \ + } \ + }) +#endif /* HWMON_CHANNEL_INFO */ + +#ifndef HAVE_HWMON_NOTIFY_EVENT +static inline void hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel) +{ +} +#endif /* HAVE_HWMON_NOTIFY_EVENT */ +#endif /* CONFIG_BNXT_HWMON */ + +#if !defined(HAVE_PAGE_POOL_PP_FRAG_BIT) +#define PP_FLAG_PAGE_FRAG 0 + +#define page_pool_dev_alloc_frag(page_pool, offset, size) NULL +#endif + +#ifndef PP_FLAG_DMA_SYNC_DEV +#define PP_FLAG_DMA_SYNC_DEV 0 +#endif + +#if (defined(GRO_MAX_SIZE) && (GRO_MAX_SIZE > 65536)) +#define HAVE_IPV6_BIG_TCP +#endif + +#ifndef HAVE_IPV6_HOPOPT_JUMBO_REMOVE +#ifdef DHAVE_IPV6_BIG_TCP +static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb) +{ + const int hophdr_len = sizeof(struct hop_jumbo_hdr); + int nexthdr = ipv6_has_hopopt_jumbo(skb); + struct ipv6hdr *h6; + + if (!nexthdr) + return 0; + + if (skb_cow_head(skb, 0)) + return -1; + 
+ /* Remove the HBH header. + * Layout: [Ethernet header][IPv6 header][HBH][L4 Header] + */ + memmove(skb_mac_header(skb) + hophdr_len, skb_mac_header(skb), + skb_network_header(skb) - skb_mac_header(skb) + + sizeof(struct ipv6hdr)); + + __skb_pull(skb, hophdr_len); + skb->network_header += hophdr_len; + skb->mac_header += hophdr_len; + + h6 = ipv6_hdr(skb); + h6->nexthdr = nexthdr; + + return 0; +} +#else +static inline int ipv6_hopopt_jumbo_remove(struct sk_buff *skb) +{ + return 0; +} +#endif /* DHAVE_IPV6_BIG_TCP */ +#endif /* HAVE_IPV6_HOPOPT_JUMBO_REMOVE */ + +#ifndef HAVE_XDP_SET_REDIR_TARGET +static inline void +xdp_features_set_redirect_target(struct net_device *dev, bool support_sg) +{ +} + +static inline void +xdp_features_clear_redirect_target(struct net_device *dev) +{ +} +#endif /* HAVE_XDP_SET_REDIR_TARGET */ + +#ifndef HAVE_PERNET_HASH +#define __inet6_lookup_established(n, h, sa, sp, da, dp, dif, sdif) \ + __inet6_lookup_established(n, &tcp_hashinfo, sa, sp, da, dp, dif, sdif) +#define inet_lookup_established(n, h, sa, sp, da, dp, dif) \ + inet_lookup_established(n, &tcp_hashinfo, sa, sp, da, dp, dif) +#endif + +#ifndef HAVE_SYSFS_EMIT +#define sysfs_emit sprintf +#endif /* HAVE_SYSFS_EMIT */ + +#ifndef class_create +#define class_create(owner, name) class_create(name) +#endif + +#ifndef HAVE_PCIE_ERROR_REPORTING +#define pci_enable_pcie_error_reporting(pdev) +#define pci_disable_pcie_error_reporting(pdev) +#endif + +#ifndef HAVE_TLS_IS_SKB_TX_DEVICE_OFFLOADED +#define tls_is_skb_tx_device_offloaded(skb) tls_is_sk_tx_device_offloaded((skb)->sk) +#endif + +#ifndef HAVE_XDP_DATA_META +#define skb_metadata_set(skb, metasize) +#endif + +#ifndef HAVE_TXQ_MAYBE_WAKE +#define __netif_txq_maybe_wake(txq, get_desc, start_thrs, down_cond) \ + ({ \ + int _res; \ + \ + _res = -1; \ + if (likely(get_desc > start_thrs)) { \ + /* Make sure that anybody stopping the queue after \ + * this sees the new next_to_clean. 
\ + */ \ + smp_mb(); \ + _res = 1; \ + if (unlikely(netif_tx_queue_stopped(txq)) && \ + !(down_cond)) { \ + netif_tx_wake_queue(txq); \ + _res = 0; \ + } \ + } \ + _res; \ + }) +#endif /* HAVE_TXQ_MAYBE_WAKE */ + +#ifndef HAVE_NEW_QUEUE_STOPWAKE +static inline void +netdev_txq_completed_mb(struct netdev_queue *dev_queue, + unsigned int pkts, unsigned int bytes) +{ + if (IS_ENABLED(CONFIG_BQL)) + netdev_tx_completed_queue(dev_queue, pkts, bytes); + else if (bytes) + smp_mb(); +} + +#define __netif_txq_completed_wake(txq, pkts, bytes, \ + get_desc, start_thrs, down_cond) \ + ({ \ + int _res; \ + \ + /* Report to BQL and piggy back on its barrier. \ + * Barrier makes sure that anybody stopping the queue \ + * after this point sees the new consumer index. \ + * Pairs with barrier in netif_txq_try_stop(). \ + */ \ + netdev_txq_completed_mb(txq, pkts, bytes); \ + \ + _res = -1; \ + if (pkts && likely(get_desc > start_thrs)) { \ + _res = 1; \ + if (unlikely(netif_tx_queue_stopped(txq)) && \ + !(down_cond)) { \ + netif_tx_wake_queue(txq); \ + _res = 0; \ + } \ + } \ + _res; \ + }) + +#define netif_txq_try_stop(txq, get_desc, start_thrs) \ + ({ \ + int _res; \ + \ + netif_tx_stop_queue(txq); \ + /* Producer index and stop bit must be visible \ + * to consumer before we recheck. \ + * Pairs with a barrier in __netif_txq_completed_wake(). \ + */ \ + smp_mb__after_atomic(); \ + \ + /* We need to check again in a case another \ + * CPU has just made room available. 
\ + */ \ + _res = 0; \ + if (unlikely(get_desc >= start_thrs)) { \ + netif_tx_start_queue(txq); \ + _res = -1; \ + } \ + _res; \ + }) \ + +#endif /* HAVE_NEW_QUEUE_STOPWAKE */ + +#ifndef __counted_by +#define __counted_by(member) +#endif + +#ifndef struct_size +#define struct_size(p, member, n) (sizeof(*(p)) + sizeof(*(p)->member) * (n)) +#endif + +#endif /* _BNXT_COMPAT_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.c new file mode 100644 index 000000000000..3b6afbc026f2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.c @@ -0,0 +1,149 @@ +/* THIS A GENERATED FILE - DO NOT EDIT! + * + * To regenerate, use: update_compat.sh + */ + +#define __LINK_MODE_LANES_CR 1 +#define __LINK_MODE_LANES_CR2 2 +#define __LINK_MODE_LANES_CR4 4 +#define __LINK_MODE_LANES_CR8 8 +#define __LINK_MODE_LANES_DR 1 +#define __LINK_MODE_LANES_DR2 2 +#define __LINK_MODE_LANES_DR4 4 +#define __LINK_MODE_LANES_DR8 8 +#define __LINK_MODE_LANES_KR 1 +#define __LINK_MODE_LANES_KR2 2 +#define __LINK_MODE_LANES_KR4 4 +#define __LINK_MODE_LANES_KR8 8 +#define __LINK_MODE_LANES_SR 1 +#define __LINK_MODE_LANES_SR2 2 +#define __LINK_MODE_LANES_SR4 4 +#define __LINK_MODE_LANES_SR8 8 +#define __LINK_MODE_LANES_ER 1 +#define __LINK_MODE_LANES_KX 1 +#define __LINK_MODE_LANES_KX4 4 +#define __LINK_MODE_LANES_LR 1 +#define __LINK_MODE_LANES_LR4 4 +#define __LINK_MODE_LANES_LR4_ER4 4 +#define __LINK_MODE_LANES_LR_ER_FR 1 +#define __LINK_MODE_LANES_LR2_ER2_FR2 2 +#define __LINK_MODE_LANES_LR4_ER4_FR4 4 +#define __LINK_MODE_LANES_LR8_ER8_FR8 8 +#define __LINK_MODE_LANES_LRM 1 +#define __LINK_MODE_LANES_MLD2 2 +#define __LINK_MODE_LANES_T 1 +#define __LINK_MODE_LANES_T1 1 +#define __LINK_MODE_LANES_X 1 +#define __LINK_MODE_LANES_FX 1 +#define __DUPLEX_Half DUPLEX_HALF +#define __DUPLEX_Full DUPLEX_FULL +#define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ + 
[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ + .speed = SPEED_ ## _speed, \ + .lanes = __LINK_MODE_LANES_ ## _type, \ + .duplex = __DUPLEX_ ## _duplex \ + } +#define __DEFINE_SPECIAL_MODE_PARAMS(_mode) \ + [ETHTOOL_LINK_MODE_ ## _mode ## _BIT] = { \ + .speed = SPEED_UNKNOWN, \ + .lanes = 0, \ + .duplex = DUPLEX_UNKNOWN, \ + } +const struct link_mode_info link_mode_params[] = { + __DEFINE_LINK_MODE_PARAMS(10, T, Half), + __DEFINE_LINK_MODE_PARAMS(10, T, Full), + __DEFINE_LINK_MODE_PARAMS(100, T, Half), + __DEFINE_LINK_MODE_PARAMS(100, T, Full), + __DEFINE_LINK_MODE_PARAMS(1000, T, Half), + __DEFINE_LINK_MODE_PARAMS(1000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Autoneg), + __DEFINE_SPECIAL_MODE_PARAMS(TP), + __DEFINE_SPECIAL_MODE_PARAMS(AUI), + __DEFINE_SPECIAL_MODE_PARAMS(MII), + __DEFINE_SPECIAL_MODE_PARAMS(FIBRE), + __DEFINE_SPECIAL_MODE_PARAMS(BNC), + __DEFINE_LINK_MODE_PARAMS(10000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Pause), + __DEFINE_SPECIAL_MODE_PARAMS(Asym_Pause), + __DEFINE_LINK_MODE_PARAMS(2500, X, Full), + __DEFINE_SPECIAL_MODE_PARAMS(Backplane), + __DEFINE_LINK_MODE_PARAMS(1000, KX, Full), + __DEFINE_LINK_MODE_PARAMS(10000, KX4, Full), + __DEFINE_LINK_MODE_PARAMS(10000, KR, Full), + [ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = { + .speed = SPEED_10000, + .lanes = 1, + .duplex = DUPLEX_FULL, + }, + __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full), + __DEFINE_LINK_MODE_PARAMS(20000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(40000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(40000, LR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(56000, LR4, Full), + __DEFINE_LINK_MODE_PARAMS(25000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(25000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(25000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, CR2, Full), + 
__DEFINE_LINK_MODE_PARAMS(50000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR4_ER4, Full), + __DEFINE_LINK_MODE_PARAMS(50000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(1000, X, Full), + __DEFINE_LINK_MODE_PARAMS(10000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, LR, Full), + __DEFINE_LINK_MODE_PARAMS(10000, LRM, Full), + __DEFINE_LINK_MODE_PARAMS(10000, ER, Full), + __DEFINE_LINK_MODE_PARAMS(2500, T, Full), + __DEFINE_LINK_MODE_PARAMS(5000, T, Full), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_NONE), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_RS), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_BASER), + __DEFINE_LINK_MODE_PARAMS(50000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(50000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100, T1, Full), + __DEFINE_LINK_MODE_PARAMS(1000, T1, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR8_ER8_FR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full), + __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS), + __DEFINE_LINK_MODE_PARAMS(100000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR, Full), + 
__DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(100, FX, Half), + __DEFINE_LINK_MODE_PARAMS(100, FX, Full), +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.h new file mode 100644 index 000000000000..847b68c95ef2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_compat_link_modes.h @@ -0,0 +1,208 @@ +/* THIS A GENERATED FILE - DO NOT EDIT! 
+ * + * To regenerate, use: update_compat.sh + */ + +#ifdef HAVE_ETHTOOL_LINK_KSETTINGS +/* can't determine existing enum contents using preprocessor, + * so override or supplement as appropriate using #define" + */ +#define ETHTOOL_LINK_MODE_10baseT_Half_BIT 0 +#define ETHTOOL_LINK_MODE_10baseT_Full_BIT 1 +#define ETHTOOL_LINK_MODE_100baseT_Half_BIT 2 +#define ETHTOOL_LINK_MODE_100baseT_Full_BIT 3 +#define ETHTOOL_LINK_MODE_1000baseT_Half_BIT 4 +#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5 +#define ETHTOOL_LINK_MODE_Autoneg_BIT 6 +#define ETHTOOL_LINK_MODE_TP_BIT 7 +#define ETHTOOL_LINK_MODE_AUI_BIT 8 +#define ETHTOOL_LINK_MODE_MII_BIT 9 +#define ETHTOOL_LINK_MODE_FIBRE_BIT 10 +#define ETHTOOL_LINK_MODE_BNC_BIT 11 +#define ETHTOOL_LINK_MODE_10000baseT_Full_BIT 12 +#define ETHTOOL_LINK_MODE_Pause_BIT 13 +#define ETHTOOL_LINK_MODE_Asym_Pause_BIT 14 +#define ETHTOOL_LINK_MODE_2500baseX_Full_BIT 15 +#define ETHTOOL_LINK_MODE_Backplane_BIT 16 +#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17 +#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18 +#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19 +#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20 +#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21 +#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22 +#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23 +#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24 +#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25 +#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26 +#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27 +#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28 +#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29 +#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30 +#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31 +#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32 +#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33 +#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34 +#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35 +#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36 +#define 
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37 +#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38 +#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 +#define ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT 40 +#define ETHTOOL_LINK_MODE_1000baseX_Full_BIT 41 +#define ETHTOOL_LINK_MODE_10000baseCR_Full_BIT 42 +#define ETHTOOL_LINK_MODE_10000baseSR_Full_BIT 43 +#define ETHTOOL_LINK_MODE_10000baseLR_Full_BIT 44 +#define ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT 45 +#define ETHTOOL_LINK_MODE_10000baseER_Full_BIT 46 +#define ETHTOOL_LINK_MODE_2500baseT_Full_BIT 47 +#define ETHTOOL_LINK_MODE_5000baseT_Full_BIT 48 +#define ETHTOOL_LINK_MODE_FEC_NONE_BIT 49 +#define ETHTOOL_LINK_MODE_FEC_RS_BIT 50 +#define ETHTOOL_LINK_MODE_FEC_BASER_BIT 51 +#define ETHTOOL_LINK_MODE_50000baseKR_Full_BIT 52 +#define ETHTOOL_LINK_MODE_50000baseSR_Full_BIT 53 +#define ETHTOOL_LINK_MODE_50000baseCR_Full_BIT 54 +#define ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT 55 +#define ETHTOOL_LINK_MODE_50000baseDR_Full_BIT 56 +#define ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT 57 +#define ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT 58 +#define ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT 59 +#define ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT 60 +#define ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT 61 +#define ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT 62 +#define ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT 63 +#define ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT 64 +#define ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT 65 +#define ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT 66 +#define ETHTOOL_LINK_MODE_100baseT1_Full_BIT 67 +#define ETHTOOL_LINK_MODE_1000baseT1_Full_BIT 68 +#define ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT 69 +#define ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT 70 +#define ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT 71 +#define ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT 72 +#define ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT 73 +#define ETHTOOL_LINK_MODE_FEC_LLRS_BIT 74 +#define 
ETHTOOL_LINK_MODE_100000baseKR_Full_BIT 75 +#define ETHTOOL_LINK_MODE_100000baseSR_Full_BIT 76 +#define ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT 77 +#define ETHTOOL_LINK_MODE_100000baseCR_Full_BIT 78 +#define ETHTOOL_LINK_MODE_100000baseDR_Full_BIT 79 +#define ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT 80 +#define ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT 81 +#define ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT 82 +#define ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT 83 +#define ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT 84 +#define ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT 85 +#define ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT 86 +#define ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT 87 +#define ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT 88 +#define ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT 89 +#define ETHTOOL_LINK_MODE_100baseFX_Half_BIT 90 +#define ETHTOOL_LINK_MODE_100baseFX_Full_BIT 91 +#else /* !HAVE_ETHTOOL_LINK_KSETTINGS */ +/* ethtool_link_mode_bit_indices enum doesn't exist, define it */ +enum ethtool_link_mode_bit_indices { + ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0, + ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1, + ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2, + ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5, + ETHTOOL_LINK_MODE_Autoneg_BIT = 6, + ETHTOOL_LINK_MODE_TP_BIT = 7, + ETHTOOL_LINK_MODE_AUI_BIT = 8, + ETHTOOL_LINK_MODE_MII_BIT = 9, + ETHTOOL_LINK_MODE_FIBRE_BIT = 10, + ETHTOOL_LINK_MODE_BNC_BIT = 11, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12, + ETHTOOL_LINK_MODE_Pause_BIT = 13, + ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14, + ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15, + ETHTOOL_LINK_MODE_Backplane_BIT = 16, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 
22, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, + ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, + + /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit + * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* + * macro for bits > 31. The only way to use indices > 31 is to + * use the new ETHTOOL_GLINKSETTINGS/ETHTOOL_SLINKSETTINGS API. + */ + + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT = 52, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT = 53, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT = 54, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT = 55, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT = 56, + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT = 57, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT = 58, + 
ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT = 59, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT = 60, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT = 61, + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT = 62, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT = 63, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT = 64, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT = 65, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67, + ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68, + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, + ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, + ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90, + ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91, + /* must be last entry */ + __ETHTOOL_LINK_MODE_MASK_NBITS +}; +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.c new file mode 100644 index 000000000000..85877e6d4fb0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.c @@ -0,0 +1,732 @@ +/* Broadcom 
NetXtreme-C/E network driver. + * + * Copyright (c) 2021-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_coredump.h" +#include "bnxt_log.h" + +static int bnxt_dbg_hwrm_log_buffer_flush(struct bnxt *bp, u16 type, u32 flags, u32 *offset) +{ + struct hwrm_dbg_log_buffer_flush_output *resp; + struct hwrm_dbg_log_buffer_flush_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_LOG_BUFFER_FLUSH); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); + req->type = cpu_to_le16(type); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *offset = le32_to_cpu(resp->current_buffer_offset); + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_fill_driver_segment_record(struct bnxt *bp, + struct bnxt_driver_segment_record *driver_seg_record, + struct bnxt_ctx_mem_type *ctxm, u16 type) +{ + struct bnxt_bs_trace_info *bs_trace = &bp->bs_trace[type]; + u32 offset = 0; + int rc = 0; + + rc = bnxt_dbg_hwrm_log_buffer_flush(bp, type, 0, &offset); + if (rc) + return; + + bnxt_bs_trace_check_wrapping(bs_trace, offset); + driver_seg_record->max_entries = cpu_to_le32(ctxm->max_entries); + driver_seg_record->entry_size = cpu_to_le32(ctxm->entry_size); + driver_seg_record->offset = cpu_to_le32(bs_trace->last_offset); + driver_seg_record->wrapped = bs_trace->wrapped; +} + +static void bnxt_retrieve_driver_coredump(struct bnxt *bp, u16 type, u32 *seg_len, + void *buf, u32 offset) +{ + struct bnxt_driver_segment_record driver_seg_record = {0}; + u32 dump_len, data_offset, record_len, record_offset; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ring_mem_info *rmem; + struct bnxt_ctx_mem_type *ctxm; + int k, n = 1; + + ctxm = 
&ctx->ctx_arr[type]; + + dump_len = 0; + record_len = sizeof(struct bnxt_driver_segment_record); + record_offset = offset; + data_offset = record_offset + sizeof(struct bnxt_driver_segment_record); + bnxt_fill_driver_segment_record(bp, &driver_seg_record, ctxm, type - BNXT_CTX_SRT_TRACE); + + ctx_pg = ctxm->pg_info; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + + for (k = 0; k < n ; k++) { + struct bnxt_ctx_pg_info *ctx_pg_block = &ctx_pg[k]; + int nr_tbls, i, j; + + rmem = &ctx_pg_block->ring_mem; + if (rmem->depth > 1) { + nr_tbls = DIV_ROUND_UP(ctx_pg_block->nr_pages, MAX_CTX_PAGES); + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem_pde; + + pg_tbl = ctx_pg_block->ctx_pg_tbl[i]; + rmem_pde = &pg_tbl->ring_mem; + if (i == (nr_tbls - 1)) { + int rem = ctx_pg_block->nr_pages % MAX_CTX_PAGES; + + if (rem) + rmem_pde->nr_pages = rem; + } + for (j = 0; j < rmem_pde->nr_pages; j++) { + memcpy(buf + data_offset, rmem_pde->pg_arr[j], + BNXT_PAGE_SIZE); + dump_len += BNXT_PAGE_SIZE; + data_offset += BNXT_PAGE_SIZE; + } + } + } else { + for (i = 0; i < ctx_pg_block->nr_pages; i++) { + memcpy(buf + data_offset, rmem->pg_arr[i], BNXT_PAGE_SIZE); + dump_len += BNXT_PAGE_SIZE; + data_offset += BNXT_PAGE_SIZE; + } + } + memcpy(buf + record_offset, &driver_seg_record, record_len); + *seg_len = dump_len + record_len; + } +} + +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, + struct bnxt_hwrm_dbg_dma_info *info) +{ + struct hwrm_dbg_cmn_input *cmn_req = msg; + __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; + u16 seq = 0, len, segs_off; + dma_addr_t dma_handle; + void *dma_buf, *resp; + int rc, off = 0; + + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); + return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; + + segs_off = 
offsetof(struct hwrm_dbg_coredump_list_output, total_segments); + cmn_req->host_dest_addr = cpu_to_le64(dma_handle); + cmn_req->host_buf_len = cpu_to_le32(info->dma_len); + while (1) { + *seq_ptr = cpu_to_le16(seq); + rc = hwrm_req_send(bp, msg); + if (rc) + break; + + len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off))); + if (!seq && + cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + info->segs = le16_to_cpu(*((__le16 *)(resp + segs_off))); + if (!info->segs) { + rc = -EIO; + break; + } + + info->dest_buf_size = info->segs * + sizeof(struct coredump_segment_record); + info->dest_buf = kmalloc(info->dest_buf_size, GFP_KERNEL); + if (!info->dest_buf) { + rc = -ENOMEM; + break; + } + } + + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + memcpy(info->dest_buf + off, dma_buf, len); + } else { + rc = -ENOBUFS; + break; + } + } + + if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) + info->dest_buf_size += len; + + if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) + break; + + seq++; + off += len; + } + hwrm_req_drop(bp, msg); + return rc; +} + +static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, + struct bnxt_coredump *coredump) +{ + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; + + info.dma_len = COREDUMP_LIST_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); + info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, + data_len); + + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); + if (!rc) { + coredump->data = info.dest_buf; + coredump->data_size = info.dest_buf_size; + coredump->total_segs = info.segs; + } + return rc; +} + +static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, + u16 segment_id) +{ + struct hwrm_dbg_coredump_initiate_input *req; + int rc; + + rc = 
hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); + + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, + u16 segment_id, u32 *seg_len, + void *buf, u32 buf_len, u32 offset) +{ + struct hwrm_dbg_coredump_retrieve_input *req; + struct bnxt_hwrm_dbg_dma_info info = {NULL}; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); + + info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; + info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, + seq_no); + info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output, + data_len); + if (buf) { + info.dest_buf = buf + offset; + info.buf_len = buf_len; + info.seg_start = offset; + } + + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); + if (!rc) + *seg_len = info.dest_buf_size; + + return rc; +} + +void +bnxt_fill_coredump_seg_hdr(struct bnxt *bp, + struct bnxt_coredump_segment_hdr *seg_hdr, + struct coredump_segment_record *seg_rec, u32 seg_len, + int status, u32 duration, u32 instance, u32 comp_id, + u32 seg_id) +{ + memset(seg_hdr, 0, sizeof(*seg_hdr)); + memcpy(seg_hdr->signature, "sEgM", 4); + if (seg_rec) { + seg_hdr->component_id = (__force __le32)seg_rec->component_id; + seg_hdr->segment_id = (__force __le32)seg_rec->segment_id; + seg_hdr->low_version = seg_rec->version_low; + seg_hdr->high_version = seg_rec->version_hi; + seg_hdr->flags = seg_rec->compress_flags; + } else { + seg_hdr->component_id = cpu_to_le32(comp_id); + seg_hdr->segment_id = cpu_to_le32(seg_id); + } + seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn); + seg_hdr->length = cpu_to_le32(seg_len); + seg_hdr->status = cpu_to_le32(status); + seg_hdr->duration = 
cpu_to_le32(duration); + seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr)); + seg_hdr->instance = cpu_to_le32(instance); +} + +struct bnxt_time bnxt_get_current_time(struct bnxt *bp) +{ + struct bnxt_time time; +#if defined(HAVE_TIME64) + time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &time.tm); +#else + struct timeval tv; + + do_gettimeofday(&tv); + time_to_tm(tv.tv_sec, -sys_tz.tz_minuteswest * 60, &time.tm); +#endif + time.tm.tm_mon += 1; + time.tm.tm_year += 1900; + + return time; +} + +static void bnxt_fill_cmdline(struct bnxt_coredump_record *record) +{ + struct mm_struct *mm = current->mm; + + if (mm) { + unsigned long len = mm->arg_end - mm->arg_start; + int i, last = 0; + + len = min(len, sizeof(record->commandline) - 1); + if (len && !copy_from_user(record->commandline, + (char __user *) mm->arg_start, len)) { + for (i = 0; i < len; i++) { + if (record->commandline[i]) + last = i; + else + record->commandline[i] = ' '; + } + record->commandline[last + 1] = 0; + return; + } + } + + strscpy(record->commandline, current->comm, TASK_COMM_LEN); +} + +void bnxt_fill_empty_seg(struct bnxt *bp, void *buf, u32 len) +{ + struct bnxt_coredump_segment_hdr seg_hdr; + + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, len, 0, 0, 0, 0, 0); + memcpy(buf, &seg_hdr, sizeof(seg_hdr)); +} + +void +bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, + struct bnxt_time start, s16 start_utc, u16 total_segs, + int status) +{ + struct bnxt_time end = bnxt_get_current_time(bp); + u32 os_ver_major = 0, os_ver_minor = 0; + + memset(record, 0, sizeof(*record)); + memcpy(record->signature, "cOrE", 4); + record->flags = 0; + record->low_version = 0; + record->high_version = 1; + record->asic_state = 0; + strscpy(record->system_name, utsname()->nodename, + sizeof(record->system_name)); + record->year = cpu_to_le16(start.tm.tm_year); + record->month = cpu_to_le16(start.tm.tm_mon); + record->day = cpu_to_le16(start.tm.tm_mday); + 
record->hour = cpu_to_le16(start.tm.tm_hour); + record->minute = cpu_to_le16(start.tm.tm_min); + record->second = cpu_to_le16(start.tm.tm_sec); + record->utc_bias = cpu_to_le16(start_utc); + bnxt_fill_cmdline(record); + record->total_segments = cpu_to_le32(total_segs); + + if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2) + netdev_warn(bp->dev, "Unknown OS release in coredump\n"); + record->os_ver_major = cpu_to_le32(os_ver_major); + record->os_ver_minor = cpu_to_le32(os_ver_minor); + + strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name)); + record->end_year = cpu_to_le16(end.tm.tm_year); + record->end_month = cpu_to_le16(end.tm.tm_mon); + record->end_day = cpu_to_le16(end.tm.tm_mday); + record->end_hour = cpu_to_le16(end.tm.tm_hour); + record->end_minute = cpu_to_le16(end.tm.tm_min); + record->end_second = cpu_to_le16(end.tm.tm_sec); + record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest); + record->asic_id1 = cpu_to_le32(bp->chip_num << 16 | + bp->ver_resp.chip_rev << 8 | + bp->ver_resp.chip_metal); + record->asic_id2 = 0; + record->coredump_status = cpu_to_le32(status); + record->ioctl_low_version = 0; + record->ioctl_high_version = 0; +} + +static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len) +{ + u32 offset = 0, seg_hdr_len, seg_record_len = 0, buf_len = 0; + u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + struct coredump_segment_record *seg_record = NULL; + u32 driver_comp_id = DRV_COREDUMP_COMP_ID; + struct bnxt_coredump_segment_hdr seg_hdr; + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_coredump coredump = {NULL}; + int rc = 0, i, type, drv_seg_count = 0; + u32 driver_seg_id = DRV_SEG_SRT_TRACE; + struct bnxt_ctx_mem_type *ctxm; + struct bnxt_time start_time; + u32 null_seg_len; + s16 start_utc; + + if (buf) + buf_len = *dump_len; + + start_time = bnxt_get_current_time(bp); + start_utc = sys_tz.tz_minuteswest; + seg_hdr_len = sizeof(seg_hdr); + + /* First segment 
should be hwrm_ver_get response. + * For hwrm_ver_get response Component id = 2 and Segment id = 0 + */ + *dump_len = seg_hdr_len + ver_get_resp_len; + if (buf) { + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, + 0, 0, 0, 2, 0); + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len; + memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len); + offset += ver_get_resp_len; + } + + rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump); + if (rc) { + netdev_err(bp->dev, "Failed to get coredump segment list\n"); + goto fw_coredump_err; + } + + *dump_len += seg_hdr_len * coredump.total_segs; + + seg_record = (struct coredump_segment_record *)coredump.data; + seg_record_len = sizeof(*seg_record); + + for (i = 0; i < coredump.total_segs; i++) { + u16 comp_id = le16_to_cpu(seg_record->component_id); + u16 seg_id = le16_to_cpu(seg_record->segment_id); + u32 duration = 0, seg_len = 0; + unsigned long start, end; + + if (buf && ((offset + seg_hdr_len) > BNXT_COREDUMP_BUF_LEN(buf_len))) { + rc = -ENOBUFS; + goto fw_coredump_err; + } + + start = jiffies; + + rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); + if (rc) { + netdev_err(bp->dev, + "Failed to initiate coredump for seg = %d\n", + seg_record->segment_id); + goto next_seg; + } + + /* Write segment data into the buffer */ + rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id, + &seg_len, buf, buf_len, + offset + seg_hdr_len); + if (rc && rc == -ENOBUFS) + goto fw_coredump_err; + else if (rc) + netdev_err(bp->dev, + "Failed to retrieve coredump for seg = %d\n", + seg_record->segment_id); +next_seg: + end = jiffies; + duration = jiffies_to_msecs(end - start); + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len, + rc, duration, 0, 0, 0); + + if (buf) { + /* Write segment header into the buffer */ + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len + seg_len; + } + + *dump_len += seg_len; + seg_record = + (struct coredump_segment_record *)((u8 *)seg_record 
+ + seg_record_len); + } + +fw_coredump_err: + if (!ctx) + goto skip_drv_coredump; + + for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; + type++, driver_seg_id++) { + u32 duration = 0, seg_len = 0; + unsigned long start, end; + ctxm = &ctx->ctx_arr[type]; + + if (!buf || !(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID)) + continue; + *dump_len += seg_hdr_len; + start = jiffies; + bnxt_retrieve_driver_coredump(bp, type, &seg_len, buf, offset + seg_hdr_len); + end = jiffies; + duration = jiffies_to_msecs(end - start); + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len, + rc, duration, 0, driver_comp_id, driver_seg_id); + + /* Write segment header into the buffer */ + memcpy(buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len + seg_len; + + *dump_len += seg_len; + seg_record = (struct coredump_segment_record *)((u8 *)seg_record + seg_record_len); + drv_seg_count++; + } +skip_drv_coredump: + null_seg_len = BNXT_COREDUMP_BUF_LEN(buf_len) - *dump_len; + if (buf) { + bnxt_fill_empty_seg(bp, buf + offset, null_seg_len); + /* Fix the coredump record at last 1024 bytes */ + offset = buf_len - sizeof(struct bnxt_coredump_record); + bnxt_fill_coredump_record(bp, buf + offset, start_time, start_utc, + coredump.total_segs + drv_seg_count + 2, rc); + } + + kfree(coredump.data); + *dump_len += sizeof(struct bnxt_coredump_record) + seg_hdr_len + null_seg_len; + if (rc == -ENOBUFS) + netdev_err(bp->dev, "Firmware returned large coredump buffer\n"); + return rc; +} + +static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf, + u32 dump_len) +{ + u32 data_copied = 0; + u32 data_len; + int i; + + for (i = 0; i < rmem->nr_pages; i++) { + data_len = rmem->page_size; + if (data_copied + data_len > dump_len) + data_len = dump_len - data_copied; + memcpy(buf + data_copied, rmem->pg_arr[i], data_len); + data_copied += data_len; + if (data_copied >= dump_len) + break; + } + return data_copied; +} + +static int bnxt_copy_crash_dump(struct bnxt *bp, 
void *buf, u32 dump_len) +{ + struct bnxt_ring_mem_info *rmem; + u32 offset = 0; + + if (!bp->fw_crash_mem) + return -EEXIST; + + rmem = &bp->fw_crash_mem->ring_mem; + + if (rmem->depth > 1) { + int i; + + for (i = 0; i < rmem->nr_pages; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i]; + offset += bnxt_copy_crash_data(&pg_tbl->ring_mem, + buf + offset, dump_len - offset); + if (offset >= dump_len) + break; + } + } else { + bnxt_copy_crash_data(rmem, buf, dump_len); + } + + return 0; +} + +static bool bnxt_crash_dump_avail(struct bnxt *bp) +{ + u32 sig = 0; + + /* First 4 bytes(signature) of crash dump is always non-zero */ + bnxt_copy_crash_dump(bp, &sig, sizeof(u32)); + if (!sig) + return false; + + return true; +} + +int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len) +{ + if (dump_type >= BNXT_DUMP_DRIVER) { + bnxt_start_logging_coredump(bp, buf, dump_len, dump_type); + return 0; + } + + if (dump_type == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST) + return bnxt_copy_crash_dump(bp, buf, *dump_len); +#ifdef CONFIG_TEE_BNXT_FW + else if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC) + return tee_bnxt_copy_coredump(buf, 0, *dump_len); +#endif + else + return -EOPNOTSUPP; + } else { + return __bnxt_get_coredump(bp, buf, dump_len); + } +} + +static void bnxt_get_bs_trace_size(struct bnxt *bp, u8 *segments, u32 *seg_len) +{ + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_type *ctxm; + int k, n = 1; + u16 type; + + if (!ctx) + return; + + for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) { + ctxm = &ctx->ctx_arr[type]; + if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID)) + continue; + + ctx_pg = ctxm->pg_info; + if (ctxm->instance_bmap) + n = hweight32(ctxm->instance_bmap); + for (k = 0; k < n ; k++) + *seg_len += ctx_pg[k].nr_pages * BNXT_PAGE_SIZE; + *segments = *segments + 1; + } +} + +static void 
bnxt_append_driver_coredump_len(struct bnxt *bp, u32 *len) +{ + u8 segments = 0; + u32 size = 0; + int hdr_len; + + bnxt_get_bs_trace_size(bp, &segments, &size); + if (size) { + hdr_len = segments * sizeof(struct bnxt_driver_segment_record); + *len += size + hdr_len; + } +} + +int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len) +{ + struct hwrm_dbg_qcfg_output *resp; + struct hwrm_dbg_qcfg_input *req; + int rc, hdr_len = 0; + + if (dump_type >= BNXT_DUMP_DRIVER) { + hdr_len = 2 * sizeof(struct bnxt_coredump_segment_hdr) + + sizeof(struct hwrm_ver_get_output) + + sizeof(struct bnxt_coredump_record); + *dump_len = bnxt_get_loggers_coredump_size(bp, dump_type); + *dump_len = *dump_len + hdr_len; + return 0; + } + + if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS)) + return -EOPNOTSUPP; + + if (dump_type == BNXT_DUMP_CRASH && + !(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC || + (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST))) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + if (dump_type == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC) + req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC); + else + req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST); + } + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto get_dump_len_exit; + + if (dump_type == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC) + *dump_len = BNXT_CRASH_DUMP_LEN; + else + *dump_len = le32_to_cpu(resp->crashdump_size); + } else { + /* Driver adds coredump headers for "HWRM_VER_GET response" + * and null segments additionally to coredump. 
+ */ + hdr_len = 2 * sizeof(struct bnxt_coredump_segment_hdr) + + sizeof(struct hwrm_ver_get_output) + + sizeof(struct bnxt_coredump_record); + *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len; + } + if (*dump_len <= hdr_len) + rc = -EINVAL; + +get_dump_len_exit: + hwrm_req_drop(bp, req); + return rc; +} + +u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type) +{ + u32 len = 0; + + if (dump_type == BNXT_DUMP_CRASH && + bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST && + bp->fw_crash_mem) { + if (!bnxt_crash_dump_avail(bp)) + return 0; + + return bp->fw_crash_len; + } + + if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) { + if (dump_type == BNXT_DUMP_LIVE) + __bnxt_get_coredump(bp, NULL, &len); + } + + if (dump_type == BNXT_DUMP_LIVE) + bnxt_append_driver_coredump_len(bp, &len); + return len; +} + diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.h new file mode 100644 index 000000000000..c851fcf3f266 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_coredump.h @@ -0,0 +1,159 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_COREDUMP_H +#define BNXT_COREDUMP_H + +#include +#include +#include + +struct bnxt_coredump_segment_hdr { + __u8 signature[4]; + __le32 component_id; + __le32 segment_id; + __le32 flags; + __u8 low_version; + __u8 high_version; + __le16 function_id; + __le32 offset; + __le32 length; + __le32 status; + __le32 duration; + __le32 data_offset; + __le32 instance; + __le32 rsvd[5]; +}; + +struct bnxt_coredump_record { + __u8 signature[4]; + __le32 flags; + __u8 low_version; + __u8 high_version; + __u8 asic_state; + __u8 rsvd0[5]; + char system_name[32]; + __le16 year; + __le16 month; + __le16 day; + __le16 hour; + __le16 minute; + __le16 second; + __le16 utc_bias; + __le16 rsvd1; + char commandline[256]; + __le32 total_segments; + __le32 os_ver_major; + __le32 os_ver_minor; + __le32 rsvd2; + char os_name[32]; + __le16 end_year; + __le16 end_month; + __le16 end_day; + __le16 end_hour; + __le16 end_minute; + __le16 end_second; + __le16 end_utc_bias; + __le32 asic_id1; + __le32 asic_id2; + __le32 coredump_status; + __u8 ioctl_low_version; + __u8 ioctl_high_version; + __le16 rsvd3[313]; +}; + +struct bnxt_driver_segment_record { + __le32 max_entries; + __le32 entry_size; + __le32 offset; + __u8 wrapped:1; + __u8 unused[3]; +}; + +#define DRV_COREDUMP_COMP_ID 0xD + +#define DRV_SEG_SRT_TRACE 1 +#define DRV_SEG_SRT2_TRACE 2 +#define DRV_SEG_CRT_TRACE 3 +#define DRV_SEG_CRT2_TRACE 4 +#define DRV_SEG_RIGP0_TRACE 5 +#define DRV_SEG_LOG_HWRM_L2_TRACE 6 +#define DRV_SEG_LOG_HWRM_ROCE_TRACE 7 + +#define BNXT_CRASH_DUMP_LEN (8 << 20) + +#define COREDUMP_LIST_BUF_LEN 2048 +#define COREDUMP_RETRIEVE_BUF_LEN 4096 + +struct bnxt_coredump { + void *data; + int data_size; + u16 total_segs; +}; + +struct bnxt_time { + struct tm tm; +}; + +#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record) - \ + sizeof(struct bnxt_coredump_segment_hdr)) + +struct bnxt_hwrm_dbg_dma_info { + void *dest_buf; + int dest_buf_size; + u16 dma_len; + u16 seq_off; + 
u16 data_len_off; + u16 segs; + u32 seg_start; + u32 buf_len; +}; + +struct hwrm_dbg_cmn_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; +}; + +struct hwrm_dbg_cmn_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define HWRM_DBG_CMN_FLAGS_MORE 1 +}; + +#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC \ + (DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR) +#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST \ + (DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR) +#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR \ + (DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR) + +u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type); +int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len); +int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len); +void bnxt_fill_coredump_seg_hdr(struct bnxt *bp, + struct bnxt_coredump_segment_hdr *seg_hdr, + struct coredump_segment_record *seg_rec, + u32 seg_len, int status, u32 duration, + u32 instance, u32 comp_id, u32 seg_id); +struct bnxt_time bnxt_get_current_time(struct bnxt *bp); +void bnxt_fill_empty_seg(struct bnxt *bp, void *buf, u32 len); +void +bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record, + struct bnxt_time start, s16 start_utc, u16 total_segs, + int status); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_dbr.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_dbr.h new file mode 100644 index 000000000000..634a6e6f8df3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_dbr.h @@ -0,0 +1,119 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_DBR_H +#define BNXT_DBR_H + +#include + +/* 32-bit XORSHIFT generator. Seed must not be zero. */ +static inline u32 xorshift(u32 *state) +{ + u32 seed = *state; + + seed ^= seed << 13; + seed ^= seed >> 17; + seed ^= seed << 5; + + *state = seed; + return seed; +} + +static inline u16 rnd(u32 *state, uint16_t range) +{ + /* range must be a power of 2 - 1 */ + return (xorshift(state) & range); +} + +#define BNXT_DB_FIFO_ROOM_MASK 0x1fff8000 +#define BNXT_DB_FIFO_ROOM_SHIFT 15 +#define BNXT_MAX_FIFO_DEPTH 0x2c00 + +#define BNXT_DB_PACING_ALGO_THRESHOLD 250 +#define BNXT_DEFAULT_PACING_PROBABILITY 0xFFFF + +#define BNXT_DBR_PACING_WIN_BASE 0x2000 +#define BNXT_DBR_PACING_WIN_MAP_OFF 4 +#define BNXT_DBR_PACING_WIN_OFF(reg) (BNXT_DBR_PACING_WIN_BASE + \ + ((reg) & BNXT_GRC_OFFSET_MASK)) + +struct bnxt_dbr_sw_stats { + u32 nr_dbr; + u64 total_dbr_us; + u64 avg_dbr_us; + u64 max_dbr_us; + u64 min_dbr_us; +}; + +struct bnxt_dbr_debug { + u32 recover_interval_ms; + u32 drop_ratio; + u32 drop_cnt; + u8 recover_enable; + u8 drop_enable; +}; + +struct bnxt_dbr { + u8 enable; + u8 pacing_enable; + atomic_t event_cnt; + + /* dedicated workqueue for DB recovery DRA */ + struct workqueue_struct *wq; + struct delayed_work dwork; + struct mutex lock; /* protect this data struct */ + + u32 curr_epoch; + u32 last_l2_epoch; + u32 last_roce_epoch; + u32 last_completed_epoch; + + u32 stat_db_fifo_reg; + u32 db_fifo_reg_off; + + struct bnxt_dbr_sw_stats sw_stats; + struct bnxt_dbr_debug debug; +}; + +static inline int __get_fifo_occupancy(void __iomem *bar0, u32 db_fifo_reg_off) +{ + u32 val; + + val = readl(bar0 + db_fifo_reg_off); + return BNXT_MAX_FIFO_DEPTH - + ((val & BNXT_DB_FIFO_ROOM_MASK) >> + BNXT_DB_FIFO_ROOM_SHIFT); +} + +/* Caller make sure that the pacing is enabled or not 
*/ +static inline void bnxt_do_pacing(void __iomem *bar0, struct bnxt_dbr *dbr, + u32 *seed, u32 pacing_th, u32 pacing_prob) +{ + u32 pace_time = 1; + u32 retry = 10; + + if (!dbr->pacing_enable) + return; + + if (rnd(seed, 0xFFFF) < pacing_prob) { + while (__get_fifo_occupancy(bar0, dbr->db_fifo_reg_off) > pacing_th && + retry--) { + u32 us_delay; + + us_delay = rnd(seed, pace_time - 1); + if (us_delay) + udelay(us_delay); + /* pacing delay time capped at 128 us */ + pace_time = min_t(u16, pace_time * 2, 128); + } + } +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.c new file mode 100644 index 000000000000..b441a2742dab --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.c @@ -0,0 +1,938 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_dcb.h" + +#ifdef CONFIG_BNXT_DCB +static int bnxt_tx_queue_to_tc(struct bnxt *bp, u8 queue_id) +{ + int i, j; + + for (i = 0; i < bp->max_tc; i++) { + if (bp->tx_q_info[i].queue_id == queue_id) { + for (j = 0; j < bp->max_tc; j++) { + if (bp->tc_to_qidx[j] == i) + return j; + } + } + } + return -EINVAL; +} + +static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, + struct ieee_ets *ets, + u32 path_dir) +{ + struct hwrm_queue_pri2cos_cfg_input *req; + struct bnxt_queue_info *q_info; + u8 *pri2cos; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(path_dir | QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + if (path_dir == QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR || + path_dir == QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX) + q_info = bp->tx_q_info; + else + q_info = bp->rx_q_info; + pri2cos = &req->pri0_cos_queue_id; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 qidx; + + req->enables |= cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); + + qidx = bp->tc_to_qidx[ets->prio_tc[i]]; + pri2cos[i] = q_info[qidx].queue_id; + } + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) +{ + struct hwrm_queue_pri2cos_qcfg_output *resp; + struct hwrm_queue_pri2cos_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + u8 *pri2cos = &resp->pri0_cos_queue_id; + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 queue_id = pri2cos[i]; + int tc; + + tc = bnxt_tx_queue_to_tc(bp, queue_id); + if (tc >= 0) + ets->prio_tc[i] = tc; + } + } + 
hwrm_req_drop(bp, req); + return rc; +} + +/* + * Caller of this function must call hwrm_req_drop() + * if the function returns success with a valid resp. + */ +static struct bnxt_queue_cos2bw_qcfg_output * +bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct hwrm_queue_cos2bw_qcfg_input **out_req) +{ + struct bnxt_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); + if (rc) + return ERR_PTR(rc); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return ERR_PTR(rc); + } + + *out_req = req; + return resp; +} + +static __le32 bnxt_get_max_bw_from_queue(struct bnxt *bp, + struct bnxt_queue_cos2bw_qcfg_output *resp, u8 queue_id) +{ + int i; + + if (resp->queue_id0 == queue_id) + return resp->queue_id0_max_bw; + + for (i = 0; i < (IEEE_8021QAZ_MAX_TCS - 1); i++) { + if (resp->cfg[i].queue_id == queue_id) + return resp->cfg[i].queue_id_max_bw; + } + + return 0; +} + +static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, + u8 max_tc) +{ + struct bnxt_queue_cos2bw_qcfg_output *cos2bw_qcfg_resp; + struct hwrm_queue_cos2bw_qcfg_input *cos2bw_qcfg_req; + struct bnxt_queue_cos2bw_cfg_input *req; + struct bnxt_cos2bw_cfg cos2bw; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); + if (rc) + return rc; + + cos2bw_qcfg_resp = bnxt_hwrm_queue_cos2bw_qcfg(bp, &cos2bw_qcfg_req); + if (IS_ERR(cos2bw_qcfg_resp)) { + hwrm_req_drop(bp, req); + return PTR_ERR(cos2bw_qcfg_resp); + } + + for (i = 0; i < bp->max_tc; i++) { + u8 qidx = bp->tc_to_qidx[i]; + u8 queue_id; + + req->enables |= + cpu_to_le32(QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx); + + memset(&cos2bw, 0, sizeof(cos2bw)); + queue_id = bp->tx_q_info[qidx].queue_id; + cos2bw.queue_id = queue_id; + if (i >= max_tc) + goto skip_ets; + + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) { + cos2bw.tsa = + 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP; + cos2bw.pri_lvl = i; + } else { + cos2bw.tsa = + QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS; + cos2bw.bw_weight = ets->tc_tx_bw[i]; + /* older firmware requires min_bw to be set to the + * same weight value in percent. + */ +#ifdef BNXT_FPGA + if (BNXT_FW_MAJ(bp) < 218 && + !(bp->flags & BNXT_FLAG_CHIP_P7)) { +#else + if (BNXT_FW_MAJ(bp) < 218) { +#endif + cos2bw.min_bw = + cpu_to_le32((ets->tc_tx_bw[i] * 100) | + BW_VALUE_UNIT_PERCENT1_100); + } + } +skip_ets: + cos2bw.max_bw = bnxt_get_max_bw_from_queue(bp, cos2bw_qcfg_resp, queue_id); + if (qidx == 0) { + req->queue_id0 = cos2bw.queue_id; + req->queue_id0_min_bw = cos2bw.min_bw; + req->queue_id0_max_bw = cos2bw.max_bw; + req->queue_id0_tsa_assign = cos2bw.tsa; + req->queue_id0_pri_lvl = cos2bw.pri_lvl; + req->queue_id0_bw_weight = cos2bw.bw_weight; + } else { + memcpy(&req->cfg[qidx - 1], &cos2bw.cfg, sizeof(cos2bw.cfg)); + } + } + hwrm_req_drop(bp, cos2bw_qcfg_req); + return hwrm_req_send(bp, req); +} + +static int bnxt_getets(struct bnxt *bp, struct ieee_ets *ets) +{ + struct bnxt_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; + struct bnxt_cos2bw_cfg cos2bw; + int i; + + resp = bnxt_hwrm_queue_cos2bw_qcfg(bp, &req); + if (IS_ERR(resp)) + return PTR_ERR(resp); + + for (i = 0; i < bp->max_tc; i++) { + int tc; + + if (i == 0) { + cos2bw.queue_id = resp->queue_id0; + cos2bw.min_bw = resp->queue_id0_min_bw; + cos2bw.max_bw = resp->queue_id0_max_bw; + cos2bw.tsa = resp->queue_id0_tsa_assign; + cos2bw.pri_lvl = resp->queue_id0_pri_lvl; + cos2bw.bw_weight = resp->queue_id0_bw_weight; + } else { + memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg)); + } + + tc = bnxt_tx_queue_to_tc(bp, cos2bw.queue_id); + if (tc < 0) + continue; + + if (cos2bw.tsa == + QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT; + } else { + ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS; + ets->tc_tx_bw[tc] = 
cos2bw.bw_weight; + } + } + hwrm_req_drop(bp, req); + return 0; +} + +static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) +{ + unsigned long qmap = 0; + int max = bp->max_tc; + int i, j, rc; + + /* Assign lossless TCs first */ + for (i = 0, j = 0; i < max; ) { + if (lltc_mask & (1 << i)) { + if (BNXT_LLQ(bp->rx_q_info[j].queue_profile)) { + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + i++; + } + j++; + continue; + } + i++; + } + + for (i = 0, j = 0; i < max; i++) { + if (lltc_mask & (1 << i)) + continue; + j = find_next_zero_bit(&qmap, max, j); + bp->tc_to_qidx[i] = j; + __set_bit(j, &qmap); + j++; + } + + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + if (rc) { + netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc); + return rc; + } + } + if (bp->ieee_ets) { + int tc = bp->num_tc; + + if (!tc) + tc = 1; + rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc); + if (rc) { + netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc); + return rc; + } + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets, + QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR); + if (rc) { + netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc); + return rc; + } + } + return 0; +} + +static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) +{ + struct hwrm_queue_pfcenable_cfg_input *req; + struct ieee_ets *my_ets = bp->ieee_ets; + unsigned int tc_mask = 0, pri_mask = 0; + u8 i, pri, lltc_count = 0; + bool need_q_remap = false; + int rc; + + if (!my_ets) + return -EINVAL; + + for (i = 0; i < bp->max_tc; i++) { + for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) { + if ((pfc->pfc_en & (1 << pri)) && + (my_ets->prio_tc[pri] == i)) { + pri_mask |= 1 << pri; + tc_mask |= 1 << i; + } + } + if (tc_mask & (1 << i)) + lltc_count++; + } + if (lltc_count > bp->max_lltc) + return -EINVAL; + + for (i = 0; i < bp->max_tc; i++) { + if (tc_mask & (1 << i)) { + u8 qidx = bp->tc_to_qidx[i]; + + if 
(!BNXT_LLQ(bp->rx_q_info[qidx].queue_profile)) { + need_q_remap = true; + break; + } + } + } + + if (need_q_remap) + bnxt_queue_remap(bp, tc_mask); + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(pri_mask); + return hwrm_req_send(bp, req); +} + +static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) +{ + struct hwrm_queue_pfcenable_qcfg_output *resp; + struct hwrm_queue_pfcenable_qcfg_input *req; + u8 pri_mask; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + + pri_mask = le32_to_cpu(resp->flags); + pfc->pfc_en = pri_mask; + hwrm_req_drop(bp, req); + return 0; +} + +static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, + bool add) +{ + struct hwrm_fw_set_structured_data_input *set; + struct hwrm_fw_get_structured_data_input *get; + struct hwrm_struct_data_dcbx_app *fw_app; + struct hwrm_struct_hdr *data; + dma_addr_t mapping; + size_t data_len; + int rc, n, i; + + if (bp->hwrm_spec_code < 0x10601) + return 0; + + rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA); + if (rc) + return rc; + + hwrm_req_hold(bp, get); + hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO); + + n = IEEE_8021QAZ_MAX_TCS; + data_len = sizeof(*data) + sizeof(*fw_app) * n; + data = hwrm_req_dma_slice(bp, get, data_len, &mapping); + if (!data) { + rc = -ENOMEM; + goto set_app_exit; + } + + get->dest_data_addr = cpu_to_le64(mapping); + get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get->count = 0; + rc = hwrm_req_send(bp, get); + if (rc) + goto set_app_exit; + + fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1); + + if (data->struct_id != cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP)) { + rc = -ENODEV; + goto set_app_exit; + } + 
+ n = data->count; + for (i = 0; i < n; i++, fw_app++) { + if (fw_app->protocol_id == cpu_to_be16(app->protocol) && + fw_app->protocol_selector == app->selector && + fw_app->priority == app->priority) { + if (add) + goto set_app_exit; + else + break; + } + } + if (add) { + /* append */ + n++; + fw_app->protocol_id = cpu_to_be16(app->protocol); + fw_app->protocol_selector = app->selector; + fw_app->priority = app->priority; + fw_app->valid = 1; + } else { + size_t len = 0; + + /* not found, nothing to delete */ + if (n == i) + goto set_app_exit; + + len = (n - 1 - i) * sizeof(*fw_app); + if (len) + memmove(fw_app, fw_app + 1, len); + n--; + memset(fw_app + n, 0, sizeof(*fw_app)); + } + data->count = n; + data->len = cpu_to_le16(sizeof(*fw_app) * n); + data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + + rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA); + if (rc) + goto set_app_exit; + + set->src_data_addr = cpu_to_le64(mapping); + set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set->hdr_cnt = 1; + rc = hwrm_req_send(bp, set); + +set_app_exit: + hwrm_req_drop(bp, get); /* dropping get request and associated slice */ + return rc; +} + +static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) +{ + struct hwrm_queue_dscp_qcaps_output *resp; + struct hwrm_queue_dscp_qcaps_input *req; + int rc; + + bp->max_dscp_value = 0; + if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) { + bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; + if (bp->max_dscp_value < 0x3f) + bp->max_dscp_value = 0; + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, + bool add) +{ + struct hwrm_queue_dscp2pri_cfg_input *req; + struct bnxt_dscp2pri_entry *dscp2pri; + dma_addr_t mapping; + int rc; + + if 
(bp->hwrm_spec_code < 0x10800) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG); + if (rc) + return rc; + + dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping); + if (!dscp2pri) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->src_data_addr = cpu_to_le64(mapping); + dscp2pri->dscp = app->protocol; + if (add) + dscp2pri->mask = 0x3f; + else + dscp2pri->mask = 0; + dscp2pri->pri = app->priority; + req->entry_cnt = cpu_to_le16(1); + rc = hwrm_req_send(bp, req); + return rc; +} + +static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) +{ + int total_ets_bw = 0; + bool zero = false; + u8 max_tc = 0; + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] > bp->max_tc) { + netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n", + ets->prio_tc[i]); + return -EINVAL; + } + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc) + return -EINVAL; + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + total_ets_bw += ets->tc_tx_bw[i]; + zero = zero || !ets->tc_tx_bw[i]; + break; + default: + return -ENOTSUPP; + } + } + if (total_ets_bw > 100) { + netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n"); + return -EINVAL; + } + if (zero && total_ets_bw == 100) { + netdev_warn(bp->dev, "rejecting ETS config starving a TC\n"); + return -EINVAL; + } + + if (max_tc >= bp->max_tc) + *tc = bp->max_tc; + else + *tc = max_tc + 1; + return 0; +} + +static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) +{ + struct bnxt *bp = netdev_priv(dev); + struct ieee_ets *my_ets = bp->ieee_ets; + int rc; + + ets->ets_cap = bp->max_tc; + + if (!my_ets) { + if (bp->dcbx_cap & DCB_CAP_DCBX_HOST) + return 0; + + my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL); + if (!my_ets) + return -ENOMEM; + rc = bnxt_getets(bp, my_ets); + if (rc) + goto 
error; + rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets); + if (rc) + goto error; + + /* cache result */ + bp->ieee_ets = my_ets; + } + + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); + memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); + return 0; +error: + kfree(my_ets); + return rc; +} + +static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) +{ + struct bnxt *bp = netdev_priv(dev); + bool alloc = !bp->ieee_ets; + u8 max_tc = 0; + int rc; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + rc = bnxt_ets_validate(bp, ets, &max_tc); + if (rc) + return rc; + if (alloc) { + bp->ieee_ets = kmalloc(sizeof(*ets), GFP_KERNEL); + if (!bp->ieee_ets) + return -ENOMEM; + } + rc = bnxt_setup_mq_tc(dev, max_tc); + if (rc) + goto error; + rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc); + if (rc) + goto error; + + if (!bp->is_asym_q) { + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets, + QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR); + if (rc) + goto error; + } else { + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets, + QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX); + if (rc) + goto error; + + rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets, + QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX); + if (rc) + goto error; + } + + memcpy(bp->ieee_ets, ets, sizeof(*ets)); + return 0; +error: + if (alloc) { + kfree(bp->ieee_ets); + bp->ieee_ets = NULL; + } + return rc; +} + +static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct bnxt *bp = netdev_priv(dev); + __le64 *stats = bp->port_stats.hw_stats; + struct ieee_pfc *my_pfc = bp->ieee_pfc; + long rx_off, tx_off; + int i, rc; + + pfc->pfc_cap = bp->max_lltc; + + if (!my_pfc) { + if (bp->dcbx_cap & DCB_CAP_DCBX_HOST) + return 0; + + my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL); + if 
(!my_pfc) + return 0; + bp->ieee_pfc = my_pfc; + rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc); + if (rc) + return 0; + } + + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + + if (!stats) + return 0; + + rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0); + tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) { + pfc->requests[i] = le64_to_cpu(*(stats + tx_off)); + pfc->indications[i] = le64_to_cpu(*(stats + rx_off)); + } + + return 0; +} + +static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) +{ + struct bnxt *bp = netdev_priv(dev); + struct ieee_pfc *my_pfc = bp->ieee_pfc; + int rc; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) || + (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) + return -EINVAL; + + if (!my_pfc) { + my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL); + if (!my_pfc) + return -ENOMEM; + bp->ieee_pfc = my_pfc; + } + rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc); + if (!rc) + memcpy(my_pfc, pfc, sizeof(*my_pfc)); + + return rc; +} + +static int bnxt_dcbnl_ieee_dscp_app_prep(struct bnxt *bp, struct dcb_app *app) +{ + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) { + if (!bp->max_dscp_value) + return -ENOTSUPP; + if (app->protocol > bp->max_dscp_value) + return -EINVAL; + } + return 0; +} + +static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app); + if (rc) + return rc; + + rc = dcb_ieee_setapp(dev, app); + if (rc) + return rc; + + if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_ROCE) || + (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM && + app->protocol == ROCE_V2_UDP_DPORT)) + rc = bnxt_hwrm_set_dcbx_app(bp, app, true); + + if 
(app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, true); + + return rc; +} + +static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return -EINVAL; + + rc = bnxt_dcbnl_ieee_dscp_app_prep(bp, app); + if (rc) + return rc; + + rc = dcb_ieee_delapp(dev, app); + if (rc) + return rc; + if ((app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_ROCE) || + (app->selector == IEEE_8021QAZ_APP_SEL_DGRAM && + app->protocol == ROCE_V2_UDP_DPORT)) + rc = bnxt_hwrm_set_dcbx_app(bp, app, false); + + if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP) + rc = bnxt_hwrm_queue_dscp2pri_cfg(bp, app, false); + + return rc; +} + +static void __bnxt_del_roce_app(struct bnxt *bp, struct dcb_app *app) +{ + u32 prio_mask = dcb_ieee_getapp_mask(bp->dev, app); + + if (!prio_mask) + return; + + app->priority = ilog2(prio_mask); + dcb_ieee_delapp(bp->dev, app); +} + +static void bnxt_del_roce_apps(struct bnxt *bp) +{ + struct dcb_app app; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return; + + app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; + app.protocol = ETH_P_ROCE; + __bnxt_del_roce_app(bp, &app); + + app.selector = IEEE_8021QAZ_APP_SEL_DGRAM; + app.protocol = ROCE_V2_UDP_DPORT; + __bnxt_del_roce_app(bp, &app); +} + +static void bnxt_del_dscp_apps(struct bnxt *bp) +{ +#ifdef HAVE_DSCP_MASK_MAP + struct dcb_ieee_app_prio_map dscp_map; + struct dcb_app app; + int i, j; + + if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + !(bp->dcbx_cap & DCB_CAP_DCBX_HOST)) + return; + + app.selector = IEEE_8021QAZ_APP_SEL_DSCP; + dcb_ieee_getapp_prio_dscp_mask_map(bp->dev, &dscp_map); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + for (j = 0; j < 64; j++) { + if (dscp_map.map[i] & (1ULL << j)) { + app.protocol = j; + app.priority = i; + 
dcb_ieee_delapp(bp->dev, &app); + } + } + } +#endif +} + +static u8 bnxt_dcbnl_getdcbx(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return bp->dcbx_cap; +} + +static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct bnxt *bp = netdev_priv(dev); + + /* All firmware DCBX settings are set in NVRAM */ + if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) + return 1; + + if (mode & DCB_CAP_DCBX_HOST) { + if (BNXT_VF(bp) || (bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT)) + return 1; + + /* only support IEEE */ + if ((mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE)) + return 1; + } + + if (mode == bp->dcbx_cap) + return 0; + + bp->dcbx_cap = mode; + return 0; +} + +static const struct dcbnl_rtnl_ops dcbnl_ops = { + .ieee_getets = bnxt_dcbnl_ieee_getets, + .ieee_setets = bnxt_dcbnl_ieee_setets, + .ieee_getpfc = bnxt_dcbnl_ieee_getpfc, + .ieee_setpfc = bnxt_dcbnl_ieee_setpfc, + .ieee_setapp = bnxt_dcbnl_ieee_setapp, + .ieee_delapp = bnxt_dcbnl_ieee_delapp, + .getdcbx = bnxt_dcbnl_getdcbx, + .setdcbx = bnxt_dcbnl_setdcbx, +}; + +void bnxt_dcb_init(struct bnxt *bp) +{ + bp->dcbx_cap = 0; + if (bp->hwrm_spec_code < 0x10501) + return; + + bnxt_hwrm_queue_dscp_qcaps(bp); + bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; + if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_LLDP_AGENT)) + bp->dcbx_cap |= DCB_CAP_DCBX_HOST; + else if (bp->fw_cap & BNXT_FW_CAP_DCBX_AGENT) + bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; + bp->dev->dcbnl_ops = &dcbnl_ops; +} + +void bnxt_dcb_free(struct bnxt *bp, bool reset) +{ + kfree(bp->ieee_pfc); + kfree(bp->ieee_ets); + bp->ieee_pfc = NULL; + bp->ieee_ets = NULL; + if (reset) { + bnxt_del_roce_apps(bp); + bnxt_del_dscp_apps(bp); + } +} + +#else + +void bnxt_dcb_init(struct bnxt *bp) +{ +} + +void bnxt_dcb_free(struct bnxt *bp, bool reset) +{ +} + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.h new file mode 100644 index 000000000000..9574ef8bc48d 
--- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_dcb.h @@ -0,0 +1,186 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_DCB_H +#define BNXT_DCB_H + +#include + +struct bnxt_dcb { + u8 max_tc; + struct ieee_pfc *ieee_pfc; + struct ieee_ets *ieee_ets; + u8 dcbx_cap; + u8 default_pri; +}; + +struct bnxt_cos2bw_cfg { + u8 pad[3]; + struct_group_attr(cfg, __packed, + u8 queue_id; + __le32 min_bw; + __le32 max_bw; + u8 tsa; + u8 pri_lvl; + u8 bw_weight; + ); +/* for min_bw / max_bw */ +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + u8 unused; +}; + +struct bnxt_dscp2pri_entry { + u8 dscp; + u8 mask; + u8 pri; +}; + +#define BNXT_LLQ(q_profile) \ + ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE || \ + (q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC) +#define BNXT_CNPQ(q_profile) \ + ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP) + +#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300 + +/* bnxt_queue_cos2bw_qcfg_output (size:896b/112B) + * This structure is identical in memory layout to + * struct hwrm_queue_cos2bw_qcfg_output in bnxt_hsi.h. + * Using the structure prevents fortify memcpy warnings. 
+ */ +struct bnxt_queue_cos2bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_id0; + u8 unused_0; + __le16 unused_1; + __le32 queue_id0_min_bw; + __le32 queue_id0_max_bw; + u8 queue_id0_tsa_assign; + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; + u8 unused_2[4]; + u8 valid; +}; + +/* bnxt_queue_cos2bw_cfg_input (size:1024b/128B) + * This structure is identical in memory layout to + * struct hwrm_queue_cos2bw_cfg_input in bnxt_hsi.h. + * Using the structure prevents fortify memcpy warnings. 
+ */ +struct bnxt_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + __le16 port_id; + u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + __le32 queue_id0_max_bw; + u8 queue_id0_tsa_assign; + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; + u8 unused_1[5]; +}; + +void bnxt_dcb_init(struct bnxt *bp); +void bnxt_dcb_free(struct bnxt *bp, bool reset); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.c new file mode 100644 index 000000000000..51e56ceaedbd --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.c @@ -0,0 +1,603 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#ifdef HAVE_DIM +#include +#else +#include "bnxt_dim.h" +#endif +#include "bnxt.h" +#include "bnxt_debugfs.h" +#include "bnxt_hdbr.h" +#include "bnxt_udcc.h" +#include "cfa_types.h" +#include "bnxt_vfr.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *bnxt_debug_mnt; +static struct dentry *bnxt_debug_tf; + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +static ssize_t debugfs_session_query_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_udcc_session_entry *entry = filep->private_data; + struct hwrm_udcc_session_query_output resp; + int len = 0, size = 4096; + char *buf; + int rc; + + rc = bnxt_hwrm_udcc_session_query(entry->bp, entry->session_id, &resp); + if (rc) + return rc; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = scnprintf(buf, size, "min_rtt_ns = %u\n", + le32_to_cpu(resp.min_rtt_ns)); + len += scnprintf(buf + len, size - len, "max_rtt_ns = %u\n", + le32_to_cpu(resp.max_rtt_ns)); + len += scnprintf(buf + len, size - len, "cur_rate_mbps = %u\n", + le32_to_cpu(resp.cur_rate_mbps)); + len += scnprintf(buf + len, size - len, "tx_event_count = %u\n", + le32_to_cpu(resp.tx_event_count)); + len += scnprintf(buf + len, size - len, "cnp_rx_event_count = %u\n", + le32_to_cpu(resp.cnp_rx_event_count)); + len += scnprintf(buf + len, size - len, "rtt_req_count = %u\n", + le32_to_cpu(resp.rtt_req_count)); + len += scnprintf(buf + len, size - len, "rtt_resp_count = %u\n", + le32_to_cpu(resp.rtt_resp_count)); + len += scnprintf(buf + len, size - len, "tx_bytes_sent = %u\n", + le32_to_cpu(resp.tx_bytes_count)); + len += scnprintf(buf + len, size - len, "tx_pkts_sent = %u\n", + le32_to_cpu(resp.tx_packets_count)); + len += scnprintf(buf + len, size - len, "init_probes_sent = %u\n", + le32_to_cpu(resp.init_probes_sent)); + len += scnprintf(buf + len, size - len, "term_probes_recv = %u\n", + 
le32_to_cpu(resp.term_probes_recv)); + len += scnprintf(buf + len, size - len, "cnp_packets_recv = %u\n", + le32_to_cpu(resp.cnp_packets_recv)); + len += scnprintf(buf + len, size - len, "rto_event_recv = %u\n", + le32_to_cpu(resp.rto_event_recv)); + len += scnprintf(buf + len, size - len, "seq_err_nak_recv = %u\n", + le32_to_cpu(resp.seq_err_nak_recv)); + len += scnprintf(buf + len, size - len, "qp_count = %u\n", + le32_to_cpu(resp.qp_count)); + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations session_query_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_session_query_read, +}; + +void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct bnxt_udcc_session_entry *entry; + static char sname[16]; + + entry = udcc->session_db[session_id]; + + if (entry->debugfs_dir || !bp->debugfs_pdev) + return; + + snprintf(sname, 10, "%d", session_id); + + entry->debugfs_dir = debugfs_create_dir(sname, bp->udcc_info->udcc_debugfs_dir); + entry->bp = bp; + + debugfs_create_file("session_query", 0644, entry->debugfs_dir, entry, &session_query_fops); +} + +void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct bnxt_udcc_session_entry *entry; + + entry = udcc->session_db[session_id]; + if (!entry->debugfs_dir || !bp->debugfs_pdev) + return; + + debugfs_remove_recursive(entry->debugfs_dir); + entry->debugfs_dir = NULL; +} +#endif + +static ssize_t debugfs_dim_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct dim *dim = filep->private_data; + int len; + char *buf; + + if (*ppos) + return 0; + if (!dim) + return -ENODEV; + buf = kasprintf(GFP_KERNEL, + "state = %d\n" \ + "profile_ix = %d\n" \ + "mode = %d\n" \ + 
"tune_state = %d\n" \ + "steps_right = %d\n" \ + "steps_left = %d\n" \ + "tired = %d\n", + dim->state, + dim->profile_ix, + dim->mode, + dim->tune_state, + dim->steps_right, + dim->steps_left, + dim->tired); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_dim_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dim_read, +}; + +static void debugfs_dim_ring_init(struct dim *dim, int ring_idx, + struct dentry *dd) +{ + static char qname[16]; + + snprintf(qname, 10, "%d", ring_idx); + debugfs_create_file(qname, 0600, dd, dim, &debugfs_dim_fops); +} + +static int dbr_enable_get(void *data, u64 *val) +{ + struct bnxt *bp = data; + + *val = bp->dbr.enable; + return 0; +} + +static int dbr_enable_set(void *data, u64 val) +{ + struct bnxt *bp = data; + struct bnxt_dbr *dbr; + int rc; + + dbr = &bp->dbr; + + if (val) { + dbr->enable = 1; + rc = bnxt_dbr_init(bp); + if (rc) { + netdev_err(bp->dev, + "Failed to initialize DB recovery\n"); + dbr->enable = 0; + return rc; + } + } else { + dbr->enable = 0; + bnxt_dbr_exit(bp); + } + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dbr_enable_fops, dbr_enable_get, dbr_enable_set, + "%llu\n"); + +static ssize_t dbr_stats_read(struct file *filep, char __user *buffer, +size_t count, loff_t *ppos) +{ + struct bnxt_dbr_sw_stats *stat = filep->private_data; + char *buf; + int len; + + if (*ppos) + return 0; + + buf = kasprintf(GFP_KERNEL, "nr_dbr = %u\n" "avg_dbr_us = %llu\n" \ + "max_dbr_us = %llu\n" "min_dbr_us = %llu\n", + stat->nr_dbr, stat->avg_dbr_us, stat->max_dbr_us, + stat->min_dbr_us); + if (!buf) + return -ENOMEM; + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + + return len; +} + +static const struct 
file_operations dbr_stats_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = dbr_stats_read, +}; + +static int dbr_test_recover_enable_get(void *data, u64 *val) +{ + struct bnxt *bp = data; + + *val = bp->dbr.debug.recover_enable; + return 0; +} + +static int dbr_test_recover_enable_set(void *data, u64 val) +{ + struct bnxt_dbr_debug *debug; + struct bnxt *bp = data; + struct bnxt_dbr *dbr; + + dbr = &bp->dbr; + debug = &dbr->debug; + + if (!dbr->enable && val) { + netdev_err(bp->dev, + "Unable to run DB recovery test when DBR is disabled\n"); + return -EINVAL; + } + + if (val) { + debug->recover_enable = 1; + if (dbr->wq) + /* kick start the recovery work */ + if (queue_delayed_work(dbr->wq, &dbr->dwork, + msecs_to_jiffies(debug->recover_interval_ms))) + atomic_inc(&dbr->event_cnt); + } else { + debug->recover_enable = 0; + } + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dbr_test_recover_enable_fops, + dbr_test_recover_enable_get, + dbr_test_recover_enable_set, + "%llu\n"); + +static ssize_t hdbr_debug_trace_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt *bp = filep->private_data; + int len = 2; + char buf[2]; + + if (*ppos) + return 0; + if (!bp) + return -ENODEV; + if (count < len) + return -ENOSPC; + + if (bp->hdbr_info.debug_trace) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + + return simple_read_from_buffer(buffer, count, ppos, buf, len); +} + +static ssize_t hdbr_debug_trace_write(struct file *file, const char __user *u, + size_t size, loff_t *off) +{ + struct bnxt *bp = file->private_data; + char u_in[2]; + size_t n; + + if (!bp) + return -ENODEV; + if (*off || !size || size > 2) + return -EFAULT; + + n = simple_write_to_buffer(u_in, size, off, u, 2); + if (n != size) + return -EFAULT; + + if (u_in[0] == '0') + bp->hdbr_info.debug_trace = 0; + else + bp->hdbr_info.debug_trace = 1; + + return size; +} + +static const struct file_operations hdbr_debug_trace_fops = { + .owner = 
THIS_MODULE, + .open = simple_open, + .read = hdbr_debug_trace_read, + .write = hdbr_debug_trace_write, +}; + +static ssize_t debugfs_hdbr_kdmp_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_hdbr_ktbl *ktbl = *((void **)filep->private_data); + size_t len; + char *buf; + + if (*ppos) + return 0; + if (!ktbl) + return -ENODEV; + + buf = bnxt_hdbr_ktbl_dump(ktbl); + if (!buf) + return -ENOMEM; + len = strlen(buf); + if (count < len) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, len); + kfree(buf); + return len; +} + +static const struct file_operations hdbr_kdmp_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_hdbr_kdmp_read, +}; + +static ssize_t debugfs_hdbr_l2dmp_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_hdbr_l2_pgs *l2pgs = *((void **)filep->private_data); + size_t len; + char *buf; + + if (*ppos) + return 0; + if (!l2pgs) + return -ENODEV; + + buf = bnxt_hdbr_l2pg_dump(l2pgs); + if (!buf) + return -ENOMEM; + len = strlen(buf); + if (count < len) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, len); + kfree(buf); + return len; +} + +static const struct file_operations hdbr_l2dmp_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_hdbr_l2dmp_read, +}; + +static void bnxt_debugfs_hdbr_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct dentry *pdevf, *phdbr, *pktbl, *pl2pgs; + int i; + char *names[4] = {"sq", "rq", "srq", "cq"}; + + if (!bp->hdbr_info.hdbr_enabled) + return; + + /* Create top dir */ + phdbr = debugfs_create_dir("hdbr", bp->debugfs_pdev); + if (!phdbr) { + pr_err("Failed to create debugfs entry %s/hdbr\n", pname); + return; + } + + /* Create debug_trace knob */ + pdevf = debugfs_create_file("debug_trace", 0644, phdbr, bp, &hdbr_debug_trace_fops); + if (!pdevf) { + pr_err("Failed to create 
debugfs entry %s/hdbr/debug_trace\n", pname); + return; + } + + /* Create ktbl dir */ + pktbl = debugfs_create_dir("ktbl", phdbr); + if (!pktbl) { + pr_err("Failed to create debugfs entry %s/hdbr/ktbl\n", pname); + return; + } + + /* Create l2pgs dir */ + pl2pgs = debugfs_create_dir("l2pgs", phdbr); + if (!pl2pgs) { + pr_err("Failed to create debugfs entry %s/hdbr/l2pgs\n", pname); + return; + } + + /* Create hdbr kernel page and L2 page dumping knobs */ + for (i = 0; i < DBC_GROUP_MAX; i++) { + pdevf = debugfs_create_file(names[i], 0644, pktbl, &bp->hdbr_info.ktbl[i], + &hdbr_kdmp_fops); + if (!pdevf) { + pr_err("Failed to create debugfs entry %s/hdbr/ktbl/%s\n", + pname, names[i]); + return; + } + if (i == DBC_GROUP_RQ) + continue; + pdevf = debugfs_create_file(names[i], 0644, pl2pgs, &bp->hdbr_pgs[i], + &hdbr_l2dmp_fops); + if (!pdevf) { + pr_err("Failed to create debugfs entry %s/hdbr/l2pgs/%s\n", + pname, names[i]); + return; + } + } +} + +#define BNXT_DEBUGFS_TRUFLOW "truflow" + +int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid) +{ + char name[32]; + struct dentry *port_dir; + + bnxt_debug_tf = debugfs_lookup(BNXT_DEBUGFS_TRUFLOW, bnxt_debug_mnt); + + if (!bnxt_debug_tf) + return -ENODEV; + + /* If not there create the port # directory */ + sprintf(name, "%d", bp->pf.port_id); + port_dir = debugfs_lookup(name, bnxt_debug_tf); + + if (!port_dir) { + port_dir = debugfs_create_dir(name, bnxt_debug_tf); + if (!port_dir) { + pr_debug("Failed to create TF debugfs port %d directory.\n", + bp->pf.port_id); + return -ENODEV; + } + } + /* Call TF function to create the table scope debugfs seq files */ + bnxt_tf_debugfs_create_files(bp, tsid, port_dir); + + return 0; +} + +void bnxt_debug_tf_delete(struct bnxt *bp) +{ + char name[32]; + struct dentry *port_dir; + + if (!bnxt_debug_tf) + return; + + sprintf(name, "%d", bp->pf.port_id); + port_dir = debugfs_lookup(name, bnxt_debug_tf); + if (port_dir) + debugfs_remove_recursive(port_dir); +} + +void 
bnxt_debug_dev_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct bnxt_dbr_debug *debug; + struct bnxt_dbr *dbr; + struct dentry *dir; + int i; + + bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); + dir = debugfs_create_dir("dim", bp->debugfs_pdev); + + /* Create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) + debugfs_dim_ring_init(&cpr->dim, i, dir); + } + +#define DBR_TEST_RECOVER_INTERVAL_MS 1000 +#define DBR_TEST_DROP_RATIO 10 + dbr = &bp->dbr; + debug = &bp->dbr.debug; + + debug->recover_interval_ms = DBR_TEST_RECOVER_INTERVAL_MS; + debug->drop_ratio = DBR_TEST_DROP_RATIO; + + dir = debugfs_create_dir("dbr", bp->debugfs_pdev); + debugfs_create_file("dbr_enable", 0644, dir, bp, &dbr_enable_fops); + debugfs_create_file("dbr_stats", 0444, dir, &dbr->sw_stats, + &dbr_stats_fops); +#ifdef DBR_DBG_DROP_ENABLE + debugfs_create_u8("dbr_test_drop_enable", 0644, dir, + &debug->drop_enable); + debugfs_create_u32("dbr_test_drop_ratio", 0644, dir, + &debug->drop_ratio); +#endif + debugfs_create_file("dbr_test_recover_enable", 0644, dir, bp, + &dbr_test_recover_enable_fops); + debugfs_create_u32("dbr_test_recover_interval_ms", 0644, dir, + &debug->recover_interval_ms); + + bnxt_debugfs_hdbr_init(bp); + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + if (bp->udcc_info) + bp->udcc_info->udcc_debugfs_dir = debugfs_create_dir("udcc", bp->debugfs_pdev); +#endif +} + +void bnxt_debug_dev_exit(struct bnxt *bp) +{ + struct bnxt_dbr_debug *debug = &bp->dbr.debug; + + if (!bp) + return; + + memset(debug, 0, sizeof(*debug)); + + debugfs_remove_recursive(bp->debugfs_pdev); + bp->debugfs_pdev = NULL; +} + +void bnxt_debug_init(void) +{ + bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); + if (!bnxt_debug_mnt) { + pr_err("failed to init bnxt_en debugfs\n"); + return; + } + + bnxt_debug_tf = debugfs_create_dir(BNXT_DEBUGFS_TRUFLOW, + 
bnxt_debug_mnt); + + if (!bnxt_debug_tf) + pr_err("Failed to create TF debugfs backingstore directory.\n"); +} + +void bnxt_debug_exit(void) +{ + /* Remove subdirectories. Older kernels have bug in remove for 2 level + * directories. + */ + debugfs_remove_recursive(bnxt_debug_tf); + debugfs_remove_recursive(bnxt_debug_mnt); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.h new file mode 100644 index 000000000000..6bfaa5172f67 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs.h @@ -0,0 +1,31 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include "bnxt_hsi.h" +#include "bnxt.h" + +#ifdef CONFIG_DEBUG_FS +void bnxt_debug_init(void); +void bnxt_debug_exit(void); +void bnxt_debug_dev_init(struct bnxt *bp); +void bnxt_debug_dev_exit(struct bnxt *bp); +void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id); +void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id); +int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid); +void bnxt_debug_tf_delete(struct bnxt *bp); +#else +static inline void bnxt_debug_init(void) {} +static inline void bnxt_debug_exit(void) {} +static inline void bnxt_debug_dev_init(struct bnxt *bp) {} +static inline void bnxt_debug_dev_exit(struct bnxt *bp) {} +static inline void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id) {} +static inline void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id) {} +static inline int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid) { return 0; } +static inline void bnxt_debug_tf_delete(struct bnxt *bp) {} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs_cpt.c 
b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs_cpt.c new file mode 100644 index 000000000000..a92c4b0ae30a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_debugfs_cpt.c @@ -0,0 +1,476 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#ifdef HAVE_DIM +#include +#else +#include "bnxt_dim.h" +#endif +#include "bnxt.h" +#include "bnxt_hdbr.h" +#include "bnxt_udcc.h" +#include "cfa_types.h" +#include "bnxt_vfr.h" + +#ifdef CONFIG_DEBUG_FS + +static struct dentry *bnxt_debug_mnt; +static struct dentry *bnxt_debug_tf; + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +static ssize_t debugfs_session_query_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_udcc_session_entry *entry = filep->private_data; + struct hwrm_udcc_session_query_output resp; + int len = 0, size = 4096; + char *buf; + int rc; + + rc = bnxt_hwrm_udcc_session_query(entry->bp, entry->session_id, &resp); + if (rc) + return rc; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = scnprintf(buf, size, "min_rtt_ns = %u\n", + le32_to_cpu(resp.min_rtt_ns)); + len += scnprintf(buf + len, size - len, "max_rtt_ns = %u\n", + le32_to_cpu(resp.max_rtt_ns)); + len += scnprintf(buf + len, size - len, "cur_rate_mbps = %u\n", + le32_to_cpu(resp.cur_rate_mbps)); + len += scnprintf(buf + len, size - len, "tx_event_count = %u\n", + le32_to_cpu(resp.tx_event_count)); + len += scnprintf(buf + len, size - len, "cnp_rx_event_count = %u\n", + le32_to_cpu(resp.cnp_rx_event_count)); + len += scnprintf(buf + len, size - len, "rtt_req_count = %u\n", + le32_to_cpu(resp.rtt_req_count)); + len += 
scnprintf(buf + len, size - len, "rtt_resp_count = %u\n", + le32_to_cpu(resp.rtt_resp_count)); + len += scnprintf(buf + len, size - len, "tx_bytes_sent = %u\n", + le32_to_cpu(resp.tx_bytes_count)); + len += scnprintf(buf + len, size - len, "tx_pkts_sent = %u\n", + le32_to_cpu(resp.tx_packets_count)); + len += scnprintf(buf + len, size - len, "init_probes_sent = %u\n", + le32_to_cpu(resp.init_probes_sent)); + len += scnprintf(buf + len, size - len, "term_probes_recv = %u\n", + le32_to_cpu(resp.term_probes_recv)); + len += scnprintf(buf + len, size - len, "cnp_packets_recv = %u\n", + le32_to_cpu(resp.cnp_packets_recv)); + len += scnprintf(buf + len, size - len, "rto_event_recv = %u\n", + le32_to_cpu(resp.rto_event_recv)); + len += scnprintf(buf + len, size - len, "seq_err_nak_recv = %u\n", + le32_to_cpu(resp.seq_err_nak_recv)); + len += scnprintf(buf + len, size - len, "qp_count = %u\n", + le32_to_cpu(resp.qp_count)); + + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations session_query_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_session_query_read, +}; + +void bnxt_debugfs_create_udcc_session(struct bnxt *bp, u32 session_id) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct bnxt_udcc_session_entry *entry; + static char sname[16]; + + entry = udcc->session_db[session_id]; + if (entry->debugfs_dir || !bp->debugfs_pdev) + return; + + snprintf(sname, 10, "%d", session_id); + entry->debugfs_dir = debugfs_create_dir(sname, bp->udcc_info->udcc_debugfs_dir); + entry->bp = bp; + + debugfs_create_file("session_query", 0644, entry->debugfs_dir, entry, &session_query_fops); +} + +void bnxt_debugfs_delete_udcc_session(struct bnxt *bp, u32 session_id) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct bnxt_udcc_session_entry *entry; + + entry = udcc->session_db[session_id]; + if 
(!entry->debugfs_dir || !bp->debugfs_pdev) + return; + + debugfs_remove_recursive(entry->debugfs_dir); + entry->debugfs_dir = NULL; +} +#endif + +static ssize_t debugfs_dim_read(struct file *filep, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct dim *dim = filep->private_data; + int len; + char *buf; + + if (*ppos) + return 0; + if (!dim) + return -ENODEV; + buf = kasprintf(GFP_KERNEL, + "state = %d\n" \ + "profile_ix = %d\n" \ + "mode = %d\n" \ + "tune_state = %d\n" \ + "steps_right = %d\n" \ + "steps_left = %d\n" \ + "tired = %d\n", + dim->state, + dim->profile_ix, + dim->mode, + dim->tune_state, + dim->steps_right, + dim->steps_left, + dim->tired); + if (!buf) + return -ENOMEM; + if (count < strlen(buf)) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_dim_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dim_read, +}; + +static struct dentry *debugfs_dim_ring_init(struct dim *dim, int ring_idx, + struct dentry *dd) +{ + static char qname[16]; + + snprintf(qname, 10, "%d", ring_idx); + return debugfs_create_file(qname, 0600, dd, + dim, &debugfs_dim_fops); +} + +static ssize_t debugfs_dt_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt *bp = filep->private_data; + int len = 2; + char buf[2]; + + if (*ppos) + return 0; + if (!bp) + return -ENODEV; + if (count < len) + return -ENOSPC; + + if (bp->hdbr_info.debug_trace) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + + return simple_read_from_buffer(buffer, count, ppos, buf, len); +} + +static ssize_t debugfs_dt_write(struct file *file, const char __user *u, + size_t size, loff_t *off) +{ + struct bnxt *bp = file->private_data; + char u_in[2]; + size_t n; + + if (!bp) + return -ENODEV; + if (*off || !size || size > 2) + return -EFAULT; + + n = simple_write_to_buffer(u_in, size, off, u, 
2); + if (n != size) + return -EFAULT; + + if (u_in[0] == '0') + bp->hdbr_info.debug_trace = 0; + else + bp->hdbr_info.debug_trace = 1; + + return size; +} + +static const struct file_operations debug_trace_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_dt_read, + .write = debugfs_dt_write, +}; + +static ssize_t debugfs_hdbr_kdmp_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_hdbr_ktbl *ktbl = *((void **)filep->private_data); + size_t len; + char *buf; + + if (*ppos) + return 0; + if (!ktbl) + return -ENODEV; + + buf = bnxt_hdbr_ktbl_dump(ktbl); + if (!buf) + return -ENOMEM; + len = strlen(buf); + if (count < len) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, len); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_hdbr_kdmp_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_hdbr_kdmp_read, +}; + +static ssize_t debugfs_hdbr_l2dmp_read(struct file *filep, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct bnxt_hdbr_l2_pgs *l2pgs = *((void **)filep->private_data); + size_t len; + char *buf; + + if (*ppos) + return 0; + if (!l2pgs) + return -ENODEV; + + buf = bnxt_hdbr_l2pg_dump(l2pgs); + if (!buf) + return -ENOMEM; + len = strlen(buf); + if (count < len) { + kfree(buf); + return -ENOSPC; + } + len = simple_read_from_buffer(buffer, count, ppos, buf, len); + kfree(buf); + return len; +} + +static const struct file_operations debugfs_hdbr_l2dmp_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = debugfs_hdbr_l2dmp_read, +}; + +static void bnxt_debugfs_hdbr_init(struct bnxt *bp) +{ + struct dentry *pdevf, *phdbr, *pktbl, *pl2pgs; + char *names[4] = {"sq", "rq", "srq", "cq"}; + const char *pname = pci_name(bp->pdev); + int i; + + if (!bp->hdbr_info.hdbr_enabled) + return; + + /* Create top dir */ + phdbr = debugfs_create_dir("hdbr", bp->debugfs_pdev); + if (!phdbr) { + 
pr_err("Failed to create debugfs entry %s/hdbr\n", pname); + return; + } + + /* Create debug_trace knob */ + pdevf = debugfs_create_file("debug_trace", 0600, phdbr, bp, &debug_trace_fops); + if (!pdevf) { + pr_err("Failed to create debugfs entry %s/hdbr/debug_trace\n", pname); + return; + } + + /* Create ktbl dir */ + pktbl = debugfs_create_dir("ktbl", phdbr); + if (!pktbl) { + pr_err("Failed to create debugfs entry %s/hdbr/ktbl\n", pname); + return; + } + + /* Create l2pgs dir */ + pl2pgs = debugfs_create_dir("l2pgs", phdbr); + if (!pl2pgs) { + pr_err("Failed to create debugfs entry %s/hdbr/l2pgs\n", pname); + return; + } + + /* Create hdbr kernel page and L2 page dumping knobs */ + for (i = 0; i < DBC_GROUP_MAX; i++) { + pdevf = debugfs_create_file(names[i], 0600, pktbl, + &bp->hdbr_info.ktbl[i], + &debugfs_hdbr_kdmp_fops); + if (!pdevf) { + pr_err("Failed to create debugfs entry %s/hdbr/ktbl/%s\n", + pname, names[i]); + return; + } + if (i == DBC_GROUP_RQ) + continue; + pdevf = debugfs_create_file(names[i], 0600, pl2pgs, + &bp->hdbr_pgs[i], + &debugfs_hdbr_l2dmp_fops); + if (!pdevf) { + pr_err("Failed to create debugfs entry %s/hdbr/l2pgs/%s\n", + pname, names[i]); + return; + } + } +} + +#define BNXT_DEBUGFS_TRUFLOW "truflow" + +int bnxt_debug_tf_create(struct bnxt *bp, u8 tsid) +{ + char name[32]; + struct dentry *port_dir; + + bnxt_debug_tf = debugfs_lookup(BNXT_DEBUGFS_TRUFLOW, bnxt_debug_mnt); + + if (!bnxt_debug_tf) + return -ENODEV; + + /* If not there create the port # directory */ + sprintf(name, "%d", bp->pf.port_id); + port_dir = debugfs_lookup(name, bnxt_debug_tf); + + if (!port_dir) { + port_dir = debugfs_create_dir(name, bnxt_debug_tf); + if (!port_dir) { + pr_debug("Failed to create TF debugfs port %d directory.\n", + bp->pf.port_id); + return -ENODEV; + } + } + + /* Call TF function to create the table scope debugfs seq files */ + bnxt_tf_debugfs_create_files(bp, tsid, port_dir); + + return 0; +} + +void bnxt_debug_tf_delete(struct bnxt *bp) +{ + 
char name[32]; + struct dentry *port_dir; + + if (!bnxt_debug_tf) + return; + + sprintf(name, "%d", bp->pf.port_id); + port_dir = debugfs_lookup(name, bnxt_debug_tf); + if (port_dir) + debugfs_remove_recursive(port_dir); +} + +void bnxt_debug_dev_init(struct bnxt *bp) +{ + const char *pname = pci_name(bp->pdev); + struct dentry *pdevf; + int i; + + bp->debugfs_pdev = debugfs_create_dir(pname, bnxt_debug_mnt); + if (bp->debugfs_pdev) { + pdevf = debugfs_create_dir("dim", bp->debugfs_pdev); + if (!pdevf) { + pr_err("failed to create debugfs entry %s/dim\n", pname); + return; + } + bp->debugfs_dim = pdevf; + /* create files for each rx ring */ + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + + if (cpr && bp->bnapi[i]->rx_ring) { + pdevf = debugfs_dim_ring_init(&cpr->dim, i, + bp->debugfs_dim); + if (!pdevf) + pr_err("failed to create debugfs entry %s/dim/%d\n", + pname, i); + } + } + + bnxt_debugfs_hdbr_init(bp); +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + if (bp->udcc_info) + bp->udcc_info->udcc_debugfs_dir = + debugfs_create_dir("udcc", bp->debugfs_pdev); +#endif + } else { + pr_err("failed to create debugfs entry %s\n", pname); + } +} + +void bnxt_debug_dev_exit(struct bnxt *bp) +{ + if (!bp) + return; + + debugfs_remove_recursive(bp->debugfs_pdev); + bp->debugfs_pdev = NULL; +} + +void bnxt_debug_init(void) +{ + bnxt_debug_mnt = debugfs_create_dir("bnxt_en", NULL); + if (!bnxt_debug_mnt) { + pr_err("failed to init bnxt_en debugfs\n"); + return; + } + + bnxt_debug_tf = debugfs_create_dir(BNXT_DEBUGFS_TRUFLOW, + bnxt_debug_mnt); + + if (!bnxt_debug_tf) + pr_err("Failed to create TF debugfs backingstore directory.\n"); +} + +void bnxt_debug_exit(void) +{ + /* Remove subdirectories. Older kernels have bug in remove for 2 level + * directories. 
+ */ + debugfs_remove_recursive(bnxt_debug_tf); + debugfs_remove_recursive(bnxt_debug_mnt); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.c new file mode 100644 index 000000000000..bf62697068ad --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.c @@ -0,0 +1,1476 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) +#include +#endif +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_vfr.h" +#include "bnxt_devlink.h" +#include "bnxt_ethtool.h" +#include "bnxt_ulp.h" +#include "bnxt_ptp.h" +#include "bnxt_coredump.h" +#include "bnxt_nvm_defs.h" +#include "bnxt_ethtool.h" +#include "bnxt_devlink_compat.h" + +static void __bnxt_fw_recover(struct bnxt *bp) +{ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) || + test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state)) + bnxt_fw_reset(bp); + else + bnxt_fw_exception(bp); +} + +int bnxt_hwrm_nvm_get_var(struct bnxt *bp, dma_addr_t data_dma_addr, + u16 offset, u16 dim, u16 index, u16 num_bits) +{ + struct hwrm_nvm_get_variable_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->option_num = cpu_to_le16(offset); + req->data_len = cpu_to_le16(num_bits); + req->dimensions = cpu_to_le16(dim); + req->index_0 = cpu_to_le16(index); + return hwrm_req_send_silent(bp, req); +} + +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) +#ifdef HAVE_DEVLINK_INFO +static void 
bnxt_copy_from_nvm_data(union devlink_param_value *dst, + union bnxt_nvm_data *src, + int nvm_num_bits, int dl_num_bytes); + +static int bnxt_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver) +{ + u16 bytes = BNXT_NVM_CFG_VER_BYTES; + u16 bits = BNXT_NVM_CFG_VER_BITS; + union devlink_param_value ver; + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + int rc, i = 2; + u16 dim = 1; + + data = dma_zalloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* earlier devices present as an array of raw bytes */ + if (!BNXT_CHIP_P5_PLUS(bp)) { + dim = 0; + i = 0; + bits *= 3; /* array of 3 version components */ + bytes *= 4; /* copy whole word */ + } + + while (i >= 0) { + rc = bnxt_hwrm_nvm_get_var(bp, data_dma_addr, + NVM_OFF_NVM_CFG_VER, dim, i--, bits); + if (!rc) + bnxt_copy_from_nvm_data(&ver, data, bits, bytes); + + if (BNXT_CHIP_P5_PLUS(bp)) { + *nvm_cfg_ver <<= 8; + *nvm_cfg_ver |= ver.vu8; + } else { + *nvm_cfg_ver = ver.vu32; + } + } + + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + return rc; +} + +static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req, + enum bnxt_dl_version_type type, const char *key, + char *buf) +{ + if (!strlen(buf)) + return 0; + + if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) || + !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE))) + return 0; + + switch (type) { + case BNXT_VERSION_FIXED: + return devlink_info_version_fixed_put(req, key, buf); + case BNXT_VERSION_RUNNING: + return devlink_info_version_running_put(req, key, buf); + case BNXT_VERSION_STORED: + return devlink_info_version_stored_put(req, key, buf); + } + return 0; +} + +#define BNXT_FW_SRT_PATCH "fw.srt.patch" +#define BNXT_FW_CRT_PATCH "fw.crt.patch" + +static int bnxt_dl_livepatch_info_put(struct bnxt *bp, + struct devlink_info_req *req, + const char *key) +{ + struct hwrm_fw_livepatch_query_input *query; + 
struct hwrm_fw_livepatch_query_output *resp; + u16 flags; + int rc; + + if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) + return 0; + + rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY); + if (rc) + return rc; + + if (!strcmp(key, BNXT_FW_SRT_PATCH)) + query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW; + else if (!strcmp(key, BNXT_FW_CRT_PATCH)) + query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW; + else + goto exit; + + resp = hwrm_req_hold(bp, query); + rc = hwrm_req_send(bp, query); + if (rc) + goto exit; + + flags = le16_to_cpu(resp->status_flags); + if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) { + resp->active_ver[sizeof(resp->active_ver) - 1] = '\0'; + rc = devlink_info_version_running_put(req, key, resp->active_ver); + if (rc) + goto exit; + } + + if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) { + resp->install_ver[sizeof(resp->install_ver) - 1] = '\0'; + rc = devlink_info_version_stored_put(req, key, resp->install_ver); + if (rc) + goto exit; + } + +exit: + hwrm_req_drop(bp, query); + return rc; +} + +#define HWRM_FW_VER_STR_LEN 16 + +static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct hwrm_nvm_get_dev_info_output nvm_dev_info; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_ver_get_output *ver_resp; + char mgmt_ver[FW_VER_STR_LEN]; + char roce_ver[FW_VER_STR_LEN]; + char ncsi_ver[FW_VER_STR_LEN]; + char buf[32]; + u32 ver = 0; + int rc; + +#ifdef HAVE_DEVLINK_INFO_DRIVER_NAME + rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME); + if (rc) + return rc; +#endif + + if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) { + sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X", + bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4], + bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]); + rc = devlink_info_serial_number_put(req, buf); + if (rc) + return rc; + } + + if (strlen(bp->board_serialno)) { + rc = 
devlink_info_board_serial_number_put(req, bp->board_serialno); + if (rc) + return rc; + } + + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, + bp->board_partno); + if (rc) + return rc; + + sprintf(buf, "0x%x", bp->chip_num); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf); + if (rc) + return rc; + + ver_resp = &bp->ver_resp; + sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED, + DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf); + if (rc) + return rc; + + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, + bp->nvm_cfg_ver); + if (rc) + return rc; + + buf[0] = 0; + strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW, buf); + if (rc) + return rc; + + if (BNXT_PF(bp) && !bnxt_get_nvm_cfg_ver(bp, &ver)) { + sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff, + ver & 0xff); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_PSID, + buf); + if (rc) + return rc; + } + + if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) { + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor, + ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch); + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor, + ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_major, ver_resp->roce_fw_minor, + ver_resp->roce_fw_build, ver_resp->roce_fw_patch); + } else { + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b, + ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b); + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + 
ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b, + ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b); + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b, + ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b); + } + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver); + if (rc) + return rc; + + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API, + bp->hwrm_ver_supp); + if (rc) + return rc; + + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver); + if (rc) + return rc; + + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + return rc; + + rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info); + if (rc || + !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) { + if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf))) + return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW, + buf); + return 0; + } + + buf[0] = 0; + strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW, buf); + if (rc) + return rc; + + snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor, + nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver); + if (rc) + return rc; + + snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor, + nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver); + if (rc) + return rc; + + snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d", + nvm_dev_info.roce_fw_major, 
nvm_dev_info.roce_fw_minor, + nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch); + rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED, + DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver); + if (rc) + return rc; + + if (BNXT_CHIP_P5_PLUS(bp)) { + rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); + if (rc) + return rc; + } + return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH); + +} +#endif /* HAVE_DEVLINK_INFO */ + +#ifdef HAVE_DEVLINK_FLASH_UPDATE +static int +#ifdef HAVE_DEVLINK_FLASH_PARAMS +bnxt_dl_flash_update(struct devlink *dl, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +#else +bnxt_dl_flash_update(struct devlink *dl, const char *filename, + const char *region, struct netlink_ext_ack *extack) +#endif +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; + +#ifndef HAVE_DEVLINK_FLASH_PARAMS + if (region) + return -EOPNOTSUPP; +#endif + + if (!BNXT_PF(bp)) { + NL_SET_ERR_MSG_MOD(extack, + "flash update not supported from a VF"); + return -EPERM; + } + + devlink_flash_update_begin_notify(dl); + devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0); +#ifdef HAVE_DEVLINK_FLASH_PARAMS +#ifdef HAVE_DEVLINK_FLASH_PARAMS_NEW + rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack); +#else + rc = bnxt_flash_package_from_file(bp->dev, params->file_name, 0, extack); +#endif /* HAVE_DEVLINK_FLASH_PARAMS_NEW */ +#else + rc = bnxt_flash_package_from_file(bp->dev, filename, 0, extack); +#endif /* HAVE_DEVLINK_FLASH_PARAMS */ + if (!rc) + devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0); + else + devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0); + devlink_flash_update_end_notify(dl); + return rc; +} +#endif /* HAVE_DEVLINK_FLASH_UPDATE */ + +#if defined(HAVE_REMOTE_DEV_RESET) || defined(HAVE_DEVLINK_HEALTH_REPORT) +static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset) +{ + struct hwrm_func_cfg_input *req; + int rc; + 
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return -EOPNOTSUPP; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT); + if (remote_reset) + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS); + + return hwrm_req_send(bp, req); +} +#endif + +#ifdef HAVE_DEVLINK_HEALTH_REPORT +char *bnxt_health_severity_str(enum bnxt_health_severity severity) +{ + switch (severity) { + case SEVERITY_NORMAL: return "normal"; + case SEVERITY_WARNING: return "warning"; + case SEVERITY_RECOVERABLE: return "recoverable"; + case SEVERITY_FATAL: return "fatal"; + default: return "unknown"; + } +} + +char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy) +{ + switch (remedy) { + case REMEDY_DEVLINK_RECOVER: return "devlink recover"; + case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle"; + case REMEDY_POWER_CYCLE_HOST: return "host power cycle"; + case REMEDY_FW_UPDATE: return "update firmware"; + case REMEDY_HW_REPLACE: return "replace hardware"; + default: return "unknown"; + } +} + +static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + struct bnxt_fw_health *h = bp->fw_health; + u32 fw_status, fw_resets; + +#ifndef HAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID + return bnxt_fw_diagnose_compat(reporter, fmsg); +#endif + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); + return 0; + } + + if (!h->status_reliable) { + devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + return 0; + } + + mutex_lock(&h->lock); + fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (BNXT_FW_IS_BOOTING(fw_status)) { + devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); + } else if (h->severity || fw_status != 
BNXT_FW_STATUS_HEALTHY) { + if (!h->severity) { + h->severity = SEVERITY_FATAL; + h->remedy = REMEDY_POWER_CYCLE_DEVICE; + h->diagnoses++; + devlink_health_report(h->fw_reporter, + "FW error diagnosed", h); + } + devlink_fmsg_string_pair_put(fmsg, "Status", "error"); + devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); + } else { + devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); + } + + devlink_fmsg_string_pair_put(fmsg, "Severity", + bnxt_health_severity_str(h->severity)); + + if (h->severity) { + devlink_fmsg_string_pair_put(fmsg, "Remedy", + bnxt_health_remedy_str(h->remedy)); + if (h->remedy == REMEDY_DEVLINK_RECOVER) { + devlink_fmsg_string_pair_put(fmsg, "Impact", + "traffic+ntuple_cfg"); + } + } + + mutex_unlock(&h->lock); + if (!h->resets_reliable) + return 0; + + fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); + devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); + devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); + devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); + devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); + devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); + + return 0; +} + +static int bnxt_fw_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + u32 dump_len; + void *data; + int rc; + + /* TODO: no firmware dump support in devlink_health_report() context */ + if (priv_ctx) + return -EOPNOTSUPP; + + dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE); + if (!dump_len) + return -EIO; + + data = vmalloc(dump_len); + if (!data) + return -ENOMEM; + + rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len); + if (!rc) { +#ifndef HAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID + rc = devlink_fmsg_pair_nest_start(fmsg, "core"); + if (rc) + goto exit; + rc = 
devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); + if (rc) + goto exit; + rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); + if (rc) + goto exit; + rc = devlink_fmsg_pair_nest_end(fmsg); +#else + devlink_fmsg_pair_nest_start(fmsg, "core"); + devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len); + devlink_fmsg_u32_pair_put(fmsg, "size", dump_len); + devlink_fmsg_pair_nest_end(fmsg); + goto exit; +#endif + } +exit: + vfree(data); + return rc; +} + +static int bnxt_fw_recover(struct devlink_health_reporter *reporter, + void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + + if (bp->fw_health->severity == SEVERITY_FATAL) + return -ENODEV; + + set_bit(BNXT_STATE_RECOVER, &bp->state); + __bnxt_fw_recover(bp); + +#ifdef HAVE_DEVLINK_HEALTH_REPORTER_RECOVERY_DONE + return -EINPROGRESS; +#else + return 0; +#endif +} + +static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = { + .name = "fw", + .diagnose = bnxt_fw_diagnose, + .dump = bnxt_fw_dump, + .recover = bnxt_fw_recover, +}; + +static struct devlink_health_reporter * +__bnxt_dl_reporter_create(struct bnxt *bp, + const struct devlink_health_reporter_ops *ops) +{ + struct devlink_health_reporter *reporter; + +#ifndef HAVE_DEVLINK_HEALTH_AUTO_RECOVER + reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp); +#else + reporter = devlink_health_reporter_create(bp->dl, ops, 0, + !!ops->recover, bp); +#endif + if (IS_ERR(reporter)) { + netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n", + ops->name, PTR_ERR(reporter)); + return NULL; + } + + return reporter; +} + +void bnxt_dl_fw_reporters_create(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + + if (fw_health && !fw_health->fw_reporter) + fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops); +} + +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = 
bp->fw_health; + + if (fw_health && fw_health->fw_reporter) { + devlink_health_reporter_destroy(fw_health->fw_reporter); + fw_health->fw_reporter = NULL; + } +} + +void bnxt_devlink_health_fw_report(struct bnxt *bp) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + int rc; + + if (!fw_health) + return; + + if (!fw_health->fw_reporter) { + __bnxt_fw_recover(bp); + return; + } + + mutex_lock(&fw_health->lock); + fw_health->severity = SEVERITY_RECOVERABLE; + fw_health->remedy = REMEDY_DEVLINK_RECOVER; + mutex_unlock(&fw_health->lock); + rc = devlink_health_report(fw_health->fw_reporter, "FW error reported", + fw_health); + if (rc == -ECANCELED) + __bnxt_fw_recover(bp); +} + +void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy) +{ + struct bnxt_fw_health *fw_health = bp->fw_health; + u8 state; + + mutex_lock(&fw_health->lock); + if (healthy) { + fw_health->severity = SEVERITY_NORMAL; + state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY; + } else { + fw_health->severity = SEVERITY_FATAL; + fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE; + state = DEVLINK_HEALTH_REPORTER_STATE_ERROR; + } + mutex_unlock(&fw_health->lock); + devlink_health_reporter_state_update(fw_health->fw_reporter, state); +} + +void bnxt_dl_health_fw_recovery_done(struct bnxt *bp) +{ + struct bnxt_dl *dl = devlink_priv(bp->dl); + + devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter); + bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset); +} +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ + +#ifdef HAVE_DEVLINK_RELOAD_ACTION +static void +bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack, + struct hwrm_fw_livepatch_output *resp) +{ + int err = ((struct hwrm_err_output *)resp)->cmd_err; + + switch (err) { + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE: + netdev_err(bp->dev, "Illegal live patch opcode"); + NL_SET_ERR_MSG_MOD(extack, "Invalid opcode"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED: + NL_SET_ERR_MSG_MOD(extack, "Live patch 
operation not supported"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED: + NL_SET_ERR_MSG_MOD(extack, "Live patch not found"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED: + NL_SET_ERR_MSG_MOD(extack, + "Live patch deactivation failed. Firmware not patched."); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL: + NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER: + NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE: + NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size"); + break; + case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED: + NL_SET_ERR_MSG_MOD(extack, "Live patch already applied"); + break; + default: + netdev_err(bp->dev, "Unexpected live patch error: %d\n", err); + NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch"); + break; + } +} + +/* Live patch status in NVM */ +#define BNXT_LIVEPATCH_NOT_INSTALLED 0 +#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL +#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE +#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \ + FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) +#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK + +#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK) + +static int +bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack) +{ + struct hwrm_fw_livepatch_query_output *query_resp; + struct hwrm_fw_livepatch_query_input *query_req; + struct hwrm_fw_livepatch_output *patch_resp; + struct hwrm_fw_livepatch_input *patch_req; + u16 flags, live_patch_state; + bool activated = false; + u32 installed = 0; + u8 target; + int rc; + + if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) { + NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY); + if (rc) + 
return rc; + query_resp = hwrm_req_hold(bp, query_req); + + rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH); + if (rc) { + hwrm_req_drop(bp, query_req); + return rc; + } + patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL; + patch_resp = hwrm_req_hold(bp, patch_req); + + for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) { + query_req->fw_target = target; + rc = hwrm_req_send(bp, query_req); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to query packages"); + break; + } + + flags = le16_to_cpu(query_resp->status_flags); + live_patch_state = BNXT_LIVEPATCH_STATE(flags); + + if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED) + continue; + + if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) { + activated = true; + continue; + } + + if (live_patch_state == BNXT_LIVEPATCH_INSTALLED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE; + else if (live_patch_state == BNXT_LIVEPATCH_REMOVED) + patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE; + + patch_req->fw_target = target; + rc = hwrm_req_send(bp, patch_req); + if (rc) { + bnxt_dl_livepatch_report_err(bp, extack, patch_resp); + break; + } + installed++; + } + + if (!rc && !installed) { + if (activated) { + NL_SET_ERR_MSG_MOD(extack, "Live patch already activated"); + rc = -EEXIST; + } else { + NL_SET_ERR_MSG_MOD(extack, "No live patches found"); + rc = -ENOENT; + } + } + hwrm_req_drop(bp, query_req); + hwrm_req_drop(bp, patch_req); + return rc; +} + +static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) + __acquires(&rtnl_mutex) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc = 0; + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + rtnl_lock(); + if (BNXT_PF(bp) && (bp->pf.active_vfs || bp->sriov_cfg)) { + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported while VFs are allocated or being configured"); + rtnl_unlock(); + 
return -EOPNOTSUPP; + } + if (bp->dev->reg_state == NETREG_UNREGISTERED) { + rtnl_unlock(); + return -ENODEV; + } + if (bnxt_ulp_registered(bp->edev)) { + NL_SET_ERR_MSG_MOD(extack, "reload is unsupported while RoCE driver is loaded"); + rtnl_unlock(); + return -EOPNOTSUPP; + } + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, true); + bnxt_vf_reps_free(bp); + rc = bnxt_hwrm_func_drv_unrgtr(bp); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to deregister"); + if (netif_running(bp->dev)) + dev_close(bp->dev); + rtnl_unlock(); + break; + } + bnxt_cancel_reservations(bp, false); + bnxt_free_ctx_mem(bp); + break; + } + case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: { + if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) + return bnxt_dl_livepatch_activate(bp, extack); + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) { + NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot"); + return -EOPNOTSUPP; + } + if (!bnxt_hwrm_reset_permitted(bp)) { + NL_SET_ERR_MSG_MOD(extack, "Reset denied by firmware, it may be inhibited by remote driver"); + return -EPERM; + } + rtnl_lock(); + if (bp->dev->reg_state == NETREG_UNREGISTERED) { + rtnl_unlock(); + return -ENODEV; + } + if (netif_running(bp->dev)) + set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + rc = bnxt_hwrm_firmware_reset(bp->dev, + FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, + FW_RESET_REQ_FLAGS_RESET_GRACEFUL | + FW_RESET_REQ_FLAGS_FW_ACTIVATION); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware"); + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + rtnl_unlock(); + } + break; + } + default: + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action, + enum devlink_reload_limit limit, u32 *actions_performed, + struct netlink_ext_ack *extack) + __releases(&rtnl_mutex) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc = 0; + + *actions_performed = 0; + switch (action) { + case 
DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { + bnxt_fw_init_one(bp); + bnxt_vf_reps_alloc(bp); + if (netif_running(bp->dev)) + rc = bnxt_open_nic(bp, true, true); + if (!rc) { + bnxt_reenable_sriov(bp); + bnxt_ptp_reapply_pps(bp); + } + break; + } + case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: { + unsigned long start = jiffies; + unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10; + + if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET) + break; + if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) + timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10; + if (!netif_running(bp->dev)) + NL_SET_ERR_MSG_MOD(extack, "Device is closed, not waiting for reset notice that will never come"); + rtnl_unlock(); + while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) { + if (time_after(jiffies, timeout)) { + NL_SET_ERR_MSG_MOD(extack, "Activation incomplete"); + rc = -ETIMEDOUT; + break; + } + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + NL_SET_ERR_MSG_MOD(extack, "Activation aborted"); + rc = -ENODEV; + break; + } + msleep(50); + } + rtnl_lock(); + if (!rc) { + rc = bnxt_sync_firmware(bp); + if (rc) + NL_SET_ERR_MSG_MOD(extack, "Firmware sync failed"); + else + *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); + } + clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + break; + } + default: + return -EOPNOTSUPP; + } + + if (!rc) { + bnxt_print_device_info(bp); + if (netif_running(bp->dev)) { + mutex_lock(&bp->link_lock); + bnxt_report_link(bp); + mutex_unlock(&bp->link_lock); + } + *actions_performed |= BIT(action); + } else if (netif_running(bp->dev)) { + dev_close(bp->dev); + } + rtnl_unlock(); + return rc; +} +#endif /* HAVE_DEVLINK_RELOAD_ACTION */ + +#ifdef HAVE_DEVLINK_SELFTESTS_FEATURES +static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) +{ + bool rc = false; + u32 datalen; + u16 index; + u8 *buf; + + if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, NULL, &datalen) || 
!datalen) { + NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error"); + return false; + } + + buf = kzalloc(datalen, GFP_KERNEL); + if (!buf) { + NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test"); + return false; + } + + if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) { + NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error"); + goto done; + } + + if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST, + BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) { + NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error"); + goto done; + } + + rc = true; + +done: + kfree(buf); + return rc; +} + +static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id, + struct netlink_ext_ack *extack) +{ + return id == DEVLINK_ATTR_SELFTEST_ID_FLASH; +} + +static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl, + unsigned int id, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH) + return bnxt_nvm_test(bp, extack) ? 
+ DEVLINK_SELFTEST_STATUS_PASS : + DEVLINK_SELFTEST_STATUS_FAIL; + + return DEVLINK_SELFTEST_STATUS_SKIP; +} +#endif + +static const struct devlink_ops bnxt_dl_ops = { +#ifdef CONFIG_VF_REPS +#ifdef CONFIG_BNXT_SRIOV + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get, +#endif /* CONFIG_BNXT_SRIOV */ +#endif /* CONFIG_VF_REPS */ +#ifdef HAVE_DEVLINK_INFO + .info_get = bnxt_dl_info_get, +#endif /* HAVE_DEVLINK_INFO */ +#ifdef HAVE_DEVLINK_FLASH_UPDATE + .flash_update = bnxt_dl_flash_update, +#endif /* HAVE_DEVLINK_FLASH_UPDATE */ +#ifdef HAVE_DEVLINK_RELOAD_ACTION + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE), + .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET), + .reload_down = bnxt_dl_reload_down, + .reload_up = bnxt_dl_reload_up, +#endif /* HAVE_DEVLINK_RELOAD_ACTION */ +#ifdef HAVE_DEVLINK_SELFTESTS_FEATURES + .selftest_check = bnxt_dl_selftest_check, + .selftest_run = bnxt_dl_selftest_run, +#endif +}; + +static const struct devlink_ops bnxt_vf_dl_ops = { +#ifdef HAVE_DEVLINK_INFO + .info_get = bnxt_dl_info_get, +#endif /* HAVE_DEVLINK_INFO */ +}; + +#ifdef HAVE_DEVLINK_PARAM +enum bnxt_dl_param_id { + BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, + BNXT_DEVLINK_PARAM_ID_TRUFLOW, +}; + +static const struct bnxt_dl_nvm_param nvm_params[] = { + {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV, + BNXT_NVM_SHARED_CFG, 1, 1}, +#ifdef HAVE_IGNORE_ARI + {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI, + BNXT_NVM_SHARED_CFG, 1, 1}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4}, + {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4}, +#endif + {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, + BNXT_NVM_SHARED_CFG, 1, 1}, +}; + +static void bnxt_copy_to_nvm_data(union 
bnxt_nvm_data *dst, + union devlink_param_value *src, + int nvm_num_bits, int dl_num_bytes) +{ + u32 val32 = 0; + + if (nvm_num_bits == 1) { + dst->val8 = src->vbool; + return; + } + if (dl_num_bytes == 4) + val32 = src->vu32; + else if (dl_num_bytes == 2) + val32 = (u32)src->vu16; + else if (dl_num_bytes == 1) + val32 = (u32)src->vu8; + dst->val32 = cpu_to_le32(val32); +} + +static void bnxt_copy_from_nvm_data(union devlink_param_value *dst, + union bnxt_nvm_data *src, + int nvm_num_bits, int dl_num_bytes) +{ + u32 val32; + + if (nvm_num_bits == 1) { + dst->vbool = src->val8; + return; + } + val32 = le32_to_cpu(src->val32); + if (dl_num_bytes == 4) + dst->vu32 = val32; + else if (dl_num_bytes == 2) + dst->vu16 = (u16)val32; + else if (dl_num_bytes == 1) + dst->vu8 = (u8)val32; +} + +static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, + union devlink_param_value *val) +{ + struct hwrm_nvm_get_variable_input *req = msg; + struct bnxt_dl_nvm_param nvm_param; + struct hwrm_err_output *resp; + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + int idx = 0, rc, i; + + /* Get/Set NVM CFG parameter is supported only on PFs */ + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); + return -EPERM; + } + + for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { + if (nvm_params[i].id == param_id) { + nvm_param = nvm_params[i]; + break; + } + } + + if (i == ARRAY_SIZE(nvm_params)) { + hwrm_req_drop(bp, req); + return -EOPNOTSUPP; + } + + if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + idx = bp->pf.port_id; + else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) + idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; + + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + + if (!data) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); + req->option_num = cpu_to_le16(nvm_param.offset); + req->index_0 = cpu_to_le16(idx); + if (idx) + req->dimensions = 
cpu_to_le16(1); + + resp = hwrm_req_hold(bp, req); + if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { + bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, + nvm_param.dl_num_bytes); + rc = hwrm_req_send(bp, msg); + } else { + rc = hwrm_req_send_silent(bp, msg); + if (!rc) { + bnxt_copy_from_nvm_data(val, data, + nvm_param.nvm_num_bits, + nvm_param.dl_num_bytes); + } else { + if (resp->cmd_err == + NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) + rc = -EOPNOTSUPP; + } + } + hwrm_req_drop(bp, req); + if (rc == -EACCES) + netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); + return rc; +} + +static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct hwrm_nvm_get_variable_input *req; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); + if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + return rc; +} + +static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_set_variable_input *req; + int rc; + + if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; + + rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE); + if (rc) + return rc; + + return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); +} + +#ifdef HAVE_IGNORE_ARI +static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + int max_val = -1; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX) + max_val = BNXT_MSIX_VEC_MAX; + + if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN) + max_val = BNXT_MSIX_VEC_MIN_MAX; + + if (val.vu32 > max_val) { + NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range"); + return -EINVAL; + } + + return 
0; +} +#endif + +#ifdef HAVE_REMOTE_DEV_RESET +static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + return -EOPNOTSUPP; + + ctx->val.vbool = bnxt_dl_get_remote_reset(dl); + return 0; +} + +static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc; + + rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool); + if (rc) + return rc; + + bnxt_dl_set_remote_reset(dl, ctx->val.vbool); + return rc; +} +#endif /* HAVE_REMOTE_DEV_RESET */ + +static int bnxt_dl_truflow_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + + ctx->val.vbool = bp->dl_param_truflow; + return 0; +} + +static int bnxt_dl_truflow_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + int rc = 0; + + if (ctx->val.vbool) + rc = bnxt_devlink_tf_port_init(bp); + else + bnxt_devlink_tf_port_deinit(bp); + + if (!rc) + bp->dl_param_truflow = ctx->val.vbool; + + return rc; +} + +static const struct devlink_param bnxt_dl_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_SRIOV, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), +#ifdef HAVE_IGNORE_ARI + DEVLINK_PARAM_GENERIC(IGNORE_ARI, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_msix_validate), +#endif + DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, + "gre_ver_check", 
DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + NULL), + + DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_TRUFLOW, + "truflow", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + bnxt_dl_truflow_param_get, bnxt_dl_truflow_param_set, + NULL), +#ifdef HAVE_REMOTE_DEV_RESET + /* keep REMOTE_DEV_RESET last, it is excluded based on caps */ + DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + bnxt_remote_dev_reset_get, + bnxt_remote_dev_reset_set, NULL), +#endif +}; +#endif /* HAVE_DEVLINK_PARAM */ + +#ifdef HAVE_DEVLINK_PARAM +static int bnxt_dl_params_register(struct bnxt *bp) +{ + int num_params = ARRAY_SIZE(bnxt_dl_params); + int rc; + + if (bp->hwrm_spec_code < 0x10600) + return 0; + +#ifdef HAVE_REMOTE_DEV_RESET + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + num_params--; +#endif + + rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params); + if (rc) + netdev_warn(bp->dev, "devlink_params_register failed. 
rc=%d\n", + rc); + +#ifdef HAVE_DEVLINK_PARAM_PUBLISH + if (!rc) + devlink_params_publish(bp->dl); +#endif + return rc; +} +#endif /* HAVE_DEVLINK_PARAM */ + +static void bnxt_dl_params_unregister(struct bnxt *bp) +{ +#ifdef HAVE_DEVLINK_PARAM + int num_params = ARRAY_SIZE(bnxt_dl_params); + + if (bp->hwrm_spec_code < 0x10600) + return; + +#ifdef HAVE_REMOTE_DEV_RESET + if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF) + num_params--; +#endif + + devlink_params_unregister(bp->dl, bnxt_dl_params, num_params); +#endif /* HAVE_DEVLINK_PARAM */ +} + +int bnxt_dl_register(struct bnxt *bp) +{ + const struct devlink_ops *devlink_ops; +#ifdef HAVE_DEVLINK_PORT_ATTRS + struct devlink_port_attrs attrs = {}; +#endif + struct bnxt_dl *bp_dl; + struct devlink *dl; + int rc; + + if (BNXT_PF(bp)) + devlink_ops = &bnxt_dl_ops; + else + devlink_ops = &bnxt_vf_dl_ops; + dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed\n"); + return -ENOMEM; + } + + bp->dl = dl; + bp_dl = devlink_priv(dl); + bp_dl->bp = bp; + bnxt_dl_set_remote_reset(dl, true); + +#ifdef CONFIG_VF_REPS + /* Add switchdev eswitch mode setting, if SRIOV supported */ + if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) && + bp->hwrm_spec_code > 0x10803) + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; +#endif + + if (!BNXT_PF(bp)) + goto out; + +#ifdef HAVE_DEVLINK_PORT_ATTRS + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = bp->pf.port_id; + memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn)); + attrs.switch_id.id_len = sizeof(bp->dsn); + devlink_port_attrs_set(&bp->dl_port, &attrs); + rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); + if (rc) { + netdev_err(bp->dev, "devlink_port_register failed\n"); + goto err_dl_free; + } +#endif /* HAVE_DEVLINK_PORT_ATTRS */ + + rc = bnxt_dl_params_register(bp); + if (rc) + goto err_dl_port_unreg; + + devlink_set_features(dl, DEVLINK_F_RELOAD); +out: + 
devlink_register(dl); + + devlink_reload_enable(dl); + + return 0; + +err_dl_port_unreg: +#ifdef HAVE_DEVLINK_PORT_ATTRS + devlink_port_unregister(&bp->dl_port); +err_dl_free: +#endif /* HAVE_DEVLINK_PORT_ATTRS */ + devlink_free(dl); + return rc; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + devlink_reload_disable(dl); + + devlink_unregister(dl); + if (BNXT_PF(bp)) { + bnxt_dl_params_unregister(bp); +#ifdef HAVE_DEVLINK_PORT_ATTRS + devlink_port_unregister(&bp->dl_port); +#endif + } + devlink_free(dl); +} +#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +#ifndef HAVE_DEVLINK_HEALTH_REPORT +void bnxt_devlink_health_fw_report(struct bnxt *bp) +{ + if (!bp->fw_health) + return; + + __bnxt_fw_recover(bp); +} + +void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy) +{ +} + +void bnxt_dl_health_fw_recovery_done(struct bnxt *bp) +{ +} + +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.h new file mode 100644 index 000000000000..674d9dfe2a85 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink.h @@ -0,0 +1,133 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_DEVLINK_H +#define BNXT_DEVLINK_H + +#if defined(HAVE_DEVLINK_PARAM) +#include +#endif + +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ + bool remote_reset; +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +static inline bool bnxt_dl_get_remote_reset(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset; +} + +static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value) +{ + ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value; +} + +#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +union bnxt_nvm_data { + u8 val8; + __le32 val32; +}; + +#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 +#define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 +#define NVM_OFF_IGNORE_ARI 164 +#define NVM_OFF_DIS_GRE_VER_CHECK 171 +#define NVM_OFF_ENABLE_SRIOV 401 +#define NVM_OFF_MSIX_VEC_PER_VF 406 +#define NVM_OFF_NVM_CFG_VER 602 + +#define BNXT_NVM_CFG_VER_BITS 8 +#define BNXT_NVM_CFG_VER_BYTES 1 + +#define BNXT_MSIX_VEC_MAX 512 +#define BNXT_MSIX_VEC_MIN_MAX 128 + +#if defined(CONFIG_VF_REPS) || defined(HAVE_DEVLINK_PARAM) +#ifdef HAVE_DEVLINK_PARAM +enum bnxt_nvm_dir_type { + BNXT_NVM_SHARED_CFG = 40, + BNXT_NVM_PORT_CFG, + BNXT_NVM_FUNC_CFG, +}; + +struct bnxt_dl_nvm_param { + u16 id; + u16 offset; + u16 dir_type; + u16 nvm_num_bits; + u8 dl_num_bytes; +}; + +enum bnxt_dl_version_type { + BNXT_VERSION_FIXED, + BNXT_VERSION_RUNNING, + BNXT_VERSION_STORED, +}; +#else +static inline int bnxt_dl_params_register(struct bnxt *bp) +{ + return 0; +} +#endif /* HAVE_DEVLINK_PARAM */ + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); + +#else /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +static inline int bnxt_dl_register(struct bnxt *bp) +{ + return 0; +} + +static 
inline void bnxt_dl_unregister(struct bnxt *bp) +{ +} + +#endif /* CONFIG_VF_REPS || HAVE_DEVLINK_PARAM */ + +void bnxt_devlink_health_fw_report(struct bnxt *bp); +void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy); +void bnxt_dl_health_fw_recovery_done(struct bnxt *bp); +#ifdef HAVE_DEVLINK_HEALTH_REPORT +void bnxt_dl_fw_reporters_create(struct bnxt *bp); +void bnxt_dl_fw_reporters_destroy(struct bnxt *bp); +#else +static inline void bnxt_dl_fw_reporters_create(struct bnxt *bp) +{ +} + +static inline void bnxt_dl_fw_reporters_destroy(struct bnxt *bp) +{ +} +#endif /* HAVE_DEVLINK_HEALTH_REPORT */ +static inline void bnxt_dl_remote_reload(struct bnxt *bp) +{ +#ifdef HAVE_DEVLINK_RELOAD_ACTION + devlink_remote_reload_actions_performed(bp->dl, 0, + BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | + BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); +#endif +} + +int bnxt_hwrm_nvm_get_var(struct bnxt *bp, dma_addr_t data_dma_addr, + u16 offset, u16 dim, u16 index, u16 num_bits); +char *bnxt_health_severity_str(enum bnxt_health_severity severity); +char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy); +#endif /* BNXT_DEVLINK_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink_compat.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink_compat.h new file mode 100644 index 000000000000..f1b765e71256 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_devlink_compat.h @@ -0,0 +1,101 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2024 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ +#ifndef _BNXT_DEVLINK_COMPAT_H_ +#define _BNXT_DEVLINK_COMPAT_H_ + +#ifdef HAVE_DEVLINK +#include +#endif + +#ifdef HAVE_DEVLINK_HEALTH_REPORT +#ifndef HAVE_DEVLINK_FMSG_STRING_PAIR_PUT_VOID +static int bnxt_fw_diagnose_compat(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg) +{ + struct bnxt *bp = devlink_health_reporter_priv(reporter); + struct bnxt_fw_health *h = bp->fw_health; + u32 fw_status, fw_resets; + int rc; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering"); + + if (!h->status_reliable) + return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown"); + + mutex_lock(&h->lock); + fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + if (BNXT_FW_IS_BOOTING(fw_status)) { + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing"); + if (rc) + goto unlock; + } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) { + if (!h->severity) { + h->severity = SEVERITY_FATAL; + h->remedy = REMEDY_POWER_CYCLE_DEVICE; + h->diagnoses++; + devlink_health_report(h->fw_reporter, + "FW error diagnosed", h); + } + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error"); + if (rc) + goto unlock; + rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status); + if (rc) + goto unlock; + } else { + rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy"); + if (rc) + goto unlock; + } + + rc = devlink_fmsg_string_pair_put(fmsg, "Severity", + bnxt_health_severity_str(h->severity)); + if (rc) + goto unlock; + + if (h->severity) { + rc = devlink_fmsg_string_pair_put(fmsg, "Remedy", + bnxt_health_remedy_str(h->remedy)); + if (rc) + goto unlock; + if (h->remedy == REMEDY_DEVLINK_RECOVER) { + rc = devlink_fmsg_string_pair_put(fmsg, "Impact", + "traffic+ntuple_cfg"); + if (rc) + goto unlock; + } + } + +unlock: + mutex_unlock(&h->lock); + if (rc || !h->resets_reliable) + return rc; + + fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG); + rc = 
devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries); + if (rc) + return rc; + rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities); + if (rc) + return rc; + return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses); +} +#endif +#endif +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.c new file mode 100644 index 000000000000..d52ef04da769 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.c @@ -0,0 +1,68 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2020 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include "bnxt_compat.h" +#ifdef HAVE_DIM +#include +#else +#include "bnxt_dim.h" +#endif +#include "bnxt_hsi.h" +#include "bnxt.h" + +void bnxt_dim_work(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct bnxt_cp_ring_info *cpr = container_of(dim, + struct bnxt_cp_ring_info, + dim); + struct bnxt_napi *bnapi = container_of(cpr, + struct bnxt_napi, + cp_ring); + struct dim_cq_moder cur_moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + + cpr->rx_ring_coal.coal_ticks = cur_moder.usec; + cpr->rx_ring_coal.coal_bufs = cur_moder.pkts; + + bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); + dim->state = DIM_START_MEASURE; +} + +#ifndef HAVE_DIM +void net_dim(struct dim *dim, struct dim_sample end_sample) +{ + struct dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < DIM_NEVENTS) + break; + dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + fallthrough; + case DIM_START_MEASURE: + dim->state = DIM_MEASURE_IN_PROGRESS; + break; + case DIM_APPLY_NEW_PROFILE: + break; + } +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.h new file mode 100644 index 000000000000..cdef5d38519a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_dim.h @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * Copyright (c) 2018-2022, Broadcom Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include + +struct dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct dim { /* Adaptive Moderation */ + u8 state; + struct dim_stats prev_stats; + struct dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + DIM_START_MEASURE, + DIM_MEASURE_IN_PROGRESS, + DIM_APPLY_NEW_PROFILE, +}; + +enum { + DIM_PARKING_ON_TOP, + DIM_PARKING_TIRED, + DIM_GOING_RIGHT, + DIM_GOING_LEFT, +}; + +enum { + DIM_STATS_WORSE, + DIM_STATS_SAME, + DIM_STATS_BETTER, +}; + +enum { + DIM_STEPPED, + DIM_TOO_TIRED, + DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ +#define NET_DIM_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {4, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {32, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct dim_cq_moder +profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_EQE_PROFILES, + NET_DIM_CQE_PROFILES, +}; + +static inline struct dim_cq_moder +net_dim_get_rx_moderation(u8 
cq_period_mode, int ix) +{ + struct dim_cq_moder cq_moder = profile[cq_period_mode][ix]; + + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct dim_cq_moder +net_dim_get_def_rx_moderation(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = NET_DIM_DEF_PROFILE_CQE; + else /* DIM_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_rx_moderation(rx_cq_period_mode, default_profile_ix); +} + +static inline bool dim_on_top(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + return true; + case DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void dim_turn(struct dim *dim) +{ + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + dim->tune_state = DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case DIM_GOING_LEFT: + dim->tune_state = DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return DIM_TOO_TIRED; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + case DIM_PARKING_TIRED: + break; + case DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return DIM_STEPPED; +} + +static inline void dim_park_on_top(struct dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = DIM_PARKING_ON_TOP; +} + +static inline void dim_park_tired(struct dim 
*dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct dim *dim) +{ + dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : + DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct dim_stats *curr, + struct dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->ppms) + return curr->ppms ? DIM_STATS_BETTER : + DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER : + DIM_STATS_WORSE; + + if (!prev->epms) + return DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
DIM_STATS_BETTER : + DIM_STATS_WORSE; + + return DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct dim_stats *curr_stats, + struct dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case DIM_GOING_RIGHT: + case DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != DIM_STATS_BETTER) + dim_turn(dim); + + if (dim_on_top(dim)) { + dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case DIM_ON_EDGE: + dim_park_on_top(dim); + break; + case DIM_TOO_TIRED: + dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != DIM_PARKING_ON_TOP) || + (dim->tune_state != DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void dim_update_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define DIM_NEVENTS 320 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void dim_calc_stats(struct dim_sample *start, + struct dim_sample *end, + struct dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * 
USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +void net_dim(struct dim *dim, struct dim_sample end_sample); + +#endif /* NET_DIM_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.c new file mode 100644 index 000000000000..19d6ecdb8d85 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.c @@ -0,0 +1,6292 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#ifdef HAVE_ETHTOOL_TCP_DATA_SPLIT +#include +#endif +#ifdef HAVE_LINKMODE +#include +#endif +#include +#include +#include +#include +#include +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) +#include +#include +#include +#endif +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) +#include "bnxt_coredump.h" +#endif +#include "bnxt_hwrm.h" +#include "bnxt_ulp.h" +#include "bnxt_xdp.h" +#include "bnxt_ptp.h" +#include "bnxt_ethtool.h" +#include "bnxt_sriov.h" +#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */ +#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ +#include "bnxt_mpc.h" +#include "bnxt_ktls.h" + +#define FLASH_NVRAM_TIMEOUT (bp->hwrm_cmd_max_timeout) +#define FLASH_PACKAGE_TIMEOUT (bp->hwrm_cmd_max_timeout) +#define INSTALL_PACKAGE_TIMEOUT (bp->hwrm_cmd_max_timeout) +#define FW_SYNC_TIMEOUT (bp->hwrm_cmd_max_timeout) + +#define BNXT_NVM_ERR_MSG(dev, extack, msg) \ 
+ do { \ + if (extack) \ + NL_SET_ERR_MSG_MOD(extack, msg); \ + netdev_err(dev, "%s\n", msg); \ + } while (0) + +enum bnxt_priv_flags { + BNXT_PRIV_FLAG_NUMA_DIRECT, + BNXT_PRIV_FLAG_CORE_RESET_TX_TIMEOUT, + BNXT_PRIV_FLAG_RSS_IPV6_FLOW_LABEL_EN, +}; + +static const char * const bnxt_priv_flags[] = { + [BNXT_PRIV_FLAG_NUMA_DIRECT] = "numa_direct", + [BNXT_PRIV_FLAG_CORE_RESET_TX_TIMEOUT] = "core_reset_tx_timeout", + [BNXT_PRIV_FLAG_RSS_IPV6_FLOW_LABEL_EN] = "ipv6_flow_label_rss_en", +}; + +static u32 bnxt_get_msglevel(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + return bp->msg_enable; +} + +static void bnxt_set_msglevel(struct net_device *dev, u32 value) +{ + struct bnxt *bp = netdev_priv(dev); + + bp->msg_enable = value; +} + +static int bnxt_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_coal *hw_coal; + u16 mult; + + memset(coal, 0, sizeof(*coal)); + + coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; + + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + coal->rx_coalesce_usecs = hw_coal->coal_ticks; + coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; +#ifdef HAVE_CQE_ETHTOOL_COALESCE + if (hw_coal->flags & + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) + kernel_coal->use_cqe_mode_rx = true; +#endif + + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + coal->tx_coalesce_usecs = hw_coal->coal_ticks; + coal->tx_max_coalesced_frames = hw_coal->coal_bufs/ mult; + coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; +#ifdef HAVE_CQE_ETHTOOL_COALESCE + if (hw_coal->flags & + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET) + 
kernel_coal->use_cqe_mode_tx = true; +#endif + + coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; + + return 0; +} + +#ifdef HAVE_ETHTOOL_GET_PER_QUEUE_COAL +static int bnxt_get_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_cp_ring_info *cpr; + struct bnxt_coal *hw_coal; + u16 mult; + + if (queue >= bp->rx_nr_rings && bp->flags & BNXT_FLAG_SHARED_RINGS) + return -EINVAL; + + if (queue >= bp->rx_nr_rings && queue >= bp->tx_nr_rings_per_tc) + return -EINVAL; + + if (queue >= bp->rx_nr_rings) + goto tx_coal; + + coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; + + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + + if (!bp->bnapi) { + coal->rx_coalesce_usecs = hw_coal->coal_ticks; + coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; + } else { + cpr = &bp->bnapi[queue]->cp_ring; + coal->rx_coalesce_usecs = cpr->rx_ring_coal.coal_ticks; + coal->rx_max_coalesced_frames = cpr->rx_ring_coal.coal_bufs / mult; + } + coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; + +tx_coal: + if (queue >= bp->tx_nr_rings_per_tc) + goto skip_tx_coal; + + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + coal->tx_coalesce_usecs = hw_coal->coal_ticks; + coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; + +skip_tx_coal: + return 0; +} +#endif + +static int bnxt_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + bool update_stats = false; + struct bnxt_coal *hw_coal; + int rc = 0; + u16 mult; + + if (coal->use_adaptive_rx_coalesce) { + bp->flags |= BNXT_FLAG_DIM; + } else { + if (bp->flags & 
BNXT_FLAG_DIM) { + bp->flags &= ~(BNXT_FLAG_DIM); + goto reset_coalesce; + } + } + +#ifdef HAVE_CQE_ETHTOOL_COALESCE + if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) && + !(bp->coal_cap.cmpl_params & + RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)) + return -EOPNOTSUPP; +#endif + + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->rx_coalesce_usecs; + hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; +#ifdef HAVE_CQE_ETHTOOL_COALESCE + hw_coal->flags &= + ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if (kernel_coal->use_cqe_mode_rx) + hw_coal->flags |= + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; +#endif + + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->tx_coalesce_usecs; + hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; +#ifdef HAVE_CQE_ETHTOOL_COALESCE + hw_coal->flags &= + ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if (kernel_coal->use_cqe_mode_tx) + hw_coal->flags |= + RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; +#endif + + if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { + u32 stats_ticks = coal->stats_block_coalesce_usecs; + + /* Allow 0, which means disable. 
*/ + if (stats_ticks) + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); + stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); + bp->stats_coal_ticks = stats_ticks; + if (bp->stats_coal_ticks) + bp->current_interval = + bp->stats_coal_ticks * HZ / 1000000; + else + bp->current_interval = BNXT_TIMER_INTERVAL; + update_stats = true; + } + +reset_coalesce: + if (test_bit(BNXT_STATE_OPEN, &bp->state)) { + if (update_stats) { + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_hwrm_set_coal(bp); + } + } + + return rc; +} + +static const char *const bnxt_ring_rx_stats_str[] = { + "rx_ucast_packets", + "rx_mcast_packets", + "rx_bcast_packets", + "rx_discards", + "rx_errors", + "rx_ucast_bytes", + "rx_mcast_bytes", + "rx_bcast_bytes", +}; + +static const char *const bnxt_ring_tx_stats_str[] = { + "tx_ucast_packets", + "tx_mcast_packets", + "tx_bcast_packets", + "tx_errors", + "tx_discards", + "tx_ucast_bytes", + "tx_mcast_bytes", + "tx_bcast_bytes", +}; + +static const char *bnxt_ring_tpa_stats_str[] = { + "tpa_packets", + "tpa_bytes", + "tpa_events", + "tpa_aborts", +}; + +static const char *bnxt_ring_tpa2_stats_str[] = { + "rx_tpa_eligible_pkt", + "rx_tpa_eligible_bytes", + "rx_tpa_pkt", + "rx_tpa_bytes", + "rx_tpa_errors", + "rx_tpa_events", +}; + +static const char *const bnxt_rx_sw_stats_str[] = { + "rx_hds_pkt", + "rx_tpa_hds_pkt", + "rx_l4_csum_errors", + "rx_resets", + "rx_buf_errors", +}; + +static const char *const bnxt_tx_sw_push_stats_str[] = { + "tx_push_xmit", + "tx_push_cmpl", +}; + +static const char *const bnxt_cmn_sw_stats_str[] = { + "missed_irqs", +}; + +static const char *const bnxt_txtime_sw_stats_str[] = { + "so_txtime_xmit", + "so_txtime_cmpl_errors", +}; + +#define BNXT_RX_STATS_ENTRY(counter) \ + { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } + +#define BNXT_TX_STATS_ENTRY(counter) \ + { BNXT_TX_STATS_OFFSET(counter), 
__stringify(counter) } + +#define BNXT_RX_STATS_EXT_ENTRY(counter) \ + { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } + +#define BNXT_TX_STATS_EXT_ENTRY(counter) \ + { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) } + +#define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \ + BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions) + +#define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \ + BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \ + BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions) + +#define BNXT_RX_STATS_EXT_PFC_ENTRIES \ + BNXT_RX_STATS_EXT_PFC_ENTRY(0), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(1), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(2), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(3), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(4), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(5), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(6), \ + BNXT_RX_STATS_EXT_PFC_ENTRY(7) + +#define BNXT_TX_STATS_EXT_PFC_ENTRIES \ + BNXT_TX_STATS_EXT_PFC_ENTRY(0), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(1), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(2), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(3), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(4), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(5), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(6), \ + BNXT_TX_STATS_EXT_PFC_ENTRY(7) + +/* If BNXT_RX_STATS_EXT_ENTRY changes, bnxt_port_stats_cos_strcpy() + * should be modified accordingly + */ +#define BNXT_RX_STATS_EXT_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n) + +/* If BNXT_TX_STATS_EXT_ENTRY changes, bnxt_port_stats_cos_strcpy() + * should be modified accordingly + */ +#define BNXT_TX_STATS_EXT_COS_ENTRY(n) \ + BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \ + BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n) + +#define BNXT_RX_STATS_EXT_COS_ENTRIES \ + BNXT_RX_STATS_EXT_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_COS_ENTRY(6), \ + 
BNXT_RX_STATS_EXT_COS_ENTRY(7) \ + +#define BNXT_TX_STATS_EXT_COS_ENTRIES \ + BNXT_TX_STATS_EXT_COS_ENTRY(0), \ + BNXT_TX_STATS_EXT_COS_ENTRY(1), \ + BNXT_TX_STATS_EXT_COS_ENTRY(2), \ + BNXT_TX_STATS_EXT_COS_ENTRY(3), \ + BNXT_TX_STATS_EXT_COS_ENTRY(4), \ + BNXT_TX_STATS_EXT_COS_ENTRY(5), \ + BNXT_TX_STATS_EXT_COS_ENTRY(6), \ + BNXT_TX_STATS_EXT_COS_ENTRY(7) \ + +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \ + BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n) + +#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \ + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7) + +#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ + { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ + __stringify(counter##_pri##n) } + +#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ + { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ + __stringify(counter##_pri##n) } + +#define BNXT_RX_STATS_PRI_ENTRIES(counter) \ + BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 7) + +#define BNXT_TX_STATS_PRI_ENTRIES(counter) \ + BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 7) + +static const char *const bnxt_ecn_port_stats_arr[] = { + 
"ecnmark_pkts_cos0", + "ecnmark_pkts_cos1", + "ecnmark_pkts_cos2", + "ecnmark_pkts_cos3", + "ecnmark_pkts_cos4", + "ecnmark_pkts_cos5", + "ecnmark_pkts_cos6", + "ecnmark_pkts_cos7", +}; + +enum { + RX_TOTAL_DISCARDS, + TX_TOTAL_DISCARDS, + RX_OOM_DISCARDS, + RX_NETPOLL_DISCARDS, +}; + +static const char *const bnxt_ring_err_stats_arr[] = { + "rx_total_l4_csum_errors", + "rx_total_resets", + "rx_total_buf_errors", + "rx_total_oom_discards", + "rx_total_netpoll_discards", + "rx_total_ring_discards", + "tx_total_ring_discards", + "total_missed_irqs", +}; + +static const char *const bnxt_ktls_stats[] = { + "ktls_tx_add", + "ktls_tx_del", + "ktls_tx_hw_pkt", + "ktls_tx_sw_pkt", + "ktls_tx_tls_ooo", + "ktls_tx_retrans_pkt", + "ktls_tx_replay", + "ktls_rx_add", + "ktls_rx_del", + "ktls_rx_hw_pkt", + "ktls_rx_sw_pkt", + "ktls_rx_resync_req", + "ktls_rx_resync_ack", + "ktls_rx_resync_discard", + "ktls_rx_resync_nak", +}; + +static const char *const bnxt_generic_stats[] = { + "pcie_statistics_tx_tlp", + "pcie_statistics_rx_tlp", + "pcie_credit_fc_hdr_posted", + "pcie_credit_fc_hdr_nonposted", + "pcie_credit_fc_hdr_cmpl", + "pcie_credit_fc_data_posted", + "pcie_credit_fc_data_nonposted", + "pcie_credit_fc_data_cmpl", + "pcie_credit_fc_tgt_nonposted", + "pcie_credit_fc_tgt_data_posted", + "pcie_credit_fc_tgt_hdr_posted", + "pcie_credit_fc_cmpl_hdr_posted", + "pcie_credit_fc_cmpl_data_posted", + "pcie_cmpl_longest", + "pcie_cmpl_shortest", + "cache_miss_count_cfcq", + "cache_miss_count_cfcs", + "cache_miss_count_cfcc", + "cache_miss_count_cfcm", + "hw_db_recov_dbs_dropped", + "hw_db_recov_drops_serviced", + "hw_db_recov_dbs_recovered", + "hw_db_recov_oo_drop_count", +}; + +static const char *const bnxt_xsk_stats[] = { + "xsk_rx_success", + "xsk_rx_redirect_fail", + "xsk_rx_alloc_fail", + "xsk_rx_no_room", + "xsk_tx_ring_full", + "xsk_wakeup", + "xsk_tx_completed", + "xsk_tx_sent_pkts", +}; + +static const char *const bnxt_lpbk_stats[] = { + "lpbk_ucast_frames", + 
"lpbk_mcast_frames", + "lpbk_bcast_frames", + "lpbk_ucast_bytes", + "lpbk_mcast_bytes", + "lpbk_bcast_bytes", + "lpbk_tx_discards", + "lpbk_tx_errors", + "lpbk_rx_discards", + "lpbk_rx_errors", +}; + +struct stats_entry { + long offset; + char string[ETH_GSTRING_LEN]; +}; + +#define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) +#define NUM_RING_TX_PUSH_SW_STATS ARRAY_SIZE(bnxt_tx_sw_push_stats_str) +#define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str) +#define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str) +#define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str) +#define NUM_RING_TXTIME_SW_STATS ARRAY_SIZE(bnxt_txtime_sw_stats_str) + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_arr[] = { + BNXT_RX_STATS_ENTRY(rx_64b_frames), + BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), + BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), + BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), + BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), + BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames), + BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), + BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), + BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), + BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), + BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), + BNXT_RX_STATS_ENTRY(rx_total_frames), + BNXT_RX_STATS_ENTRY(rx_ucast_frames), + BNXT_RX_STATS_ENTRY(rx_mcast_frames), + BNXT_RX_STATS_ENTRY(rx_bcast_frames), + BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), + BNXT_RX_STATS_ENTRY(rx_ctrl_frames), + BNXT_RX_STATS_ENTRY(rx_pause_frames), + BNXT_RX_STATS_ENTRY(rx_pfc_frames), + BNXT_RX_STATS_ENTRY(rx_align_err_frames), + BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), + BNXT_RX_STATS_ENTRY(rx_jbr_frames), + BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), + BNXT_RX_STATS_ENTRY(rx_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_good_frames), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1), + 
BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6), + BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7), + BNXT_RX_STATS_ENTRY(rx_undrsz_frames), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), + BNXT_RX_STATS_ENTRY(rx_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_frames), + BNXT_RX_STATS_ENTRY(rx_stat_discard), + BNXT_RX_STATS_ENTRY(rx_stat_err), + + BNXT_TX_STATS_ENTRY(tx_64b_frames), + BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), + BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), + BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), + BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), + BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames), + BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), + BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames), + BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), + BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), + BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), + BNXT_TX_STATS_ENTRY(tx_good_frames), + BNXT_TX_STATS_ENTRY(tx_total_frames), + BNXT_TX_STATS_ENTRY(tx_ucast_frames), + BNXT_TX_STATS_ENTRY(tx_mcast_frames), + BNXT_TX_STATS_ENTRY(tx_bcast_frames), + BNXT_TX_STATS_ENTRY(tx_pause_frames), + BNXT_TX_STATS_ENTRY(tx_pfc_frames), + BNXT_TX_STATS_ENTRY(tx_jabber_frames), + BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), + BNXT_TX_STATS_ENTRY(tx_err), + BNXT_TX_STATS_ENTRY(tx_fifo_underruns), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6), + BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), + 
BNXT_TX_STATS_ENTRY(tx_total_collisions), + BNXT_TX_STATS_ENTRY(tx_bytes), + BNXT_TX_STATS_ENTRY(tx_xthol_frames), + BNXT_TX_STATS_ENTRY(tx_stat_discard), + BNXT_TX_STATS_ENTRY(tx_stat_error), +}; + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_ext_arr[] = { + BNXT_RX_STATS_EXT_ENTRY(link_down_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), + BNXT_RX_STATS_EXT_COS_ENTRIES, + BNXT_RX_STATS_EXT_PFC_ENTRIES, + BNXT_RX_STATS_EXT_ENTRY(rx_bits), + BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold), + BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err), + BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits), + BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES, + BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks), + BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks), + BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss), +}; + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_port_stats_ext_arr[] = { + BNXT_TX_STATS_EXT_COS_ENTRIES, + BNXT_TX_STATS_EXT_PFC_ENTRIES, +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_rx_bytes_pri_arr[] = { + BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_rx_pkts_pri_arr[] = { + BNXT_RX_STATS_PRI_ENTRIES(rx_packets), +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_bytes_pri_arr[] = { + BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_pkts_pri_arr[] = { + BNXT_TX_STATS_PRI_ENTRIES(tx_packets), +}; + +#define BNXT_NUM_ECN_PORT_STATS ARRAY_SIZE(bnxt_ecn_port_stats_arr) +#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr) +#define BNXT_NUM_KTLS_STATS ARRAY_SIZE(bnxt_ktls_stats) +#define BNXT_NUM_PORT_STATS 
ARRAY_SIZE(bnxt_port_stats_arr) +#define BNXT_NUM_STATS_PRI \ + (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ + ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ + ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ + ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) +#define BNXT_NUM_GENERIC_STATS ARRAY_SIZE(bnxt_generic_stats) +#define BNXT_NUM_XSK_STATS ARRAY_SIZE(bnxt_xsk_stats) +#define BNXT_NUM_LPBK_STATS ARRAY_SIZE(bnxt_lpbk_stats) + +static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) +{ + if (BNXT_SUPPORTS_TPA(bp)) { + if (bp->max_tpa_v2) { + if (BNXT_CHIP_P5(bp)) + return BNXT_NUM_TPA_RING_STATS_P5; + return BNXT_NUM_TPA_RING_STATS_P7; + } + return BNXT_NUM_TPA_RING_STATS; + } + return 0; +} + +static int bnxt_get_num_tx_sw_push_stats(struct bnxt *bp) +{ + if (bp->tx_push_mode == BNXT_PUSH_MODE_NONE) + return 0; + return NUM_RING_TX_PUSH_SW_STATS; +} + +static int bnxt_get_num_txtime_sw_stats(struct bnxt *bp) +{ + if (BNXT_SUPPORTS_ETF(bp)) + return NUM_RING_TXTIME_SW_STATS; + return 0; +} + +static int bnxt_get_num_ring_stats(struct bnxt *bp) +{ + int rx, tx, cmn, xsk; + + rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS + + bnxt_get_num_tpa_ring_stats(bp); + tx = NUM_RING_TX_HW_STATS + bnxt_get_num_tx_sw_push_stats(bp) + + bnxt_get_num_txtime_sw_stats(bp); + cmn = NUM_RING_CMN_SW_STATS; + xsk = BNXT_NUM_XSK_STATS; + + return rx * bp->rx_nr_rings + + tx * (bp->tx_nr_rings_xdp + bp->tx_nr_rings_per_tc) + + cmn * bp->cp_nr_rings + + xsk * bp->cp_nr_rings; +} + +static int bnxt_skip_port_stats_ext_count(struct bnxt *bp) +{ + int skip = 0; + + if (!BNXT_CHIP_P5_PLUS(bp)) + return 0; + + if (bp->fw_rx_stats_ext_size > + BNXT_RX_STATS_EXT_OFFSET(pfc_pri0_rx_duration_us)) + skip = BNXT_NUM_RX_PFC_DURATION_STATS; + + if (bp->fw_tx_stats_ext_size > + BNXT_TX_STATS_EXT_OFFSET(pfc_pri0_tx_duration_us)) + skip += BNXT_NUM_TX_PFC_DURATION_STATS; + + return skip; +} + +#define BNXT_DURATION_OFFSET(base, offset) ((offset) >= (base) && \ + (offset) < ((base) + BNXT_NUM_PFC_DURATION_STATS) && \ + ((offset) - (base)) 
% 2 == 0) + +static bool bnxt_skip_port_stats_ext(struct bnxt *bp, u32 idx) +{ + long base = BNXT_RX_STATS_EXT_OFFSET(pfc_pri0_rx_duration_us); + long offset = bnxt_port_stats_ext_arr[idx].offset; + + if (BNXT_CHIP_P5_PLUS(bp)) + return BNXT_DURATION_OFFSET(base, offset); + + return false; +} + +static bool bnxt_skip_tx_port_stats_ext(struct bnxt *bp, u32 idx) +{ + long base = BNXT_TX_STATS_EXT_OFFSET(pfc_pri0_tx_duration_us); + long offset = bnxt_tx_port_stats_ext_arr[idx].offset; + + if (BNXT_CHIP_P5_PLUS(bp)) + return BNXT_DURATION_OFFSET(base, offset); + + return false; +} + +static int bnxt_get_num_stats(struct bnxt *bp) +{ + int num_stats = bnxt_get_num_ring_stats(bp); + int len; + + num_stats += BNXT_NUM_RING_ERR_STATS; + + if (bp->ktls_info) + num_stats += BNXT_NUM_KTLS_STATS; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + num_stats += len; + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + num_stats += len; + if (bp->pri2cos_valid) + num_stats += BNXT_NUM_STATS_PRI; + num_stats -= bnxt_skip_port_stats_ext_count(bp); + } + if (bp->flags & BNXT_FLAG_ECN_STATS) + num_stats += BNXT_NUM_ECN_PORT_STATS; + + if (bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS) { + if (bp->generic_stats.hw_stats) + num_stats += BNXT_NUM_GENERIC_STATS; + } + + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + if (bp->lpbk_stats.hw_stats) + num_stats += BNXT_NUM_LPBK_STATS; + } + return num_stats; +} + +static bool bnxt_core_reset_avail(struct bnxt *bp) +{ + if (!BNXT_PF(bp) || + (pci_vfs_assigned(bp->pdev) && + !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) || + bp->hwrm_spec_code < 0x10803) + return false; + else + return true; +} + +static int bnxt_get_sset_count(struct net_device *dev, int sset) +{ + struct bnxt *bp = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return 
bnxt_get_num_stats(bp); + case ETH_SS_TEST: + if (!bp->num_tests) + return -EOPNOTSUPP; + return bp->num_tests; + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(bnxt_priv_flags); + default: + return -EOPNOTSUPP; + } +} + +static bool is_rx_ring(struct bnxt *bp, int ring_num) +{ + return ring_num < bp->rx_nr_rings; +} + +static bool is_tx_ring(struct bnxt *bp, int ring_num) +{ + int tx_base = 0; + + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) + tx_base = bp->rx_nr_rings; + + if (ring_num >= tx_base && ring_num < (tx_base + bp->tx_nr_rings)) + return true; + return false; +} + +static void bnxt_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + u32 i, j = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_total_ring_err_stats ring_err_stats = {0}; + int buf_size = bnxt_get_num_stats(bp) * sizeof(u64); + u64 *curr, *prev; + u32 tpa_stats; + + memset(buf, 0, buf_size); + + if (!bp->bnapi) { + j += bnxt_get_num_ring_stats(bp); + goto skip_ring_stats; + } + + tpa_stats = bnxt_get_num_tpa_ring_stats(bp); + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + u64 *sw_stats = cpr->stats.sw_stats; + u64 *sw; + int k; + + if (is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) + buf[j] = sw_stats[k]; + } + if (is_tx_ring(bp, i)) { + k = NUM_RING_RX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + j++, k++) + buf[j] = sw_stats[k]; + } + if (!tpa_stats || !is_rx_ring(bp, i)) + goto skip_tpa_ring_stats; + + k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; + for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + + tpa_stats; j++, k++) + buf[j] = sw_stats[k]; + +skip_tpa_ring_stats: + sw = (u64 *)&cpr->sw_stats->rx; + if (is_rx_ring(bp, i)) { + for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++) + buf[j] = sw[k]; + } + + sw = (u64 *)&cpr->sw_stats->tx; + if (is_tx_ring(bp, i)) { + for (k = 0; k < 
bnxt_get_num_tx_sw_push_stats(bp); j++, k++) + buf[j] = sw[k]; + + sw = (u64 *)&cpr->sw_stats->txtime; + if (is_tx_ring(bp, i)) { + for (k = 0; k < bnxt_get_num_txtime_sw_stats(bp); j++, k++) + buf[j] = sw[k]; + } + } + + sw = (u64 *)&cpr->sw_stats->cmn; + for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) + buf[j] = sw[k]; + + /* xsk stats */ + sw = (u64 *)&cpr->sw_stats->xsk_stats; + for (k = 0; k < BNXT_NUM_XSK_STATS; j++, k++) + buf[j] = sw[k]; + } + + bnxt_get_ring_err_stats(bp, &ring_err_stats); + +skip_ring_stats: + curr = &ring_err_stats.rx_total_l4_csum_errors; + prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors; + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++) + buf[j] = *curr + *prev; + + if (bp->ktls_info) { + struct bnxt_ktls_info *ktls = bp->ktls_info; + + for (i = 0; i < BNXT_NUM_KTLS_STATS; i++, j++) + buf[j] = atomic64_read(&ktls->counters[i]); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + u64 *port_stats = bp->port_stats.sw_stats; + + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) + buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); + } + + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; + u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++) { + if (bnxt_skip_port_stats_ext(bp, i)) + continue; + + buf[j++] = *(rx_port_stats_ext + + bnxt_port_stats_ext_arr[i].offset); + } + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++) { + if (bnxt_skip_tx_port_stats_ext(bp, i)) + continue; + + buf[j++] = *(tx_port_stats_ext + + bnxt_tx_port_stats_ext_arr[i].offset); + } + if (bp->pri2cos_valid) { + for (i = 0; i < 8; i++, j++) { + long n = bnxt_rx_bytes_pri_arr[i].base_off + + bp->rx_pri2cos_idx[i]; + + buf[j] = *(rx_port_stats_ext + n); + } + for (i = 0; i < 8; i++, j++) { + long n = 
bnxt_rx_pkts_pri_arr[i].base_off + + bp->rx_pri2cos_idx[i]; + + buf[j] = *(rx_port_stats_ext + n); + } + for (i = 0; i < 8; i++, j++) { + long n = bnxt_tx_bytes_pri_arr[i].base_off + + bp->tx_pri2cos_idx[i]; + + buf[j] = *(tx_port_stats_ext + n); + } + for (i = 0; i < 8; i++, j++) { + long n = bnxt_tx_pkts_pri_arr[i].base_off + + bp->tx_pri2cos_idx[i]; + + buf[j] = *(tx_port_stats_ext + n); + } + } + } + if (bp->flags & BNXT_FLAG_ECN_STATS) { + u64 *port_stats = bp->ecn_marked_stats.sw_stats; + + for (i = 0; i < BNXT_NUM_ECN_PORT_STATS; i++, j++) + buf[j] = *(port_stats++); + } + + if (bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS) { + u64 *hw = bp->generic_stats.hw_stats; + + if (!hw) + goto skip_gen_stats; + + for (i = 0; i < BNXT_NUM_GENERIC_STATS; i++, j++) + buf[j] = le64_to_cpu(hw[i]); + } + +skip_gen_stats: + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + u64 *hw = bp->lpbk_stats.hw_stats; + + if (!hw) + return; + + for (i = 0; i < BNXT_NUM_LPBK_STATS; i++, j++) + buf[j] = le64_to_cpu(hw[i]); + } +} + +static void bnxt_port_stats_cos_strcpy(char *cosq_names, int base, char *out, const char *in) +{ + int len = 0; + + while ((*out++ = *in++)) + len++; + if (!strncmp(out - 6, "_cos", 4)) { + int id = *(out - 2) - '0'; + + if (cosq_names && cosq_names[base + (BNXT_MAX_COSQ_NAME_LEN * id)]) { + strncpy(out - 2, &cosq_names[base + (BNXT_MAX_COSQ_NAME_LEN * id)], + ETH_GSTRING_LEN - len + 1); + out[ETH_GSTRING_LEN - len - 2] = '\0'; + } + } +} + +static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + struct bnxt *bp = netdev_priv(dev); + static const char **str; + u32 i, j, num_str; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < bp->cp_nr_rings; i++) { + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_HW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_ring_rx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + if (is_tx_ring(bp, i)) { + num_str = NUM_RING_TX_HW_STATS; + for (j = 0; j < num_str; j++) { 
+ sprintf(buf, "[%d]: %s", i, + bnxt_ring_tx_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = bnxt_get_num_tpa_ring_stats(bp); + if (!num_str || !is_rx_ring(bp, i)) + goto skip_tpa_stats; + + if (bp->max_tpa_v2) + str = bnxt_ring_tpa2_stats_str; + else + str = bnxt_ring_tpa_stats_str; + + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, str[j]); + buf += ETH_GSTRING_LEN; + } +skip_tpa_stats: + if (is_rx_ring(bp, i)) { + num_str = NUM_RING_RX_SW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_rx_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + if (is_tx_ring(bp, i)) { + num_str = bnxt_get_num_tx_sw_push_stats(bp); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_tx_sw_push_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + + num_str = bnxt_get_num_txtime_sw_stats(bp); + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_txtime_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + } + num_str = NUM_RING_CMN_SW_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, + bnxt_cmn_sw_stats_str[j]); + buf += ETH_GSTRING_LEN; + } + num_str = BNXT_NUM_XSK_STATS; + for (j = 0; j < num_str; j++) { + sprintf(buf, "[%d]: %s", i, bnxt_xsk_stats[j]); + buf += ETH_GSTRING_LEN; + } + } + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) { + strcpy(buf, bnxt_ring_err_stats_arr[i]); + buf += ETH_GSTRING_LEN; + } + if (bp->ktls_info) { + for (i = 0; i < BNXT_NUM_KTLS_STATS; i++) { + strcpy(buf, bnxt_ktls_stats[i]); + buf += ETH_GSTRING_LEN; + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { + strcpy(buf, bnxt_port_stats_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + u32 len; + + len = min_t(u32, bp->fw_rx_stats_ext_size, + ARRAY_SIZE(bnxt_port_stats_ext_arr)); + for (i = 0; i < len; i++) { + if (bnxt_skip_port_stats_ext(bp, i)) + continue; + + bnxt_port_stats_cos_strcpy(bp->rx_cosq_names, 
BNXT_MAX_QUEUE * + BNXT_MAX_COSQ_NAME_LEN, buf, + bnxt_port_stats_ext_arr[i].string); + buf += ETH_GSTRING_LEN; + } + len = min_t(u32, bp->fw_tx_stats_ext_size, + ARRAY_SIZE(bnxt_tx_port_stats_ext_arr)); + for (i = 0; i < len; i++) { + if (bnxt_skip_tx_port_stats_ext(bp, i)) + continue; + + bnxt_port_stats_cos_strcpy(bp->tx_cosq_names, 0, buf, + bnxt_tx_port_stats_ext_arr[i].string); + buf += ETH_GSTRING_LEN; + } + if (bp->pri2cos_valid) { + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_rx_bytes_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_rx_pkts_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_tx_bytes_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_tx_pkts_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } + } + if (bp->flags & BNXT_FLAG_ECN_STATS) { + for (i = 0; i < BNXT_NUM_ECN_PORT_STATS; i++) { + bnxt_port_stats_cos_strcpy(bp->rx_cosq_names, 0, buf, + bnxt_ecn_port_stats_arr[i]); + if (bp->rx_cosq_names) + bnxt_port_stats_cos_strcpy(bp->rx_cosq_names, + BNXT_MAX_QUEUE * + BNXT_MAX_COSQ_NAME_LEN, buf, + bnxt_ecn_port_stats_arr[i]); + buf += ETH_GSTRING_LEN; + } + } + if (bp->fw_cap & BNXT_FW_CAP_GENERIC_STATS) { + if (!bp->generic_stats.hw_stats) + goto skip_gen_stats; + + for (i = 0; i < BNXT_NUM_GENERIC_STATS; i++) { + strscpy(buf, bnxt_generic_stats[i], ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + } + +skip_gen_stats: + if (bp->fw_cap & BNXT_FW_CAP_LPBK_STATS) { + if (bp->lpbk_stats.hw_stats) { + for (i = 0; i < BNXT_NUM_LPBK_STATS; i++) { + strscpy(buf, bnxt_lpbk_stats[i], ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + } + } + break; + case ETH_SS_TEST: + if (bp->num_tests) + memcpy(buf, bp->test_info->string, + bp->num_tests * ETH_GSTRING_LEN); + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(bnxt_priv_flags); i++) { + strscpy(buf, bnxt_priv_flags[i], ETH_GSTRING_LEN); + buf 
+= ETH_GSTRING_LEN; + } + break; + default: + netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", + stringset); + break; + } +} + +static void bnxt_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; +#ifdef HAVE_ETHTOOL_TCP_DATA_SPLIT + kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED; +#endif + } else { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; + ering->rx_jumbo_max_pending = 0; +#ifdef HAVE_ETHTOOL_TCP_DATA_SPLIT + kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED; +#endif + } + ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; + + ering->rx_pending = bp->rx_ring_size; + ering->rx_jumbo_pending = bp->rx_agg_ring_size; + ering->tx_pending = bp->tx_ring_size; +} + +static int bnxt_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + + if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) || + (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) || + (ering->tx_pending < BNXT_MIN_TX_DESC_CNT)) + return -EINVAL; + + if (netif_running(dev)) + bnxt_close_nic(bp, false, false); + + bp->rx_ring_size = ering->rx_pending; + bp->tx_ring_size = ering->tx_pending; + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, false, false); + + return 0; +} + +#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT) +static void bnxt_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int max_rx_rings, max_tx_rings, tcs; + int max_tx_sch_inputs, tx_grps; + u32 curr_rx_shared = 0; + 
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS) { + curr_rx_shared = bp->rx_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + curr_rx_shared--; + } + /* Get the most up-to-date max_tx_sch_inputs. */ + if (netif_running(dev) && BNXT_NEW_RM(bp)) + bnxt_hwrm_func_resc_qcaps(bp, false); + max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; + + bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); + + tcs = bp->num_tc; + tx_grps = max(tcs, 1); + if (bp->tx_nr_rings_xdp) + tx_grps++; + max_tx_rings /= tx_grps; + channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); + channel->max_combined = max(channel->max_combined, curr_rx_shared); + + if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { + max_rx_rings = 0; + max_tx_rings = 0; + } + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); + + if (tcs > 1) + max_tx_rings /= tcs; + + channel->max_rx = max_rx_rings; + channel->max_tx = max_tx_rings; + channel->max_other = 0; + + if (!bp->tx_nr_rings) + return; + + if (bp->flags & BNXT_FLAG_SHARED_RINGS) { + channel->combined_count = curr_rx_shared; + } else { + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } + } +} + +#if defined(HAVE_ETF_QOPT_OFFLOAD) +static int bnxt_check_etf_tx_rings(struct bnxt *bp, int tx, int tcs, int tx_xdp) +{ + int max_etf_bit, tx_count, tx_sets = 1; + + if (tcs) + tx_sets = tcs; + + tx_count = tx * tx_sets - tx_xdp; + + if (!bp->etf_tx_ring_map) + return 0; + + max_etf_bit = find_last_bit(bp->etf_tx_ring_map, + bp->hw_resc.max_tx_rings); + /* no bit is set */ + if (max_etf_bit == bp->hw_resc.max_tx_rings) + max_etf_bit = 0; + + if (tx_count <= max_etf_bit) { + netdev_warn(bp->dev, + "Cannot configure tx rings below ETF enabled ring %d\n", + max_etf_bit); + return -EINVAL; + } + return 0; +} +#endif + +static int bnxt_set_channels(struct 
net_device *dev, + struct ethtool_channels *channel) +{ + struct bnxt *bp = netdev_priv(dev); + int req_tx_rings, req_rx_rings, tcs; + bool sh = false; + int tx_xdp = 0; + int rc = 0; + int tx_cp; + + if (channel->other_count) + return -EINVAL; + + if (!channel->combined_count && + (!channel->rx_count || !channel->tx_count)) + return -EINVAL; + + if (channel->combined_count && + (channel->rx_count || channel->tx_count)) + return -EINVAL; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || + channel->tx_count)) + return -EINVAL; + + if (channel->combined_count) + sh = true; + + tcs = bp->num_tc; + + req_tx_rings = sh ? channel->combined_count : channel->tx_count; + req_rx_rings = sh ? channel->combined_count : channel->rx_count; + if (bp->tx_nr_rings_xdp) { + if (!sh) { + netdev_err(dev, "Only combined mode supported when XDP is enabled.\n"); + return -EINVAL; + } + tx_xdp = req_rx_rings; + } + +#if defined(HAVE_ETF_QOPT_OFFLOAD) + rc = bnxt_check_etf_tx_rings(bp, req_tx_rings, tcs, tx_xdp); + if (rc) + return rc; +#endif + rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to allocate the requested rings\n"); + return rc; + } + + if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != + bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && + netif_is_rxfh_configured(dev)) { + netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); + return -EINVAL; + } + + bnxt_clear_usr_fltrs(bp, true); + if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + bnxt_clear_rss_ctxs(bp, false); + if (netif_running(dev)) { + if (BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * before PF unload + */ + } + bnxt_close_nic(bp, true, false); + } + + if (sh) { + bp->flags |= BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->combined_count; + bp->tx_nr_rings_per_tc = channel->combined_count; + } else { + bp->flags &= ~BNXT_FLAG_SHARED_RINGS; + bp->rx_nr_rings = channel->rx_count; + 
bp->tx_nr_rings_per_tc = channel->tx_count; + } + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp; + if (tcs > 1) + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp; + + tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); + bp->cp_nr_rings = sh ? max_t(int, tx_cp, bp->rx_nr_rings) : + tx_cp + bp->rx_nr_rings; + + bnxt_set_dflt_mpc_rings(bp); + + /* After changing number of rx channels, update NTUPLE feature. */ + netdev_update_features(dev); + if (netif_running(dev)) { + rc = bnxt_open_nic(bp, true, false); + if ((!rc) && BNXT_PF(bp)) { + /* TODO CHIMP_FW: Send message to all VF's + * to renable + */ + } + } else { + rc = bnxt_reserve_rings(bp, true); + } + + return rc; +} +#endif + +#ifdef HAVE_RXNFC +static u32 bnxt_get_all_fltr_ids_rcu(struct bnxt *bp, struct hlist_head tbl[], + int tbl_size, u32 *ids, u32 start, + u32 id_cnt) +{ + int i, j = start; + + if (j >= id_cnt) + return j; + for (i = 0; i < tbl_size; i++) { + struct hlist_head *head; + struct hlist_node __maybe_unused *node; + struct bnxt_filter_base *fltr; + + head = &tbl[i]; + __hlist_for_each_entry_rcu(fltr, node, head, hash) { + if (!fltr->flags || + test_bit(BNXT_FLTR_FW_DELETED, &fltr->state)) + continue; + ids[j++] = fltr->sw_id; + if (j == id_cnt) + return j; + } + } + return j; +} + +static struct bnxt_filter_base *bnxt_get_one_fltr_rcu(struct bnxt *bp, + struct hlist_head tbl[], + int tbl_size, u32 id) +{ + int i; + + for (i = 0; i < tbl_size; i++) { + struct hlist_head *head; + struct hlist_node __maybe_unused *node; + struct bnxt_filter_base *fltr; + + head = &tbl[i]; + __hlist_for_each_entry_rcu(fltr, node, head, hash) { + if (fltr->flags && fltr->sw_id == id) + return fltr; + } + } + return NULL; +} + +static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + u32 count; + + cmd->data = bp->ntp_fltr_count; + rcu_read_lock(); + count = bnxt_get_all_fltr_ids_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, 
rule_locs, 0, + cmd->rule_cnt); + cmd->rule_cnt = bnxt_get_all_fltr_ids_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + rule_locs, count, + cmd->rule_cnt); + rcu_read_unlock(); + return 0; +} + +static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct bnxt_filter_base *fltr_base; + struct bnxt_ntuple_filter *fltr; + struct bnxt_flow_masks *fmasks; + struct flow_keys *fkeys; + int rc = -EINVAL; + + if (fs->location >= bp->max_fltr) + return rc; + + rcu_read_lock(); + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, + fs->location); + if (fltr_base) { + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *l2_fltr; + struct bnxt_l2_key *l2_key; + + l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); + l2_key = &l2_fltr->l2_key; + fs->flow_type = ETHER_FLOW; + ether_addr_copy(h_ether->h_dest, l2_key->dst_mac_addr); + eth_broadcast_addr(m_ether->h_dest); + if (l2_key->vlan) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + fs->flow_type |= FLOW_EXT; + m_ext->vlan_tci = htons(0xfff); + h_ext->vlan_tci = htons(l2_key->vlan); + } + if (fltr_base->flags & BNXT_ACT_RING_DST) + fs->ring_cookie = fltr_base->rxq; + if (fltr_base->flags & BNXT_ACT_FUNC_DST) + fs->ring_cookie = (u64)(fltr_base->vf_idx + 1) << + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + rcu_read_unlock(); + return 0; + } + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + fs->location); + if (!fltr_base) { + rcu_read_unlock(); + return rc; + } + fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); + fkeys = &fltr->fkeys; + fmasks = &fltr->fmasks; + if (fkeys->basic.n_proto == htons(ETH_P_IP)) { + if (fkeys->basic.ip_proto == IPPROTO_ICMP || + fkeys->basic.ip_proto == IPPROTO_RAW) { + 
fs->flow_type = IP_USER_FLOW; + fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + if (fkeys->basic.ip_proto == IPPROTO_ICMP) + fs->h_u.usr_ip4_spec.proto = IPPROTO_ICMP; + else + fs->h_u.usr_ip4_spec.proto = IPPROTO_RAW; + fs->m_u.usr_ip4_spec.proto = BNXT_IP_PROTO_FULL_MASK; + } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { + fs->flow_type = TCP_V4_FLOW; + } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { + fs->flow_type = UDP_V4_FLOW; + } else { + goto fltr_err; + } + + fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src; + fs->m_u.tcp_ip4_spec.ip4src = fmasks->addrs.v4addrs.src; + fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst; + fs->m_u.tcp_ip4_spec.ip4dst = fmasks->addrs.v4addrs.dst; + if (fs->flow_type == TCP_V4_FLOW || + fs->flow_type == UDP_V4_FLOW) { + fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src; + fs->m_u.tcp_ip4_spec.psrc = fmasks->ports.src; + fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip4_spec.pdst = fmasks->ports.dst; + } + } else { +#ifdef HAVE_ETHTOOL_IP6_SPEC + if (fkeys->basic.ip_proto == IPPROTO_ICMPV6 || + fkeys->basic.ip_proto == IPPROTO_RAW) { + fs->flow_type = IPV6_USER_FLOW; + if (fkeys->basic.ip_proto == IPPROTO_ICMPV6) + fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_ICMPV6; + else + fs->h_u.usr_ip6_spec.l4_proto = IPPROTO_RAW; + fs->m_u.usr_ip6_spec.l4_proto = BNXT_IP_PROTO_FULL_MASK; + } else if (fkeys->basic.ip_proto == IPPROTO_TCP) { + fs->flow_type = TCP_V6_FLOW; + } else if (fkeys->basic.ip_proto == IPPROTO_UDP) { + fs->flow_type = UDP_V6_FLOW; + } else { + goto fltr_err; + } + + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] = + fkeys->addrs.v6addrs.src; + *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6src[0] = + fmasks->addrs.v6addrs.src; + *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] = + fkeys->addrs.v6addrs.dst; + *(struct in6_addr *)&fs->m_u.tcp_ip6_spec.ip6dst[0] = + fmasks->addrs.v6addrs.dst; + if (fs->flow_type == TCP_V6_FLOW || + fs->flow_type == UDP_V6_FLOW) { + fs->h_u.tcp_ip6_spec.psrc = 
fkeys->ports.src; + fs->m_u.tcp_ip6_spec.psrc = fmasks->ports.src; + fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst; + fs->m_u.tcp_ip6_spec.pdst = fmasks->ports.dst; + } +#endif + } + + if (fltr->base.flags & BNXT_ACT_DROP) + fs->ring_cookie = RX_CLS_FLOW_DISC; + else if (fltr->base.flags & BNXT_ACT_NUMA_DIRECT) + fs->ring_cookie = fltr->base.rxq - BNXT_NTUPLE_COOKIE_NUMA_DIRECT; + else + fs->ring_cookie = fltr->base.rxq; + rc = 0; + +fltr_err: + rcu_read_unlock(); + + return rc; +} + +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) || defined(HAVE_ETHTOOL_RXFH_PARAM) +static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, + u32 index) +{ + struct bnxt_rss_ctx *rss_ctx, *tmp; + + list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) + if (rss_ctx->index == index) + return rss_ctx; + return NULL; +} + +static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, + struct bnxt_rss_ctx *rss_ctx) +{ + int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, + vnic->rss_table_size, + &vnic->rss_table_dma_addr, + GFP_KERNEL); + if (!vnic->rss_table) + return -ENOMEM; + + vnic->rss_hash_key = ((void *)vnic->rss_table) + size; + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; + return 0; +} +#endif + +static int bnxt_add_l2_cls_rule(struct bnxt *bp, + struct ethtool_rx_flow_spec *fs) +{ + u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + struct ethhdr *h_ether = &fs->h_u.ether_spec; + struct ethhdr *m_ether = &fs->m_u.ether_spec; + struct bnxt_l2_filter *fltr; + struct bnxt_l2_key key; + u16 vnic_id; + u8 flags; + int rc; + + if (BNXT_CHIP_P5_PLUS(bp)) + return -EOPNOTSUPP; + + if (!is_broadcast_ether_addr(m_ether->h_dest)) + return -EINVAL; + ether_addr_copy(key.dst_mac_addr, h_ether->h_dest); + key.vlan = 0; + if 
(fs->flow_type & FLOW_EXT) { + struct ethtool_flow_ext *m_ext = &fs->m_ext; + struct ethtool_flow_ext *h_ext = &fs->h_ext; + + if (m_ext->vlan_tci != htons(0xfff) || !h_ext->vlan_tci) + return -EINVAL; + key.vlan = htons(h_ext->vlan_tci); + } + + if (vf) { + flags = BNXT_ACT_FUNC_DST; + vnic_id = 0xffff; + vf--; + if (!bnxt_vf_vnic_state_is_up(bp, vf)) + return -ENODEV; + } else { + flags = BNXT_ACT_RING_DST; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + vnic_id = bp->vnic_info[BNXT_VNIC_DEFAULT].fw_vnic_id; + else + vnic_id = bp->vnic_info[ring + 1].fw_vnic_id; + } + fltr = bnxt_alloc_new_l2_filter(bp, &key, flags); + if (IS_ERR(fltr)) + return PTR_ERR(fltr); + + fltr->base.fw_vnic_id = vnic_id; + fltr->base.rxq = ring; + fltr->base.vf_idx = vf; + rc = bnxt_hwrm_l2_filter_alloc(bp, fltr); + if (rc) + bnxt_del_l2_filter(bp, fltr); + else + fs->location = fltr->base.sw_id; + return rc; +} + +static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec *ip_spec, + struct ethtool_usrip4_spec *ip_mask) +{ + if (ip_mask->l4_4_bytes || ip_mask->tos || + ip_spec->ip_ver != ETH_RX_NFC_IP4 || + ip_mask->proto != BNXT_IP_PROTO_FULL_MASK || + (ip_spec->proto != IPPROTO_RAW && ip_spec->proto != IPPROTO_ICMP)) + return false; + return true; +} + +#ifdef HAVE_ETHTOOL_IP6_SPEC +static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec, + struct ethtool_usrip6_spec *ip_mask) +{ + if (ip_mask->l4_4_bytes || ip_mask->tclass || + ip_mask->l4_proto != BNXT_IP_PROTO_FULL_MASK || + (ip_spec->l4_proto != IPPROTO_RAW && + ip_spec->l4_proto != IPPROTO_ICMPV6)) + return false; + return true; +} +#endif + +static int bnxt_add_ntuple_cls_rule(struct bnxt *bp, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = &cmd->fs; + u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + struct bnxt_ntuple_filter *new_fltr, *fltr; + u32 flow_type = fs->flow_type & 0xff; + s64 cookie = (s64)fs->ring_cookie; + struct bnxt_l2_filter *l2_fltr; + struct bnxt_flow_masks 
*fmasks; + struct flow_keys *fkeys; + u32 idx; + int rc; + + if (!bp->vnic_info) + return -EAGAIN; + + if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || (vf && cookie > 0)) + return -EOPNOTSUPP; + + if (flow_type == IP_USER_FLOW) { + if (!bnxt_verify_ntuple_ip4_flow(&fs->h_u.usr_ip4_spec, + &fs->m_u.usr_ip4_spec)) + return -EOPNOTSUPP; + } + +#ifdef HAVE_ETHTOOL_IP6_SPEC + if (flow_type == IPV6_USER_FLOW) { + if (!bnxt_verify_ntuple_ip6_flow(&fs->h_u.usr_ip6_spec, + &fs->m_u.usr_ip6_spec)) + return -EOPNOTSUPP; + } +#endif + + new_fltr = kzalloc(sizeof(*new_fltr), GFP_KERNEL); + if (!new_fltr) + return -ENOMEM; + + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; + atomic_inc(&l2_fltr->refcnt); + new_fltr->l2_fltr = l2_fltr; + fmasks = &new_fltr->fmasks; + fkeys = &new_fltr->fkeys; + + rc = -EOPNOTSUPP; + switch (flow_type) { + case IP_USER_FLOW: { + struct ethtool_usrip4_spec *ip_spec = &fs->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *ip_mask = &fs->m_u.usr_ip4_spec; + + fkeys->basic.ip_proto = ip_spec->proto; + fkeys->basic.n_proto = htons(ETH_P_IP); + fkeys->addrs.v4addrs.src = ip_spec->ip4src; + fmasks->addrs.v4addrs.src = ip_mask->ip4src; + fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; + fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; + break; + } + case TCP_V4_FLOW: + case UDP_V4_FLOW: { + struct ethtool_tcpip4_spec *ip_spec = &fs->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &fs->m_u.tcp_ip4_spec; + + fkeys->basic.ip_proto = IPPROTO_TCP; + if (flow_type == UDP_V4_FLOW) + fkeys->basic.ip_proto = IPPROTO_UDP; + fkeys->basic.n_proto = htons(ETH_P_IP); + fkeys->addrs.v4addrs.src = ip_spec->ip4src; + fmasks->addrs.v4addrs.src = ip_mask->ip4src; + fkeys->addrs.v4addrs.dst = ip_spec->ip4dst; + fmasks->addrs.v4addrs.dst = ip_mask->ip4dst; + fkeys->ports.src = ip_spec->psrc; + fmasks->ports.src = ip_mask->psrc; + fkeys->ports.dst = ip_spec->pdst; + fmasks->ports.dst = ip_mask->pdst; + break; + } +#ifdef HAVE_ETHTOOL_IP6_SPEC + case 
IPV6_USER_FLOW: { + struct ethtool_usrip6_spec *ip_spec = &fs->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *ip_mask = &fs->m_u.usr_ip6_spec; + + fkeys->basic.ip_proto = ip_spec->l4_proto; + fkeys->basic.n_proto = htons(ETH_P_IPV6); + fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; + fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; + fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; + fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; + break; + } + case TCP_V6_FLOW: + case UDP_V6_FLOW: { + struct ethtool_tcpip6_spec *ip_spec = &fs->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip_mask = &fs->m_u.tcp_ip6_spec; + + fkeys->basic.ip_proto = IPPROTO_TCP; + if (flow_type == UDP_V6_FLOW) + fkeys->basic.ip_proto = IPPROTO_UDP; + fkeys->basic.n_proto = htons(ETH_P_IPV6); + + fkeys->addrs.v6addrs.src = *(struct in6_addr *)&ip_spec->ip6src; + fmasks->addrs.v6addrs.src = *(struct in6_addr *)&ip_mask->ip6src; + fkeys->addrs.v6addrs.dst = *(struct in6_addr *)&ip_spec->ip6dst; + fmasks->addrs.v6addrs.dst = *(struct in6_addr *)&ip_mask->ip6dst; + fkeys->ports.src = ip_spec->psrc; + fmasks->ports.src = ip_mask->psrc; + fkeys->ports.dst = ip_spec->pdst; + fmasks->ports.dst = ip_mask->pdst; + break; + } +#endif + default: + goto ntuple_err; + } + if (!memcmp(&BNXT_FLOW_MASK_NONE, fmasks, sizeof(*fmasks))) + goto ntuple_err; + + idx = bnxt_get_ntp_filter_idx(bp, fkeys, NULL); + rcu_read_lock(); + fltr = bnxt_lookup_ntp_filter_from_idx(bp, new_fltr, idx); + if (fltr) { + rcu_read_unlock(); + rc = -EEXIST; + goto ntuple_err; + } + rcu_read_unlock(); + + new_fltr->base.flags = BNXT_ACT_NO_AGING; +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) + if (fs->flow_type & FLOW_RSS) { + struct bnxt_rss_ctx *rss_ctx; + + new_fltr->base.fw_vnic_id = 0; + new_fltr->base.flags |= BNXT_ACT_RSS_CTX; + rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context); + if (rss_ctx) { + new_fltr->base.fw_vnic_id = rss_ctx->index; + } else { + rc = 
-EINVAL; + goto ntuple_err; + } + } +#endif + if (cookie == RX_CLS_FLOW_DISC) { + new_fltr->base.flags |= BNXT_ACT_DROP; + } else if (cookie <= BNXT_NTUPLE_COOKIE_NUMA_DIRECT) { + new_fltr->base.flags |= BNXT_ACT_NUMA_DIRECT; + new_fltr->base.rxq = BNXT_NTUPLE_COOKIE_NUMA_DIRECT - cookie; + } else { + new_fltr->base.rxq = ethtool_get_flow_spec_ring(fs->ring_cookie); + } + __set_bit(BNXT_FLTR_VALID, &new_fltr->base.state); + rc = bnxt_insert_ntp_filter(bp, new_fltr, idx); + if (!rc) { + rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp, new_fltr); + if (rc) { + bnxt_del_ntp_filter(bp, new_fltr); + return rc; + } + fs->location = new_fltr->base.sw_id; + return 0; + } + +ntuple_err: + atomic_dec(&l2_fltr->refcnt); + kfree(new_fltr); + return rc; +} + +static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = &cmd->fs; + s64 cookie = (s64)fs->ring_cookie; + u32 ring, flow_type; + int rc; + u8 vf; + + if (!netif_running(bp->dev)) + return -EAGAIN; + if (fs->location != RX_CLS_LOC_ANY) + return -EINVAL; + + flow_type = fs->flow_type; +#ifdef HAVE_ETHTOOL_IP6_SPEC + if ((flow_type == IP_USER_FLOW || + flow_type == IPV6_USER_FLOW) && + !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) + return -EOPNOTSUPP; +#else + if ((flow_type == IP_USER_FLOW) && + !(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO)) + return -EOPNOTSUPP; +#endif + +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) + if (flow_type & FLOW_MAC_EXT) +#else + if (flow_type & (FLOW_MAC_EXT | FLOW_RSS)) +#endif + return -EINVAL; + flow_type &= ~FLOW_EXT; + + if (cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW) + return bnxt_add_ntuple_cls_rule(bp, cmd); + + if (cookie <= BNXT_NTUPLE_COOKIE_NUMA_DIRECT) { + if (~bp->flags & BNXT_FLAG_NUMA_DIRECT) + return -EPERM; + if (flow_type == ETHER_FLOW) + return -EINVAL; /* no L2 context */ + if (BNXT_NTUPLE_COOKIE_NUMA_DIRECT - cookie > bp->rx_nr_rings) + return -EINVAL; + return bnxt_add_ntuple_cls_rule(bp, cmd); + } 
+ + if (!(bp->flags & BNXT_FLAG_RFS)) + return -EPERM; + + ring = ethtool_get_flow_spec_ring(fs->ring_cookie); + vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); + if (BNXT_VF(bp) && vf) + return -EINVAL; + if (BNXT_PF(bp) && vf > bp->pf.active_vfs) + return -EINVAL; + if (!vf && ring >= bp->rx_nr_rings) + return -EINVAL; + + if (flow_type == ETHER_FLOW) + rc = bnxt_add_l2_cls_rule(bp, fs); + else + rc = bnxt_add_ntuple_cls_rule(bp, cmd); + return rc; +} + +static int bnxt_srxclsrldel(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fs = &cmd->fs; + struct bnxt_filter_base *fltr_base; + struct bnxt_ntuple_filter *fltr; + u32 id = fs->location; + + rcu_read_lock(); + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->l2_fltr_hash_tbl, + BNXT_L2_FLTR_HASH_SIZE, id); + if (fltr_base) { + struct bnxt_l2_filter *l2_fltr; + + l2_fltr = container_of(fltr_base, struct bnxt_l2_filter, base); + rcu_read_unlock(); + bnxt_hwrm_l2_filter_free(bp, l2_fltr); + bnxt_del_l2_filter(bp, l2_fltr); + return 0; + } + fltr_base = bnxt_get_one_fltr_rcu(bp, bp->ntp_fltr_hash_tbl, + BNXT_NTP_FLTR_HASH_SIZE, + fs->location); + if (!fltr_base) { + rcu_read_unlock(); + return -ENOENT; + } + + fltr = container_of(fltr_base, struct bnxt_ntuple_filter, base); + if (!(fltr->base.flags & BNXT_ACT_NO_AGING)) { + rcu_read_unlock(); + return -EINVAL; + } + rcu_read_unlock(); + bnxt_hwrm_cfa_ntuple_filter_free(bp, fltr); + bnxt_del_ntp_filter(bp, fltr); + return 0; +} + +static u64 get_ethtool_ipv4_rss(struct bnxt *bp) +{ + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4) + return RXH_IP_SRC | RXH_IP_DST; + return 0; +} + +static u64 get_ethtool_ipv6_rss(struct bnxt *bp) +{ + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6) + return RXH_IP_SRC | RXH_IP_DST; + return 0; +} + +static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + cmd->data = 0; + switch (cmd->flow_type) { + case TCP_V4_FLOW: + if (bp->rss_hash_cfg & 
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + cmd->data |= get_ethtool_ipv4_rss(bp); + break; + case UDP_V4_FLOW: + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case AH_ESP_V4_FLOW: + if (bp->rss_hash_cfg & + (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + cmd->data |= get_ethtool_ipv4_rss(bp); + break; + + case TCP_V6_FLOW: + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + cmd->data |= get_ethtool_ipv6_rss(bp); + break; + case UDP_V6_FLOW: + if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case AH_ESP_V6_FLOW: + if (bp->rss_hash_cfg & + (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6)) + cmd->data |= RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3; + fallthrough; + case SCTP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + cmd->data |= get_ethtool_ipv6_rss(bp); + break; + } + return 0; +} + +#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST) + +static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) +{ + u32 rss_hash_cfg = bp->rss_hash_cfg; + int tuple, rc = 0; + + if (cmd->data == RXH_4TUPLE) + tuple = 4; + else if (cmd->data == RXH_2TUPLE) + tuple = 2; + else if (!cmd->data) + tuple = 0; + else + return -EINVAL; + + if (cmd->flow_type == TCP_V4_FLOW) { + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; + if (tuple == 4) + rss_hash_cfg |= 
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4; + } else if (cmd->flow_type == UDP_V4_FLOW) { + if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) + return -EINVAL; + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4; + } else if (cmd->flow_type == TCP_V6_FLOW) { + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + } else if (cmd->flow_type == UDP_V6_FLOW) { + if (tuple == 4 && !(bp->rss_cap & BNXT_RSS_CAP_UDP_RSS_CAP)) + return -EINVAL; + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6; + } else if (cmd->flow_type == AH_ESP_V4_FLOW) { + if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V4_RSS_CAP) || + !(bp->rss_cap & BNXT_RSS_CAP_ESP_V4_RSS_CAP))) + return -EINVAL; + rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4); + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4; + } else if (cmd->flow_type == AH_ESP_V6_FLOW) { + if (tuple == 4 && (!(bp->rss_cap & BNXT_RSS_CAP_AH_V6_RSS_CAP) || + !(bp->rss_cap & BNXT_RSS_CAP_ESP_V6_RSS_CAP))) + return -EINVAL; + rss_hash_cfg &= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6); + if (tuple == 4) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 | + VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6; + } else if (tuple == 4) { + return -EINVAL; + } + + switch (cmd->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case IPV4_FLOW: + if (tuple == 2) + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; + else if (!tuple) + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4; + break; + + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + 
case AH_V6_FLOW: + case ESP_V6_FLOW: + case IPV6_FLOW: + if (tuple == 2) { + if (bp->ipv6_flow_lbl_rss_en) { + /* Hash type ipv6 and ipv6_flow_label are mutually + * exclusive. HW does not include the flow_label + * in hash calculation for the packets that are + * matching tcp_ipv6 and udp_ipv6 hash types + */ + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL; + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; + } else { + /* Negate flow label if priv flag not set */ + rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL; + } + } else if (!tuple) { + rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6; + } + break; + } + + if (bp->rss_hash_cfg == rss_hash_cfg) + return 0; + + if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA) + bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg; + bp->rss_hash_cfg = rss_hash_cfg; + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } + return rc; +} + +static int bnxt_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, +#ifdef HAVE_RXNFC_VOID + void *rule_locs) +#else + u32 *rule_locs) +#endif +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = bp->rx_nr_rings; + break; + + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bp->ntp_fltr_count; + cmd->data = bp->max_fltr | RX_CLS_LOC_SPECIAL; + break; + + case ETHTOOL_GRXCLSRLALL: + rc = bnxt_grxclsrlall(bp, cmd, (u32 *)rule_locs); + break; + + case ETHTOOL_GRXCLSRULE: + rc = bnxt_grxclsrule(bp, cmd); + break; + + case ETHTOOL_GRXFH: + rc = bnxt_grxfh(bp, cmd); + break; + + default: + rc = -EOPNOTSUPP; + break; + } + + return rc; +} + +static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + rc = bnxt_srxfh(bp, cmd); + break; + + case ETHTOOL_SRXCLSRLINS: + rc = 
bnxt_srxclsrlins(bp, cmd); + break; + + case ETHTOOL_SRXCLSRLDEL: + rc = bnxt_srxclsrldel(bp, cmd); + break; + + default: + rc = -EOPNOTSUPP; + break; + } + return rc; +} + +#endif /* HAVE_RXNFC */ + +u32 bnxt_get_rxfh_indir_size(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) * + BNXT_RSS_TABLE_ENTRIES_P5; + return HW_HASH_INDEX_SIZE; +} + +#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT) +static u32 bnxt_get_rxfh_key_size(struct net_device *dev) +{ + return HW_HASH_KEY_SIZE; +} + +#if defined(HAVE_ETHTOOL_RXFH_PARAM) +static int bnxt_get_rxfh(struct net_device *dev, + struct ethtool_rxfh_param *rxfh) +{ + u32 rss_context = rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx = NULL; + struct bnxt *bp = netdev_priv(dev); + u16 *indir_tbl = bp->rss_indir_tbl; + struct bnxt_vnic_info *vnic; + u32 i, tbl_size; + + /* WIP: Return HWRM_VNIC_RSS_QCFG response, instead of driver cache */ + rxfh->hfunc = bp->rss_hfunc; + + if (!bp->vnic_info) + return 0; + + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + if (rxfh->rss_context) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); + if (!rss_ctx) + return -EINVAL; + indir_tbl = rss_ctx->rss_indir_tbl; + vnic = &rss_ctx->vnic; + } + + if (rxfh->indir && indir_tbl) { + tbl_size = bnxt_get_rxfh_indir_size(dev); + for (i = 0; i < tbl_size; i++) + rxfh->indir[i] = indir_tbl[i]; + } + + if (rxfh->key && vnic->rss_hash_key) + memcpy(rxfh->key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); + + return 0; +} +#endif /* HAVE_ETHTOOL_RXFH_PARAM */ +#endif + +#if defined(HAVE_ETHTOOL_RXFH_PARAM) +static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, + struct ethtool_rxfh_param *rxfh) +{ + if (rxfh->key) { + if (rss_ctx) { + memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key, + HW_HASH_KEY_SIZE); + } else { + memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + 
} + } + if (rxfh->indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + u16 *indir_tbl = bp->rss_indir_tbl; + + if (rss_ctx) + indir_tbl = rss_ctx->rss_indir_tbl; + for (i = 0; i < tbl_size; i++) + indir_tbl[i] = rxfh->indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } +} + +static int bnxt_set_rxfh_context(struct bnxt *bp, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + u32 *rss_context = &rxfh->rss_context; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + bool modify = false; + int bit_id; + int rc; + + if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { + NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); + return -EOPNOTSUPP; + } + + if (!netif_running(bp->dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down"); + return -EAGAIN; + } + + if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); + if (!rss_ctx) { + NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", + *rss_context); + return -EINVAL; + } + if (*rss_context && rxfh->rss_delete) { + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; + } + modify = true; + vnic = &rss_ctx->vnic; + goto modify_context; + } + + if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { + NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", + BNXT_MAX_ETH_RSS_CTX); + return -EINVAL; + } + + if (!bnxt_rfs_capable(bp, true)) { + NL_SET_ERR_MSG_MOD(extack, "Out hardware resources"); + return -ENOMEM; + } + + rss_ctx = bnxt_alloc_rss_ctx(bp); + if (!rss_ctx) + return -ENOMEM; + + vnic = &rss_ctx->vnic; + vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; + vnic->vnic_id = BNXT_VNIC_ID_INVALID; + rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + if (rc) + goto out; + + rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); + if (rc) + goto out; + + 
bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); + + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC"); + goto out; + } + + rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } +modify_context: + bnxt_modify_rss(bp, rss_ctx, rxfh); + + if (modify) + return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + + rc = __bnxt_setup_vnic_p5(bp, vnic); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); + goto out; + } + + bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, + BNXT_RSS_CTX_BMAP_LEN, 0); + if (bit_id < 0) { + rc = -ENOMEM; + goto out; + } + rss_ctx->index = (u16)bit_id; + *rss_context = rss_ctx->index; + + return 0; +out: + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return rc; +} + +static int bnxt_set_rxfh(struct net_device *dev, + struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + bool skip_key = false; + int rc = 0; + + /* Check HW cap and cache hash func details */ + switch (rxfh->hfunc) { + case ETH_RSS_HASH_XOR: + if (!(bp->rss_cap & BNXT_RSS_CAP_XOR_CAP)) + return -EOPNOTSUPP; + /* hkey not needed in XOR mode */ + skip_key = true; + break; + case ETH_RSS_HASH_TOP: + if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CAP)) + return -EOPNOTSUPP; + break; + case ETH_RSS_HASH_CRC32: + /* default keys/indir */ + if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CHKSM_CAP)) + return -EOPNOTSUPP; + skip_key = true; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EOPNOTSUPP; + } + + if (rxfh->rss_context) + return bnxt_set_rxfh_context(bp, rxfh, extack); + + /* Repeat of same hfunc with no key or weight */ + if (bp->rss_hfunc == rxfh->hfunc && !rxfh->key && !rxfh->indir) + return -EINVAL; + + /* for xor and crc32 block hkey config */ + if (rxfh->key && skip_key) + return 
-EINVAL; + + bnxt_modify_rss(bp, NULL, rxfh); + + bp->rss_hfunc = rxfh->hfunc; + bnxt_clear_usr_fltrs(bp, false); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } + return rc; +} +#endif /* HAVE_ETHTOOL_RXFH_PARAM */ + +static void bnxt_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct bnxt *bp = netdev_priv(dev); + + strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); + strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); + strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); + info->n_stats = bnxt_get_num_stats(bp); + info->testinfo_len = bp->num_tests; + /* TODO CHIMP_FW: eeprom dump details */ + info->eedump_len = 0; + /* TODO CHIMP FW: reg dump details */ + info->regdump_len = 0; +} + +static int bnxt_get_regs_len(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int reg_len; + + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + reg_len = BNXT_PXP_REG_LEN; + + if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) + reg_len += sizeof(struct pcie_ctx_hw_stats); + + return reg_len; +} + +static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *_p) +{ + struct pcie_ctx_hw_stats *hw_pcie_stats; + struct bnxt *bp = netdev_priv(dev); + struct hwrm_pcie_qstats_input *req; + dma_addr_t hw_pcie_stats_addr; + int rc; + + regs->version = 0; + bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); + + if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) + return; + + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) + return; + + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) { + hwrm_req_drop(bp, req); + return; + } + + regs->version = 1; + hwrm_req_hold(bp, req); /* hold on to slice */ + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr 
= cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); + if (!rc) { + __le64 *src = (__le64 *)hw_pcie_stats; + u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); + int i; + + for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) + dst[i] = le64_to_cpu(src[i]); + } + hwrm_req_drop(bp, req); +} + +static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnxt *bp = netdev_priv(dev); + + wol->supported = 0; + wol->wolopts = 0; + memset(&wol->sopass, 0, sizeof(wol->sopass)); + if (bp->flags & BNXT_FLAG_WOL_CAP) { + wol->supported = WAKE_MAGIC; + if (bp->wol) + wol->wolopts = WAKE_MAGIC; + } +} + +static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bnxt *bp = netdev_priv(dev); + + if (wol->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + if (wol->wolopts & WAKE_MAGIC) { + if (!(bp->flags & BNXT_FLAG_WOL_CAP)) + return -EINVAL; + if (!bp->wol) { + if (bnxt_hwrm_alloc_wol_fltr(bp)) + return -EBUSY; + bp->wol = 1; + } + } else { + if (bp->wol) { + if (bnxt_hwrm_free_wol_fltr(bp)) + return -EBUSY; + bp->wol = 0; + } + } + return 0; +} + +#ifdef HAVE_ETHTOOL_KEEE +/* TODO: support 25GB, 40GB, 50GB with different cable type */ +void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds) +{ + linkmode_zero(mode); + + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode); + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode); + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, mode); + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode); + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode); +} +#endif + +enum bnxt_media_type { + BNXT_MEDIA_UNKNOWN = 0, + BNXT_MEDIA_TP, + BNXT_MEDIA_CR, + BNXT_MEDIA_SR, + BNXT_MEDIA_LR_ER_FR, + BNXT_MEDIA_KR, 
+ BNXT_MEDIA_KX, + BNXT_MEDIA_X, + __BNXT_MEDIA_END, +}; + +static const enum bnxt_media_type bnxt_phy_types[] = { + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR, + 
[PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4] = BNXT_MEDIA_CR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4] = BNXT_MEDIA_SR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4] = BNXT_MEDIA_LR_ER_FR, + [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4] = BNXT_MEDIA_LR_ER_FR, +}; + +static enum bnxt_media_type +bnxt_get_media(struct bnxt_link_info *link_info) +{ + switch (link_info->media_type) { + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP: + return BNXT_MEDIA_TP; + case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC: + return BNXT_MEDIA_CR; + default: + if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types)) + return bnxt_phy_types[link_info->phy_type]; + return BNXT_MEDIA_UNKNOWN; + } +} + +enum bnxt_link_speed_indices { + 
BNXT_LINK_SPEED_UNKNOWN = 0, + BNXT_LINK_SPEED_100MB_IDX, + BNXT_LINK_SPEED_1GB_IDX, + BNXT_LINK_SPEED_10GB_IDX, + BNXT_LINK_SPEED_25GB_IDX, + BNXT_LINK_SPEED_40GB_IDX, + BNXT_LINK_SPEED_50GB_IDX, + BNXT_LINK_SPEED_100GB_IDX, + BNXT_LINK_SPEED_200GB_IDX, + BNXT_LINK_SPEED_400GB_IDX, + __BNXT_LINK_SPEED_END +}; + +static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed) +{ + switch (speed) { + case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX; + case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX; + case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX; + case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX; + case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX; + case BNXT_LINK_SPEED_50GB: + case BNXT_LINK_SPEED_50GB_PAM4: + return BNXT_LINK_SPEED_50GB_IDX; + case BNXT_LINK_SPEED_100GB: + case BNXT_LINK_SPEED_100GB_PAM4: + case BNXT_LINK_SPEED_100GB_PAM4_112: + return BNXT_LINK_SPEED_100GB_IDX; + case BNXT_LINK_SPEED_200GB: + case BNXT_LINK_SPEED_200GB_PAM4: + case BNXT_LINK_SPEED_200GB_PAM4_112: + return BNXT_LINK_SPEED_200GB_IDX; + case BNXT_LINK_SPEED_400GB: + case BNXT_LINK_SPEED_400GB_PAM4: + case BNXT_LINK_SPEED_400GB_PAM4_112: + return BNXT_LINK_SPEED_400GB_IDX; + default: return BNXT_LINK_SPEED_UNKNOWN; + } +} + +static const enum ethtool_link_mode_bit_indices +bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = { + [BNXT_LINK_SPEED_100MB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_1GB_IDX] = { + { + [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + /* historically baseT, but DAC is more correctly baseX */ + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_10GB_IDX] = { + { + [BNXT_MEDIA_TP] = 
ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_25GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_40GB_IDX] = { + { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_50GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_100GB_IDX] = { + [BNXT_SIG_MODE_NRZ] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = 
ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4_112] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_200GB_IDX] = { + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4_112] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + }, + }, + [BNXT_LINK_SPEED_400GB_IDX] = { + [BNXT_SIG_MODE_PAM4] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, + }, + [BNXT_SIG_MODE_PAM4_112] = { + [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, + [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + }, + }, +}; + +#define BNXT_LINK_MODE_UNKNOWN -1 +static enum ethtool_link_mode_bit_indices +bnxt_get_link_mode(struct bnxt_link_info *link_info) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + enum bnxt_media_type media; + u8 sig_mode; + + if (link_info->phy_link_status != BNXT_LINK_LINK) + return BNXT_LINK_MODE_UNKNOWN; + + media = 
bnxt_get_media(link_info); + if (BNXT_AUTO_MODE(link_info->auto_mode)) { + speed = bnxt_fw_speed_idx(link_info->link_speed); + sig_mode = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK; + } else { + speed = bnxt_fw_speed_idx(link_info->req_link_speed); + sig_mode = link_info->req_signal_mode; + } + if (sig_mode >= BNXT_SIG_MODE_MAX) + return BNXT_LINK_MODE_UNKNOWN; + + /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux + * link mode, but since no such devices exist, the zeroes in the + * map can be conveniently used to represent unknown link modes. + */ + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + return BNXT_LINK_MODE_UNKNOWN; + + switch (link_mode) { + case ETHTOOL_LINK_MODE_100baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT; + break; + case ETHTOOL_LINK_MODE_1000baseT_Full_BIT: + if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL) + link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT; + break; + default: + break; + } + + return link_mode; +} + +static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.supported); + } + + if (link_info->support_auto_speeds || link_info->support_auto_speeds2 || + link_info->support_pam4_auto_speeds) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.supported); + + if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + return; + + if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) 
== 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.advertising); + if (link_info->lp_pause & BNXT_LINK_PAUSE_RX) + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); + if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + lk_ksettings->link_modes.lp_advertising); +} + +static const u16 bnxt_nrz_speed_masks[] = { + [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB, + [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB, + [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB, + [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB, + [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB, + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB, + [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ +}; + +static const u16 bnxt_pam4_speed_masks[] = { + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB, + [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB, + [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ +}; + +static const u16 bnxt_nrz_speeds2_masks[] = { + [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEEDS2_MSK_1GB, + [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEEDS2_MSK_10GB, + [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEEDS2_MSK_25GB, + [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEEDS2_MSK_40GB, + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB, + [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */ +}; + +static const u16 bnxt_pam4_speeds2_masks[] = { + [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4, + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4, + [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4, + 
[BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4, +}; + +static const u16 bnxt_pam4_112_speeds2_masks[] = { + [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112, + [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112, + [BNXT_LINK_SPEED_400GB_IDX] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112, +}; + +static enum bnxt_link_speed_indices +bnxt_encoding_speed_idx(u8 sig_mode, u16 phy_flags, u16 speed_msk) +{ + const u16 *speeds; + int idx, len; + + switch (sig_mode) { + case BNXT_SIG_MODE_NRZ: + if (phy_flags & BNXT_PHY_FL_SPEEDS2) { + speeds = bnxt_nrz_speeds2_masks; + len = ARRAY_SIZE(bnxt_nrz_speeds2_masks); + } else { + speeds = bnxt_nrz_speed_masks; + len = ARRAY_SIZE(bnxt_nrz_speed_masks); + } + break; + case BNXT_SIG_MODE_PAM4: + if (phy_flags & BNXT_PHY_FL_SPEEDS2) { + speeds = bnxt_pam4_speeds2_masks; + len = ARRAY_SIZE(bnxt_pam4_speeds2_masks); + } else { + speeds = bnxt_pam4_speed_masks; + len = ARRAY_SIZE(bnxt_pam4_speed_masks); + } + break; + case BNXT_SIG_MODE_PAM4_112: + speeds = bnxt_pam4_112_speeds2_masks; + len = ARRAY_SIZE(bnxt_pam4_112_speeds2_masks); + break; + default: + return BNXT_LINK_SPEED_UNKNOWN; + } + + for (idx = 0; idx < len; idx++) { + if (speeds[idx] == speed_msk) + return idx; + } + + return BNXT_LINK_SPEED_UNKNOWN; +} + +#define BNXT_FW_SPEED_MSK_BITS 16 +static void +__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, u16 phy_flags, unsigned long *et_mask) +{ + enum ethtool_link_mode_bit_indices link_mode; + enum bnxt_link_speed_indices speed; + u8 bit; + + for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) { + speed = bnxt_encoding_speed_idx(sig_mode, phy_flags, 1 << bit); + if (!speed) + continue; + + link_mode = bnxt_link_modes[speed][sig_mode][media]; + if (!link_mode) + continue; + + linkmode_set_bit(link_mode, et_mask); + } +} + +static void +bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media, + u8 sig_mode, u16 phy_flags, 
unsigned long *et_mask) +{ + if (media) { + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, + et_mask); + return; + } + + /* list speeds for all media if unknown */ + for (media = 1; media < __BNXT_MEDIA_END; media++) + __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, phy_flags, + et_mask); +} + +static void +bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info *link_info, + enum bnxt_media_type media, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; + u16 phy_flags = bp->phy_flags; + + if (phy_flags & BNXT_PHY_FL_SPEEDS2) { + sp_nrz = link_info->support_speeds2; + sp_pam4 = link_info->support_speeds2; + sp_pam4_112 = link_info->support_speeds2; + } else { + sp_nrz = link_info->support_speeds; + sp_pam4 = link_info->support_pam4_speeds; + } + bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, + lk_ksettings->link_modes.supported); + bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, + lk_ksettings->link_modes.supported); + bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, + phy_flags, lk_ksettings->link_modes.supported); +} + +static void +bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info *link_info, + enum bnxt_media_type media, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + u16 sp_nrz, sp_pam4, sp_pam4_112 = 0; + u16 phy_flags = bp->phy_flags; + + sp_nrz = link_info->advertising; + if (phy_flags & BNXT_PHY_FL_SPEEDS2) { + sp_pam4 = link_info->advertising; + sp_pam4_112 = link_info->advertising; + } else { + sp_pam4 = link_info->advertising_pam4; + } + bnxt_get_ethtool_speeds(sp_nrz, media, BNXT_SIG_MODE_NRZ, phy_flags, + lk_ksettings->link_modes.advertising); + bnxt_get_ethtool_speeds(sp_pam4, media, BNXT_SIG_MODE_PAM4, phy_flags, + lk_ksettings->link_modes.advertising); + 
bnxt_get_ethtool_speeds(sp_pam4_112, media, BNXT_SIG_MODE_PAM4_112, + phy_flags, lk_ksettings->link_modes.advertising); +} + +static void +bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info *link_info, + enum bnxt_media_type media, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + u16 phy_flags = bp->phy_flags; + + bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds, media, + BNXT_SIG_MODE_NRZ, phy_flags, + lk_ksettings->link_modes.lp_advertising); + bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds, media, + BNXT_SIG_MODE_PAM4, phy_flags, + lk_ksettings->link_modes.lp_advertising); +} + +static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds, + u16 speed_msk, const unsigned long *et_mask, + enum ethtool_link_mode_bit_indices mode) +{ + bool mode_desired = linkmode_test_bit(mode, et_mask); + + if (!mode) + return; + + /* enabled speeds for installed media should override */ + if (installed_media && mode_desired) { + *speeds |= speed_msk; + *delta |= speed_msk; + return; + } + + /* many to one mapping, only allow one change per fw_speed bit */ + if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) { + *speeds ^= speed_msk; + *delta |= speed_msk; + } +} + +static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info, + const unsigned long *et_mask) +{ + struct bnxt *bp = container_of(link_info, struct bnxt, link_info); + u16 const *sp_msks, *sp_pam4_msks, *sp_pam4_112_msks; + enum bnxt_media_type media = bnxt_get_media(link_info); + u16 *adv, *adv_pam4, *adv_pam4_112 = NULL; + u32 delta_pam4_112 = 0; + u32 delta_pam4 = 0; + u32 delta_nrz = 0; + int i, m; + + adv = &link_info->advertising; + if (bp->phy_flags & BNXT_PHY_FL_SPEEDS2) { + adv_pam4 = &link_info->advertising; + adv_pam4_112 = &link_info->advertising; + sp_msks = bnxt_nrz_speeds2_masks; + sp_pam4_msks = bnxt_pam4_speeds2_masks; + sp_pam4_112_msks = bnxt_pam4_112_speeds2_masks; 
+ } else { + adv_pam4 = &link_info->advertising_pam4; + sp_msks = bnxt_nrz_speed_masks; + sp_pam4_msks = bnxt_pam4_speed_masks; + } + for (i = 1; i < __BNXT_LINK_SPEED_END; i++) { + /* accept any legal media from user */ + for (m = 1; m < __BNXT_MEDIA_END; m++) { + bnxt_update_speed(&delta_nrz, m == media, + adv, sp_msks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]); + bnxt_update_speed(&delta_pam4, m == media, + adv_pam4, sp_pam4_msks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]); + if (!adv_pam4_112) + continue; + + bnxt_update_speed(&delta_pam4_112, m == media, + adv_pam4_112, sp_pam4_112_msks[i], et_mask, + bnxt_link_modes[i][BNXT_SIG_MODE_PAM4_112][m]); + } + } +} + +static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) +{ + u16 fec_cfg = link_info->fec_cfg; + + if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + lk_ksettings->link_modes.advertising); + return; + } + if (fec_cfg & BNXT_FEC_ENC_BASE_R) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + lk_ksettings->link_modes.advertising); + if (fec_cfg & BNXT_FEC_ENC_RS) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + lk_ksettings->link_modes.advertising); + if (fec_cfg & BNXT_FEC_ENC_LLRS) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + lk_ksettings->link_modes.advertising); +} + +static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) +{ + u16 fec_cfg = link_info->fec_cfg; + + if (fec_cfg & BNXT_FEC_NONE) { + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, + lk_ksettings->link_modes.supported); + return; + } + if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, + lk_ksettings->link_modes.supported); + if (fec_cfg & BNXT_FEC_ENC_RS_CAP) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, + lk_ksettings->link_modes.supported); + if 
(fec_cfg & BNXT_FEC_ENC_LLRS_CAP) + linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, + lk_ksettings->link_modes.supported); +} + +u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) +{ + switch (fw_link_speed) { + case BNXT_LINK_SPEED_100MB: + return SPEED_100; + case BNXT_LINK_SPEED_1GB: + return SPEED_1000; + case BNXT_LINK_SPEED_2_5GB: + return SPEED_2500; + case BNXT_LINK_SPEED_10GB: + return SPEED_10000; + case BNXT_LINK_SPEED_20GB: + return SPEED_20000; + case BNXT_LINK_SPEED_25GB: + return SPEED_25000; + case BNXT_LINK_SPEED_40GB: + return SPEED_40000; + case BNXT_LINK_SPEED_50GB: + case BNXT_LINK_SPEED_50GB_PAM4: + return SPEED_50000; + case BNXT_LINK_SPEED_100GB: + case BNXT_LINK_SPEED_100GB_PAM4: + case BNXT_LINK_SPEED_100GB_PAM4_112: + return SPEED_100000; + case BNXT_LINK_SPEED_200GB: + case BNXT_LINK_SPEED_200GB_PAM4: + case BNXT_LINK_SPEED_200GB_PAM4_112: + return SPEED_200000; + case BNXT_LINK_SPEED_400GB: + case BNXT_LINK_SPEED_400GB_PAM4: + case BNXT_LINK_SPEED_400GB_PAM4_112: + return SPEED_400000; + default: + return SPEED_UNKNOWN; + } +} + +static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings, + struct bnxt_link_info *link_info) +{ + struct ethtool_link_settings *base = &lk_ksettings->base; + + if (link_info->link_state == BNXT_LINK_STATE_UP) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; +#ifdef HAVE_ETHTOOL_A_LINKMODES_LANES + lk_ksettings->lanes = link_info->active_lanes; +#endif + } else if (!link_info->autoneg) { + base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + base->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + base->duplex = DUPLEX_FULL; + } +} + +static int bnxt_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *lk_ksettings) +{ + struct ethtool_link_settings *base = &lk_ksettings->base; + enum 
ethtool_link_mode_bit_indices link_mode; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info; + enum bnxt_media_type media; + + ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + base->duplex = DUPLEX_UNKNOWN; + base->speed = SPEED_UNKNOWN; + link_info = &bp->link_info; + + mutex_lock(&bp->link_lock); + bnxt_get_ethtool_modes(link_info, lk_ksettings); + media = bnxt_get_media(link_info); + bnxt_get_all_ethtool_support_speeds(link_info, media, lk_ksettings); + bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings); + link_mode = bnxt_get_link_mode(link_info); + if (link_mode != BNXT_LINK_MODE_UNKNOWN) + ethtool_params_from_link_mode(lk_ksettings, link_mode); + else + bnxt_get_default_speeds(lk_ksettings, link_info); + + if (link_info->autoneg) { + bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lk_ksettings->link_modes.advertising); + base->autoneg = AUTONEG_ENABLE; + bnxt_get_all_ethtool_adv_speeds(link_info, media, lk_ksettings); + if (link_info->phy_link_status == BNXT_LINK_LINK) + bnxt_get_all_ethtool_lp_speeds(link_info, media, + lk_ksettings); + } else { + base->autoneg = AUTONEG_DISABLE; + } + + base->port = PORT_NONE; + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + base->port = PORT_TP; + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, + lk_ksettings->link_modes.advertising); + } else { + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + lk_ksettings->link_modes.advertising); + + if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) + base->port = PORT_DA; + else + base->port = PORT_FIBRE; + } + base->phy_address = 
link_info->phy_addr; + mutex_unlock(&bp->link_lock); + + return 0; +} + +static int +bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 support_pam4_spds = link_info->support_pam4_speeds; + u16 support_spds2 = link_info->support_speeds2; + u16 support_spds = link_info->support_speeds; + u8 sig_mode = BNXT_SIG_MODE_NRZ; + u32 lanes_needed = 1; + u16 fw_speed = 0; + + switch (ethtool_speed) { + case SPEED_100: + if (support_spds & BNXT_LINK_SPEED_MSK_100MB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB; + break; + case SPEED_1000: + if ((support_spds & BNXT_LINK_SPEED_MSK_1GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_1GB)) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; + break; + case SPEED_2500: + if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB; + break; + case SPEED_10000: + if ((support_spds & BNXT_LINK_SPEED_MSK_10GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_10GB)) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; + break; + case SPEED_20000: + if (support_spds & BNXT_LINK_SPEED_MSK_20GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB; + lanes_needed = 2; + } + break; + case SPEED_25000: + if ((support_spds & BNXT_LINK_SPEED_MSK_25GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_25GB)) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; + break; + case SPEED_40000: + if ((support_spds & BNXT_LINK_SPEED_MSK_40GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_40GB)) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + lanes_needed = 4; + } + break; + case SPEED_50000: + if (((support_spds & BNXT_LINK_SPEED_MSK_50GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB)) && + lanes != 1) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + lanes_needed = 2; + } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) { + fw_speed = 
PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB; + sig_mode = BNXT_SIG_MODE_PAM4; + } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_50GB_PAM4) { + fw_speed = BNXT_LINK_SPEED_50GB_PAM4; + sig_mode = BNXT_SIG_MODE_PAM4; + } + break; + case SPEED_100000: + if (((support_spds & BNXT_LINK_SPEED_MSK_100GB) || + (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB)) && + lanes != 2 && lanes != 1) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB; + lanes_needed = 4; + } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB; + sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 2; + } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4) && + lanes != 1) { + fw_speed = BNXT_LINK_SPEED_100GB_PAM4; + sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 2; + } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112) { + fw_speed = BNXT_LINK_SPEED_100GB_PAM4_112; + sig_mode = BNXT_SIG_MODE_PAM4_112; + } + break; + case SPEED_200000: + if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) { + fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB; + sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 4; + } else if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4) && + lanes != 2) { + fw_speed = BNXT_LINK_SPEED_200GB_PAM4; + sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 4; + } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112) { + fw_speed = BNXT_LINK_SPEED_200GB_PAM4_112; + sig_mode = BNXT_SIG_MODE_PAM4_112; + lanes_needed = 2; + } + break; + case SPEED_400000: + if ((support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4) && + lanes != 4) { + fw_speed = BNXT_LINK_SPEED_400GB_PAM4; + sig_mode = BNXT_SIG_MODE_PAM4; + lanes_needed = 8; + } else if (support_spds2 & BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112) { + fw_speed = BNXT_LINK_SPEED_400GB_PAM4_112; + sig_mode = BNXT_SIG_MODE_PAM4_112; + lanes_needed = 4; + } + break; + } + + if (!fw_speed) { + netdev_err(dev, "unsupported speed!\n"); + return -EINVAL; + } + 
+ if (lanes && lanes != lanes_needed) { + netdev_err(dev, "unsupported number of lanes for speed\n"); + return -EINVAL; + } + + if (link_info->req_link_speed == fw_speed && + link_info->req_signal_mode == sig_mode && + link_info->autoneg == 0) + return -EALREADY; + + link_info->req_link_speed = fw_speed; + link_info->req_signal_mode = sig_mode; + link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; + link_info->autoneg = 0; + link_info->advertising = 0; + link_info->advertising_pam4 = 0; + + return 0; +} + +#ifdef HAVE_ETHTOOL_KEEE +u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode) +{ + u16 fw_speed_mask = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mode) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mode)) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mode) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mode)) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, mode)) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, mode)) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; + + return fw_speed_mask; +} +#endif /* HAVE_ETHTOOL_KEEE */ + +static int bnxt_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *lk_ksettings) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + const struct ethtool_link_settings *base = &lk_ksettings->base; + bool set_pause = false; + u32 speed, lanes = 0; + int rc = 0; + + if (!BNXT_PHY_CFG_ABLE(bp)) + return -EOPNOTSUPP; + + mutex_lock(&bp->link_lock); + if (base->autoneg == AUTONEG_ENABLE) { + bnxt_set_ethtool_speeds(link_info, + lk_ksettings->link_modes.advertising); + link_info->autoneg |= BNXT_AUTONEG_SPEED; + if (!link_info->advertising && !link_info->advertising_pam4) { + link_info->advertising = link_info->support_auto_speeds; + 
link_info->advertising_pam4 = + link_info->support_pam4_auto_speeds; + } + /* any change to autoneg will cause link change, therefore the + * driver should put back the original pause setting in autoneg + */ + if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) + set_pause = true; + } else { + u8 phy_type = link_info->phy_type; + + if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || + phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || + link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + + netdev_err(dev, "10GBase-T devices must autoneg\n"); + rc = -EINVAL; + goto set_setting_exit; + } + if (base->duplex == DUPLEX_HALF) { + netdev_err(dev, "HALF DUPLEX is not supported!\n"); + rc = -EINVAL; + goto set_setting_exit; + } + speed = base->speed; +#ifdef HAVE_ETHTOOL_LANES + lanes = lk_ksettings->lanes; +#endif + rc = bnxt_force_link_speed(dev, speed, lanes); + if (rc) { + if (rc == -EALREADY) + rc = 0; + goto set_setting_exit; + } + } + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); + +set_setting_exit: + mutex_unlock(&bp->link_lock); + return rc; +} + +#ifdef ETHTOOL_GFECPARAM +static int bnxt_get_fecparam(struct net_device *dev, + struct ethtool_fecparam *fec) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info; + u8 active_fec; + u16 fec_cfg; + + link_info = &bp->link_info; + fec_cfg = link_info->fec_cfg; + active_fec = link_info->active_fec_sig_mode & + PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK; + if (fec_cfg & BNXT_FEC_NONE) { + fec->fec = ETHTOOL_FEC_NONE; + fec->active_fec = ETHTOOL_FEC_NONE; + return 0; + } + if (fec_cfg & BNXT_FEC_AUTONEG) + fec->fec |= ETHTOOL_FEC_AUTO; + if (fec_cfg & BNXT_FEC_ENC_BASE_R) + fec->fec |= ETHTOOL_FEC_BASER; + if (fec_cfg & BNXT_FEC_ENC_RS) + fec->fec |= ETHTOOL_FEC_RS; + if (fec_cfg & BNXT_FEC_ENC_LLRS) + fec->fec |= ETHTOOL_FEC_LLRS; + + switch (active_fec) { + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_BASER; + break; + case 
PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE: + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE: + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_RS; + break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE: + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_LLRS; + break; + case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE: + fec->active_fec |= ETHTOOL_FEC_OFF; + break; + } + return 0; +} + +#ifdef ETHTOOL_MAX_LANES +static void bnxt_get_fec_stats(struct net_device *dev, + struct ethtool_fec_stats *fec_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + fec_stats->corrected_bits.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); + + if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY) + return; + + fec_stats->corrected_blocks.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)); + fec_stats->uncorrectable_blocks.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks)); +} +#endif + +static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, + u32 fec) +{ + u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE; + + if (fec & ETHTOOL_FEC_BASER) + fw_fec |= BNXT_FEC_BASE_R_ON(link_info); + else if (fec & ETHTOOL_FEC_RS) + fw_fec |= BNXT_FEC_RS_ON(link_info); + else if (fec & ETHTOOL_FEC_LLRS) + fw_fec |= BNXT_FEC_LLRS_ON; + return fw_fec; +} + +static int bnxt_set_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct hwrm_port_phy_cfg_input *req; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info; + u32 new_cfg, fec = fecparam->fec; + u16 fec_cfg; + int rc; + + link_info = &bp->link_info; + fec_cfg = link_info->fec_cfg; + if (fec_cfg & BNXT_FEC_NONE) + return -EOPNOTSUPP; + + if (fec & ETHTOOL_FEC_OFF) { + new_cfg = 
PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE | + BNXT_FEC_ALL_OFF(link_info); + goto apply_fec; + } + if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) || + ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) || + ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) || + ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP))) + return -EINVAL; + + if (fec & ETHTOOL_FEC_AUTO) { + if (!link_info->autoneg) + return -EINVAL; + new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE; + } else { + new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec); + } + +apply_fec: + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); + rc = hwrm_req_send(bp, req); + /* update current settings */ + if (!rc) { + mutex_lock(&bp->link_lock); + bnxt_update_link(bp, false); + mutex_unlock(&bp->link_lock); + } + return rc; +} +#endif + +static void bnxt_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *epause) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + return; + epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); + epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX); + epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX); +} + +#ifdef HAVE_GET_PAUSE_STATS +static void bnxt_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *epstat) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames); + epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames); +} +#endif /* HAVE_GET_PAUSE_STATS */ + +static int bnxt_set_pauseparam(struct net_device 
*dev, + struct ethtool_pauseparam *epause) +{ + int rc = 0; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) + return -EOPNOTSUPP; + + mutex_lock(&bp->link_lock); + if (epause->autoneg) { + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + rc = -EINVAL; + goto pause_exit; + } + + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl = 0; + } else { + /* when transition from auto pause to force pause, + * force a link change + */ + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->force_link_chng = true; + link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; + link_info->req_flow_ctrl = 0; + } + if (epause->rx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; + + if (epause->tx_pause) + link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_pause(bp); + +pause_exit: + mutex_unlock(&bp->link_lock); + return rc; +} + +static u32 bnxt_get_link(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + /* TODO: handle MF, VF, driver close case */ + return BNXT_LINK_IS_UP(bp); +} + +int bnxt_hwrm_fw_sync(struct bnxt *bp, u16 fw_status) +{ + struct hwrm_fw_sync_output *resp; + struct hwrm_fw_sync_input *req; + u32 sync_action, sync_status; + int rc; + + if (!BNXT_CHIP_P5_PLUS(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FW_SYNC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, FW_SYNC_TIMEOUT); + + sync_action = FW_SYNC_REQ_SYNC_ACTION_ACTION; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_CFG_MISMATCH) + sync_action |= 
FW_SYNC_REQ_SYNC_ACTION_SYNC_CFG; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_FRU_MISMATCH) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_SYNC_FRU; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_MISMATCH) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_SYNC_DIR_HDR; + if (fw_status & FW_HEALTH_CHECK_RESP_FW_STATUS_MBR_CORRUPT) + sync_action |= FW_SYNC_REQ_SYNC_ACTION_WRITE_MBR; + + req->sync_action = cpu_to_le32(sync_action); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + sync_status = le32_to_cpu(resp->sync_status); + if (sync_status & + (FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL | FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR)) + rc = -EIO; + } + + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_get_fw_sync_status(struct bnxt *bp, u16 *fw_status) +{ + struct hwrm_fw_health_check_output *resp; + struct hwrm_fw_health_check_input *req; + int rc; + + if (!BNXT_CHIP_P5_PLUS(bp)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_FW_HEALTH_CHECK); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *fw_status = le16_to_cpu(resp->fw_status); + + hwrm_req_drop(bp, req); + return rc; +} + +#define BNXT_FW_SYNC_STATUS_MISMATCH (FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_MISMATCH | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_MBR_CORRUPT | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_CFG_MISMATCH | \ + FW_HEALTH_CHECK_RESP_FW_STATUS_FRU_MISMATCH) + +int bnxt_sync_firmware(struct bnxt *bp) +{ + u16 fw_status = 0; + int rc; + + if (!BNXT_CHIP_P5_PLUS(bp)) + return 0; + + rc = bnxt_hwrm_get_fw_sync_status(bp, &fw_status); + if (rc) + return rc; + + if (fw_status & BNXT_FW_SYNC_STATUS_MISMATCH) + rc = bnxt_hwrm_fw_sync(bp, fw_status); + + return rc; +} + +int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, + struct hwrm_nvm_get_dev_info_output *nvm_dev_info) +{ + struct 
hwrm_nvm_get_dev_info_output *resp; + struct hwrm_nvm_get_dev_info_input *req; + int rc; + + if (BNXT_VF(bp)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + memcpy(nvm_dev_info, resp, sizeof(struct hwrm_nvm_get_dev_info_output)); + hwrm_req_drop(bp, req); + return rc; +} + +static void bnxt_print_admin_err(struct bnxt *bp) +{ + netdev_info(bp->dev, "PF does not have admin privileges to flash the device\n"); +} + +int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length); + +int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_write_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); + if (rc) + return rc; + + if (data_len && data) { + dma_addr_t dma_handle; + u8 *kmem; + + kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->dir_data_length = cpu_to_le32(data_len); + + memcpy(kmem, data, data_len); + req->host_src_addr = cpu_to_le64(dma_handle); + } + + hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + req->dir_type = cpu_to_le16(dir_type); + req->dir_ordinal = cpu_to_le16(dir_ordinal); + req->dir_ext = cpu_to_le16(dir_ext); + req->dir_attr = cpu_to_le16(dir_attr); + req->dir_item_length = cpu_to_le32(dir_item_len); + rc = hwrm_req_send(bp, req); + + if (rc == -EACCES) + bnxt_print_admin_err(bp); + return rc; +} + +int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input *req; + int rc; + + if (!bnxt_hwrm_reset_permitted(bp)) { + netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited 
by remote driver"); + return -EPERM; + } + + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (rc) + return rc; + + req->embedded_proc_type = proc_type; + req->selfrst_status = self_reset; + req->flags = flags; + + if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { + rc = hwrm_req_send_silent(bp, req); + } else { + rc = hwrm_req_send(bp, req); + if (rc == -EACCES) + bnxt_print_admin_err(bp); + } + return rc; +} + +static int bnxt_firmware_reset(struct net_device *dev, + enum bnxt_nvm_directory_type dir_type) +{ + u8 self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE; + u8 proc_type, flags = 0; + + /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ + /* (e.g. when firmware isn't already running) */ + switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT; + /* Self-reset ChiMP upon next PCIe reset: */ + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + self_reset = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; + break; + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL; + break; + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; + break; + default: + return -EINVAL; + } + + return bnxt_hwrm_firmware_reset(dev, proc_type, self_reset, flags); +} + +int bnxt_firmware_reset_chip(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + u8 flags = 0; + + if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) + flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + + return bnxt_hwrm_firmware_reset(dev, + FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP, + flags); +} + +int bnxt_firmware_reset_ap(struct net_device *dev) +{ + 
return bnxt_hwrm_firmware_reset(dev, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP, + FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE, + 0); +} + +static int bnxt_flash_firmware(struct net_device *dev, + u16 dir_type, + const u8 *fw_data, + size_t fw_size) +{ + int rc = 0; + u16 code_type; + u32 stored_crc; + u32 calculated_crc; + struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data; + + switch (dir_type) { + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + code_type = CODE_BOOT; + break; + case BNX_DIR_TYPE_CHIMP_PATCH: + code_type = CODE_CHIMP_PATCH; + break; + case BNX_DIR_TYPE_APE_FW: + code_type = CODE_MCTP_PASSTHRU; + break; + case BNX_DIR_TYPE_APE_PATCH: + code_type = CODE_APE_PATCH; + break; + case BNX_DIR_TYPE_KONG_FW: + code_type = CODE_KONG_FW; + break; + case BNX_DIR_TYPE_KONG_PATCH: + code_type = CODE_KONG_PATCH; + break; + case BNX_DIR_TYPE_BONO_FW: + code_type = CODE_BONO_FW; + break; + case BNX_DIR_TYPE_BONO_PATCH: + code_type = CODE_BONO_PATCH; + break; + default: + netdev_err(dev, "Unsupported directory entry type: %u\n", + dir_type); + return -EINVAL; + } + if (fw_size < sizeof(struct bnxt_fw_header)) { + netdev_err(dev, "Invalid firmware file size: %u\n", + (unsigned int)fw_size); + return -EINVAL; + } + if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) { + netdev_err(dev, "Invalid firmware signature: %08X\n", + le32_to_cpu(header->signature)); + return -EINVAL; + } + if (header->code_type != code_type) { + netdev_err(dev, "Expected firmware type: %d, read: %d\n", + code_type, header->code_type); + return -EINVAL; + } + if (header->device != DEVICE_CUMULUS_FAMILY) { + netdev_err(dev, "Expected firmware device family %d, read: %d\n", + DEVICE_CUMULUS_FAMILY, header->device); + return -EINVAL; + } + /* Confirm the CRC32 checksum of the file: */ + stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - + sizeof(stored_crc))); + calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); + if (calculated_crc != 
stored_crc) { + netdev_err(dev, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n", + (unsigned long)stored_crc, + (unsigned long)calculated_crc); + return -EINVAL; + } + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, 0, fw_data, fw_size); + if (rc == 0) /* Firmware update successful */ + rc = bnxt_firmware_reset(dev, dir_type); + + return rc; +} + +static int bnxt_flash_microcode(struct net_device *dev, + u16 dir_type, + const u8 *fw_data, + size_t fw_size) +{ + struct bnxt_ucode_trailer *trailer; + u32 calculated_crc; + u32 stored_crc; + int rc = 0; + + if (fw_size < sizeof(struct bnxt_ucode_trailer)) { + netdev_err(dev, "Invalid microcode file size: %u\n", + (unsigned int)fw_size); + return -EINVAL; + } + trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size - + sizeof(*trailer))); + if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) { + netdev_err(dev, "Invalid microcode trailer signature: %08X\n", + le32_to_cpu(trailer->sig)); + return -EINVAL; + } + if (le16_to_cpu(trailer->dir_type) != dir_type) { + netdev_err(dev, "Expected microcode type: %d, read: %d\n", + dir_type, le16_to_cpu(trailer->dir_type)); + return -EINVAL; + } + if (le16_to_cpu(trailer->trailer_length) < + sizeof(struct bnxt_ucode_trailer)) { + netdev_err(dev, "Invalid microcode trailer length: %d\n", + le16_to_cpu(trailer->trailer_length)); + return -EINVAL; + } + + /* Confirm the CRC32 checksum of the file: */ + stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size - + sizeof(stored_crc))); + calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc)); + if (calculated_crc != stored_crc) { + netdev_err(dev, + "CRC32 (%08lX) does not match calculated: %08lX\n", + (unsigned long)stored_crc, + (unsigned long)calculated_crc); + return -EINVAL; + } + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, 0, fw_data, fw_size); + + return rc; +} + +static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) +{ + 
switch (dir_type) { + case BNX_DIR_TYPE_CHIMP_PATCH: + case BNX_DIR_TYPE_BOOTCODE: + case BNX_DIR_TYPE_BOOTCODE_2: + case BNX_DIR_TYPE_APE_FW: + case BNX_DIR_TYPE_APE_PATCH: + case BNX_DIR_TYPE_KONG_FW: + case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_other_exec_format(u16 dir_type) +{ + switch (dir_type) { + case BNX_DIR_TYPE_AVS: + case BNX_DIR_TYPE_EXP_ROM_MBA: + case BNX_DIR_TYPE_PCIE: + case BNX_DIR_TYPE_TSCF_UCODE: + case BNX_DIR_TYPE_EXT_PHY: + case BNX_DIR_TYPE_CCM: + case BNX_DIR_TYPE_ISCSI_BOOT: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV6: + case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6: + return true; + } + + return false; +} + +static bool bnxt_dir_type_is_executable(u16 dir_type) +{ + return bnxt_dir_type_is_ape_bin_format(dir_type) || + bnxt_dir_type_is_other_exec_format(dir_type); +} + +static int bnxt_flash_firmware_from_file(struct net_device *dev, + u16 dir_type, + const char *filename) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc != 0) { + netdev_err(dev, "Error %d requesting firmware file: %s\n", + rc, filename); + return rc; + } + if (bnxt_dir_type_is_ape_bin_format(dir_type)) + rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size); + else if (bnxt_dir_type_is_other_exec_format(dir_type)) + rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); + else + rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, + 0, 0, 0, fw->data, fw->size); + release_firmware(fw); + return rc; +} + +#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM" +#define MSG_INVALID_PKG "PKG install error : Invalid package" +#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error" +#define MSG_INVALID_DEV "PKG install error : Invalid device" +#define MSG_INTERNAL_ERR "PKG install error : Internal error" +#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created 
in nvram" +#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram" +#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error" +#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected" +#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure" + +static int nvm_update_err_to_stderr(struct net_device *dev, u8 result, + struct netlink_ext_ack *extack) +{ + switch (result) { + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR: + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR: + case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND: + case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR); + return -EINVAL; + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH: + case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE: + case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM: + case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG); + return -ENOPKG; + case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR: + BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR); + return -EPERM; + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR: + case 
NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID: + case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV); + return -EOPNOTSUPP; + default: + BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR); + return -EIO; + } +} + +#define BNXT_PKG_DMA_SIZE 0x40000 +#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) +#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) + +static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size, + struct netlink_ext_ack *extack) +{ + u32 item_len; + int rc; + + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL, + &item_len, NULL); + if (rc) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); + return rc; + } + + if (fw_size > item_len) { + rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, 0, 1, + round_up(fw_size, 4096), NULL, 0); + if (rc) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR); + return rc; + } + } + return 0; +} + +int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, + u32 install_type, struct netlink_ext_ack *extack) +{ + struct hwrm_nvm_install_update_input *install; + struct hwrm_nvm_install_update_output *resp; + struct hwrm_nvm_modify_input *modify; + struct bnxt *bp = netdev_priv(dev); + bool defrag_attempted = false; + dma_addr_t dma_handle; + u8 *kmem = NULL; + u32 modify_len; + u32 item_len; + u8 cmd_err; + u16 index; + int rc; + + /* resize before flashing larger image than available space */ + rc = bnxt_resize_update_entry(dev, fw->size, extack); + if (rc) + return rc; + + bnxt_hwrm_fw_set_time(bp); + + rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); + if (rc) + return rc; + + /* Try allocating a large DMA buffer first. Older fw will + * cause excessive NVRAM erases when using small blocks. 
+ */ + modify_len = roundup_pow_of_two(fw->size); + modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); + while (1) { + kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); + if (!kmem && modify_len > PAGE_SIZE) + modify_len /= 2; + else + break; + } + if (!kmem) { + hwrm_req_drop(bp, modify); + return -ENOMEM; + } + + rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); + if (rc) { + hwrm_req_drop(bp, modify); + return rc; + } + + hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); + + hwrm_req_hold(bp, modify); + modify->host_src_addr = cpu_to_le64(dma_handle); + + resp = hwrm_req_hold(bp, install); + if ((install_type & 0xffff) == 0) + install_type >>= 16; + install->install_type = cpu_to_le32(install_type); + + do { + u32 copied = 0, len = modify_len; + + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + BNX_DIR_EXT_NONE, + &index, &item_len, NULL); + if (rc) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); + break; + } + if (fw->size > item_len) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR); + rc = -EFBIG; + break; + } + + modify->dir_idx = cpu_to_le16(index); + + if (fw->size > modify_len) + modify->flags = BNXT_NVM_MORE_FLAG; + while (copied < fw->size) { + u32 balance = fw->size - copied; + + if (balance <= modify_len) { + len = balance; + if (copied) + modify->flags |= BNXT_NVM_LAST_FLAG; + } + memcpy(kmem, fw->data + copied, len); + modify->len = cpu_to_le32(len); + modify->offset = cpu_to_le32(copied); + rc = hwrm_req_send(bp, modify); + if (rc) + goto pkg_abort; + copied += len; + } + + rc = hwrm_req_send_silent(bp, install); + if (!rc) + break; + + if (defrag_attempted) + break; + + cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; + + switch (cmd_err) { + case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK: + BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR); + rc = -EALREADY; + break; + case 
NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR: + install->flags = + cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); + + rc = hwrm_req_send_silent(bp, install); + if (!rc) + break; + + cmd_err = ((struct hwrm_err_output *)resp)->cmd_err; + + if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { + /* FW has cleared NVM area, driver will create + * UPDATE directory and try the flash again + */ + defrag_attempted = true; + install->flags = 0; + rc = bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, 0); + if (!rc) + break; + } + fallthrough; + default: + BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR); + } + } while (defrag_attempted && !rc); + +pkg_abort: + hwrm_req_drop(bp, modify); + hwrm_req_drop(bp, install); + + if (resp->result) { + netdev_err(dev, "PKG install error = %#x, problem_item = %#x\n", + resp->result, resp->problem_item); + rc = nvm_update_err_to_stderr(dev, resp->result, extack); + } + + if (rc == -EACCES) + bnxt_print_admin_err(bp); + return rc; +} + +int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, + u32 install_type, struct netlink_ext_ack *extack) +{ + const struct firmware *fw; + int rc; + + rc = request_firmware(&fw, filename, &dev->dev); + if (rc) { + netdev_err(dev, "PKG error %d requesting file: %s\n", + rc, filename); + return rc; + } + + rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack); + + release_firmware(fw); + + return rc; +} + +static int bnxt_flash_device(struct net_device *dev, + struct ethtool_flash *flash) +{ + if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) { + netdev_err(dev, "flashdev not supported from a virtual function\n"); + return -EINVAL; + } + + if (flash->region == ETHTOOL_FLASH_ALL_REGIONS || + flash->region > 0xffff) + return bnxt_flash_package_from_file(dev, flash->data, + flash->region, NULL); + + return bnxt_flash_firmware_from_file(dev, flash->region, flash->data); +} + +static int 
nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) +{ + struct hwrm_nvm_get_dir_info_output *output; + struct hwrm_nvm_get_dir_info_input *req; + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *entries = le32_to_cpu(output->entries); + *length = le32_to_cpu(output->entry_length); + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_get_eeprom_len(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + + if (BNXT_VF(bp)) + return 0; + + /* The -1 return value allows the entire 32-bit range of offsets to be + * passed via the ethtool command-line utility. + */ + return -1; +} + +static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) +{ + struct hwrm_nvm_get_dir_entries_input *req; + struct bnxt *bp = netdev_priv(dev); + dma_addr_t dma_handle; + u32 entry_length; + u32 dir_entries; + size_t buflen; + u8 *buf; + int rc; + + rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + if (!dir_entries || !entry_length) + return -EIO; + + /* Insert 2 bytes of directory info (count and size of entries) */ + if (len < 2) + return -EINVAL; + + *data++ = dir_entries; + *data++ = entry_length; + len -= 2; + memset(data, 0xff, len); + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); + if (rc) + return rc; + + buflen = mul_u32_u32(dir_entries, entry_length); + buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); + if (!buf) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + req->host_dest_addr = cpu_to_le64(dma_handle); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); + if (rc == 0) + memcpy(data, buf, len > buflen ? 
buflen : len); + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, + u32 length, u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_read_input *req; + dma_addr_t dma_handle; + u8 *buf; + int rc; + + if (!length) + return -EINVAL; + + rc = hwrm_req_init(bp, req, HWRM_NVM_READ); + if (rc) + return rc; + + buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); + if (!buf) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + req->host_dest_addr = cpu_to_le64(dma_handle); + req->dir_idx = cpu_to_le16(index); + req->offset = cpu_to_le32(offset); + req->len = cpu_to_le32(length); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); + if (rc == 0) + memcpy(data, buf, length); + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length) +{ + struct hwrm_nvm_find_dir_entry_output *output; + struct hwrm_nvm_find_dir_entry_input *req; + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); + if (rc) + return rc; + + req->enables = 0; + req->dir_idx = 0; + req->dir_type = cpu_to_le16(type); + req->dir_ordinal = cpu_to_le16(ordinal); + req->dir_ext = cpu_to_le16(ext); + req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc == 0) { + if (index) + *index = le16_to_cpu(output->dir_idx); + if (item_length) + *item_length = le32_to_cpu(output->dir_item_length); + if (data_length) + *data_length = le32_to_cpu(output->dir_data_length); + } + hwrm_req_drop(bp, req); + return rc; +} + +static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) +{ + char *retval = NULL; + char *p; + char *value; + int field = 0; + + if (datalen < 1) + return NULL; + /* null-terminate the log data (removing last '\n'): */ + 
data[datalen - 1] = 0; + for (p = data; *p != 0; p++) { + field = 0; + retval = NULL; + while (*p != 0 && *p != '\n') { + value = p; + while (*p != 0 && *p != '\t' && *p != '\n') + p++; + if (field == desired_field) + retval = value; + if (*p != '\t') + break; + *p = 0; + field++; + p++; + } + if (*p == 0) + break; + *p = 0; + } + return retval; +} + +int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size) +{ + struct bnxt *bp = netdev_priv(dev); + u16 index = 0; + char *pkgver; + u32 pkglen; + u8 *pkgbuf; + int rc; + + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, NULL, &pkglen); + if (rc) + return rc; + + pkgbuf = kzalloc(pkglen, GFP_KERNEL); + if (!pkgbuf) { + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", + pkglen); + return -ENOMEM; + } + + rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf); + if (rc) + goto err; + + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, + pkglen); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) + strscpy(ver, pkgver, size); + else + rc = -ENOENT; + +err: + kfree(pkgbuf); + + return rc; +} + +static void bnxt_get_pkgver(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + char buf[FW_VER_STR_LEN]; + int len; + + if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", buf); + } +} + +static int bnxt_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct hwrm_nvm_get_dev_info_output nvm_dev_info; + struct hwrm_fw_qstatus_output *fw_status_resp; + struct hwrm_fw_qstatus_input *fw_status_req; + struct bnxt *bp = netdev_priv(dev); + u32 size, index, offset; + int rc, i; + + if (eeprom->len < 1) + return -EINVAL; + + if (eeprom->offset == 0) /* special offset value to get directory */ + return bnxt_get_nvram_directory(dev, eeprom->len, data); + + index = 
eeprom->offset >> 24; + offset = eeprom->offset & 0xffffff; + + if (index != 0) + return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, + data); + + switch (offset) { + case 1: /* Query firmware reset status */ + if (eeprom->len < 5) + return -EINVAL; + size = 4; /* procs: BOOT, MGMT, NETCTRL, and ROCE */ + *(data++) = size; + rc = hwrm_req_init(bp, fw_status_req, HWRM_FW_QSTATUS); + if (rc) + return rc; + + fw_status_resp = hwrm_req_hold(bp, fw_status_req); + for (i = 0; i < size; i++) { + fw_status_req->embedded_proc_type = i; + rc = hwrm_req_send(bp, fw_status_req); + if (rc == 0) + *(data++) = fw_status_resp->selfrst_status; + else + break; + } + hwrm_req_drop(bp, fw_status_req); + return rc; + case 2: /* Query firmware version information */ + size = sizeof(bp->ver_resp); + *(data++) = size; + memcpy(data, &bp->ver_resp, min(size, eeprom->len - 1)); + return 0; + case 3: /* Query NVM device information */ + rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info); + if (rc) + return rc; + + size = sizeof(nvm_dev_info); + *(data++) = size; + memcpy(data, &nvm_dev_info, min(size, eeprom->len - 1)); + return 0; + } + return -EINVAL; +} + +static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) +{ + struct hwrm_nvm_erase_dir_entry_input *req; + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); + if (rc) + return rc; + + req->dir_idx = cpu_to_le16(index); + return hwrm_req_send(bp, req); +} + +static int bnxt_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u8 index, dir_op; + u16 type, ext, ordinal, attr; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "NVM write not supported from a virtual function\n"); + return -EINVAL; + } + + type = eeprom->magic >> 16; + + if (type == 0xffff) { /* special value for directory operations */ + index = eeprom->magic & 0xff; + dir_op = eeprom->magic >> 8; + if (index == 0) + return 
-EINVAL; + switch (dir_op) { + case 0x0e: /* erase */ + if (eeprom->offset != ~eeprom->magic) + return -EINVAL; + return bnxt_erase_nvram_directory(dev, index - 1); + default: + return -EINVAL; + } + } + + /* Create or re-write an NVM item: */ + if (bnxt_dir_type_is_executable(type)) + return -EOPNOTSUPP; + ext = eeprom->magic & 0xffff; + ordinal = eeprom->offset >> 16; + attr = eeprom->offset & 0xffff; + + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, + eeprom->len); +} + +#if defined(HAVE_ETHTOOL_KEEE) +static int bnxt_set_eee(struct net_device *dev, struct ethtool_keee *edata) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); + struct bnxt *bp = netdev_priv(dev); + struct ethtool_keee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + int rc = 0; + + if (!BNXT_PHY_CFG_ABLE(bp)) + return -EOPNOTSUPP; + + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) + return -EOPNOTSUPP; + + mutex_lock(&bp->link_lock); + _bnxt_fw_to_linkmode(advertising, link_info->advertising); + if (!edata->eee_enabled) + goto eee_ok; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + netdev_warn(dev, "EEE requires autoneg\n"); + rc = -EINVAL; + goto eee_exit; + } + if (edata->tx_lpi_enabled) { + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || + edata->tx_lpi_timer < bp->lpi_tmr_lo)) { + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", + bp->lpi_tmr_lo, bp->lpi_tmr_hi); + rc = -EINVAL; + goto eee_exit; + } else if (!bp->lpi_tmr_hi) { + edata->tx_lpi_timer = eee->tx_lpi_timer; + } + } + if (linkmode_empty(edata->advertised)) { + linkmode_and(edata->advertised, advertising, eee->supported); + } else if (linkmode_andnot(tmp, edata->advertised, advertising)) { + netdev_warn(dev, "EEE advertised must be a subset of autoneg advertised speeds\n"); + rc = -EINVAL; + goto eee_exit; + } + + linkmode_copy(eee->advertised, edata->advertised); + eee->tx_lpi_enabled = edata->tx_lpi_enabled; + 
eee->tx_lpi_timer = edata->tx_lpi_timer; +eee_ok: + eee->eee_enabled = edata->eee_enabled; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, false, true); + +eee_exit: + mutex_unlock(&bp->link_lock); + return rc; +} + +static int bnxt_get_eee(struct net_device *dev, struct ethtool_keee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) + return -EOPNOTSUPP; + + *edata = bp->eee; + if (!bp->eee.eee_enabled) { + /* Preserve tx_lpi_timer so that the last value will be used + * by default when it is re-enabled. + */ + linkmode_zero(edata->advertised); + edata->tx_lpi_enabled = 0; + } + + if (!bp->eee.eee_active) + linkmode_zero(edata->lp_advertised); + + return 0; +} +#endif + +#if (defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT)) || \ + defined(HAVE_MODULE_EEPROM_BY_PAGE) +static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, + u16 page_number, u8 bank, + u16 start_addr, u16 data_length, + u8 *buf) +{ + struct hwrm_port_phy_i2c_read_output *output; + struct hwrm_port_phy_i2c_read_input *req; + int rc, byte_offset = 0; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + req->i2c_slave_addr = i2c_addr; + req->page_number = cpu_to_le16(page_number); + req->port_id = cpu_to_le16(bp->pf.port_id); + do { + u16 xfer_size; + + xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); + data_length -= xfer_size; + req->page_offset = cpu_to_le16(start_addr + byte_offset); + req->data_length = xfer_size; + req->enables = + cpu_to_le32((start_addr + byte_offset ? + PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : + 0) | + (bank ? 
+ PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER : + 0)); + rc = hwrm_req_send(bp, req); + if (!rc) + memcpy(buf + byte_offset, output->data, xfer_size); + byte_offset += xfer_size; + } while (!rc && data_length > 0); + hwrm_req_drop(bp, req); + + return rc; +} + +static int bnxt_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + u8 data[SFF_DIAG_SUPPORT_OFFSET + 1]; + struct bnxt *bp = netdev_priv(dev); + int rc; + + /* No point in going further if phy status indicates + * module is not inserted or if it is powered down or + * if it is of type 10GBase-T + */ + if (bp->link_info.module_status > + PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) + return -EOPNOTSUPP; + + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10202) + return -EOPNOTSUPP; + + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0, + SFF_DIAG_SUPPORT_OFFSET + 1, + data); + if (!rc) { + u8 module_id = data[0]; + u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET]; + + switch (module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + if (!diag_supported) + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + rc = -EOPNOTSUPP; + break; + } + } + return rc; +} + +static int bnxt_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u16 start = eeprom->offset, length = eeprom->len; + int rc = 0; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = 
ETH_MODULE_SFF_8436_LEN - start; + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, + start, length, data); + if (rc) + return rc; + start += length; + data += start; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= ETH_MODULE_SFF_8436_LEN; + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0, + start, length, data); + } + return rc; +} +#endif + +#if defined(HAVE_MODULE_EEPROM_BY_PAGE) +static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack) +{ + if (bp->link_info.module_status <= + PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) + return 0; + + switch (bp->link_info.module_status) { + case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); + break; + case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted"); + break; + case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault"); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown error"); + break; + } + return -EINVAL; +} + +static int bnxt_get_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = bnxt_get_module_status(bp, extack); + if (rc) + return rc; + + if (bp->hwrm_spec_code < 0x10202) { + NL_SET_ERR_MSG_MOD(extack, "Firmware version too old"); + return -EINVAL; + } + + if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) { + NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); + return -EINVAL; + } + + rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, + page_data->page, page_data->bank, + page_data->offset, + page_data->length, + page_data->data); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed"); + 
return rc; + } + return page_data->length; +} +#endif + +static int bnxt_nway_reset(struct net_device *dev) +{ + int rc = 0; + + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + + if (!BNXT_PHY_CFG_ABLE(bp)) + return -EOPNOTSUPP; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) + return -EINVAL; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, true, false); + + return rc; +} + +#if (LINUX_VERSION_CODE < 0x30000) +static int bnxt_phys_id(struct net_device *dev, u32 data) +{ + struct hwrm_port_led_cfg_input *req; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + u8 led_state; + u16 duration; + int i, rc; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (!data) + data = 2; + + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = 500; + + while (1) { + /* reinit, don't hold onto the HWRM resources during delay */ + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req->enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = cpu_to_le16(duration); + led_cfg->led_blink_off = cpu_to_le16(duration); + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + rc = hwrm_req_send(bp, req); + + if (!duration || rc) + break; + + msleep_interruptible(data * 1000); + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = 0; + } + return rc; +} + +#else +#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct hwrm_port_led_cfg_input *req; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_led_cfg *led_cfg; + 
__le16 duration; + u8 led_state; + int rc, i; + + if (!bp->num_leds || BNXT_VF(bp)) + return -EOPNOTSUPP; + + if (state == ETHTOOL_ID_ACTIVE) { + led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT; + duration = cpu_to_le16(500); + } else if (state == ETHTOOL_ID_INACTIVE) { + led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT; + duration = cpu_to_le16(0); + } else { + return -EINVAL; + } + + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; + for (i = 0; i < bp->num_leds; i++, led_cfg++) { + req->enables |= BNXT_LED_DFLT_ENABLES(i); + led_cfg->led_id = bp->leds[i].led_id; + led_cfg->led_state = led_state; + led_cfg->led_blink_on = duration; + led_cfg->led_blink_off = duration; + led_cfg->led_group_id = bp->leds[i].led_group_id; + } + return hwrm_req_send(bp, req); +} +#endif +#endif + +static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring) +{ + struct hwrm_selftest_irq_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); + if (rc) + return rc; + + req->cmpl_ring = cpu_to_le16(cmpl_ring); + return hwrm_req_send(bp, req); +} + +static int bnxt_test_irq(struct bnxt *bp) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) { + u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id; + int rc; + + rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring); + if (rc) + return rc; + } + return 0; +} + +static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) +{ + struct hwrm_port_mac_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); + if (enable) + req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; + else + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return hwrm_req_send(bp, req); +} + +static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) +{ + struct hwrm_port_phy_qcaps_output *resp; + struct 
hwrm_port_phy_qcaps_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); + + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_disable_an_for_lpbk(struct bnxt *bp, + struct hwrm_port_phy_cfg_input *req) +{ + struct bnxt_link_info *link_info = &bp->link_info; + u16 fw_advertising; + u16 fw_speed; + int rc; + + if (!link_info->autoneg || + (bp->phy_flags & BNXT_PHY_FL_AN_PHY_LPBK)) + return 0; + + rc = bnxt_query_force_speeds(bp, &fw_advertising); + if (rc) + return rc; + + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; + if (BNXT_LINK_IS_UP(bp)) + fw_speed = bp->link_info.link_speed; + else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB; + else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB; + else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB; + else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB) + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB; + + req->force_link_speed = cpu_to_le16(fw_speed); + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | + PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); + rc = hwrm_req_send(bp, req); + req->flags = 0; + req->force_link_speed = cpu_to_le16(0); + return rc; +} + +static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) +{ + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ + hwrm_req_hold(bp, req); + + if (enable) { + bnxt_disable_an_for_lpbk(bp, req); + if (ext) + req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + else + req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + } else { + req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; + } + 
req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); + rc = hwrm_req_send(bp, req); + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + u32 raw_cons, int pkt_size) +{ + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt_rx_ring_info *rxr; + struct bnxt_sw_rx_bd *rx_buf; + struct rx_cmp *rxcmp; + u16 cp_cons, cons; + u8 *data; + u32 len; + int i; + + rxr = bnapi->rx_ring; + cp_cons = RING_CMP(raw_cons); + rxcmp = (struct rx_cmp *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + cons = rxcmp->rx_cmp_opaque; + rx_buf = &rxr->rx_buf_ring[cons]; + data = rx_buf->data_ptr; + len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT; + if (len != pkt_size) + return -EIO; + i = ETH_ALEN; + if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr)) + return -EIO; + i += ETH_ALEN; + for ( ; i < pkt_size; i++) { + if (data[i] != (u8)(i & 0xff)) + return -EIO; + } + return 0; +} + +static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, + int pkt_size) +{ + struct tx_cmp *txcmp; + int rc = -EIO; + u32 raw_cons; + u32 cons; + int i; + + raw_cons = cpr->cp_raw_cons; + for (i = 0; i < 200; i++) { + cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) { + udelay(5); + continue; + } + + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP || + TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_V3_CMP) { + rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size); + raw_cons = NEXT_RAW_CMP(raw_cons); + raw_cons = NEXT_RAW_CMP(raw_cons); + break; + } + raw_cons = NEXT_RAW_CMP(raw_cons); + } + cpr->cp_raw_cons = raw_cons; + return rc; +} + +static int bnxt_run_loopback(struct bnxt *bp) +{ + struct bnxt_tx_ring_info *txr = &bp->tx_ring[0]; + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; + struct bnxt_cp_ring_info *cpr; + int pkt_size, i = 0; + struct sk_buff *skb; + dma_addr_t map; + u8 *data; + int rc; + + cpr = &rxr->bnapi->cp_ring; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + cpr = rxr->rx_cpr; + pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh); + skb = netdev_alloc_skb(bp->dev, pkt_size); + if (!skb) + return -ENOMEM; + data = skb_put(skb, pkt_size); + ether_addr_copy(&data[i], bp->dev->dev_addr); + i += ETH_ALEN; + ether_addr_copy(&data[i], bp->dev->dev_addr); + i += ETH_ALEN; + for ( ; i < pkt_size; i++) + data[i] = (u8)(i & 0xff); + + map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, map)) { + dev_kfree_skb(skb); + return -EIO; + } + bnxt_xmit_bd(bp, txr, map, pkt_size, NULL); + + /* Sync BD data before updating doorbell */ + wmb(); + + bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); + rc = bnxt_poll_loopback(bp, cpr, pkt_size); + + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); + dev_kfree_skb(skb); + return rc; +} + +static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) +{ + struct hwrm_selftest_exec_output *resp; + struct hwrm_selftest_exec_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, bp->test_info->timeout); + req->flags = test_mask; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + *test_results = resp->test_success; + 
hwrm_req_drop(bp, req); + return rc; +} + +#define BNXT_DRV_TESTS 4 +#define BNXT_MACLPBK_TEST_IDX (bp->num_tests - BNXT_DRV_TESTS) +#define BNXT_PHYLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 1) +#define BNXT_EXTLPBK_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 2) +#define BNXT_IRQ_TEST_IDX (BNXT_MACLPBK_TEST_IDX + 3) + +static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, + u64 *buf) +{ + struct bnxt *bp = netdev_priv(dev); + bool do_ext_lpbk = false; + bool offline = false; + u8 test_results = 0; + u8 test_mask = 0; + int rc = 0, i; + + if (!bp->num_tests || !BNXT_PF(bp)) + return; + + if (etest->flags & ETH_TEST_FL_OFFLINE && + bnxt_ulp_registered(bp->edev)) { + etest->flags |= ETH_TEST_FL_FAILED; + netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n"); + return; + } + + memset(buf, 0, sizeof(u64) * bp->num_tests); + if (!netif_running(dev)) { + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + + if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) && + (bp->phy_flags & BNXT_PHY_FL_EXT_LPBK)) + do_ext_lpbk = true; + + if (etest->flags & ETH_TEST_FL_OFFLINE) { + if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) { + etest->flags |= ETH_TEST_FL_FAILED; + netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n"); + return; + } + offline = true; + } + + for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { + u8 bit_val = 1 << i; + + if (!(bp->test_info->offline_mask & bit_val)) + test_mask |= bit_val; + else if (offline) + test_mask |= bit_val; + } + if (!offline) { + bnxt_run_fw_tests(bp, test_mask, &test_results); + } else { + bnxt_close_nic(bp, true, false); + bnxt_run_fw_tests(bp, test_mask, &test_results); + + rc = bnxt_half_open_nic(bp); + if (rc) { + etest->flags |= ETH_TEST_FL_FAILED; + return; + } + if (bp->mac_flags & BNXT_MAC_FL_NO_MAC_LPBK) + goto skip_mac_loopback; + + buf[BNXT_MACLPBK_TEST_IDX] = 1; + bnxt_hwrm_mac_loopback(bp, true); + msleep(250); + if (bnxt_run_loopback(bp)) + etest->flags |= 
ETH_TEST_FL_FAILED; + else + buf[BNXT_MACLPBK_TEST_IDX] = 0; + + bnxt_hwrm_mac_loopback(bp, false); +skip_mac_loopback: + if (bp->phy_flags & BNXT_PHY_FL_NO_PHY_LPBK) + goto skip_phy_loopback; + + bnxt_hwrm_phy_loopback(bp, true, false); + msleep(1000); + if (bnxt_run_loopback(bp)) { + buf[BNXT_PHYLPBK_TEST_IDX] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } +skip_phy_loopback: + if (do_ext_lpbk) { + etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; + bnxt_hwrm_phy_loopback(bp, true, true); + msleep(1000); + if (bnxt_run_loopback(bp)) { + buf[BNXT_EXTLPBK_TEST_IDX] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + } + bnxt_hwrm_phy_loopback(bp, false, false); + bnxt_half_close_nic(bp); + rc = bnxt_open_nic(bp, true, true); + } + if (rc || bnxt_test_irq(bp)) { + buf[BNXT_IRQ_TEST_IDX] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) { + u8 bit_val = 1 << i; + + if ((test_mask & bit_val) && !(test_results & bit_val)) { + buf[i] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + } +} + +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) +static int bnxt_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ptp_cfg *ptp; + + ptp = bp->ptp_cfg; + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + info->phc_index = -1; + if (!ptp) + return 0; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + if (ptp->ptp_clock) + info->phc_index = ptp_clock_index(ptp->ptp_clock); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) + info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL); + return 0; +} +#endif + +#if 
defined(ETHTOOL_RESET) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_hwrm_crashdump_erase(struct net_device *dev, u8 scope) +{ + struct hwrm_dbg_crashdump_erase_input *req; + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_CRASHDUMP_ERASE); + if (rc) + return rc; + + req->scope = scope; + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + return hwrm_req_send(bp, req); +} + +static int bnxt_reset(struct net_device *dev, u32 *flags) +{ + struct bnxt *bp = netdev_priv(dev); + u32 req = *flags; + + if (!req) + return -EINVAL; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "Reset is not supported from a VF\n"); + return -EOPNOTSUPP; + } + + if (req & BNXT_FW_RESET_CRASHDUMP) { + if (bp->fw_cap & BNXT_FW_CAP_CRASHDUMP) { + u8 scope = DBG_CRASHDUMP_ERASE_REQ_SCOPE_INVALIDATE; + + if (!bnxt_hwrm_crashdump_erase(dev, scope)) { + netdev_info(dev, "Crashdump data erased.\n"); + *flags &= ~BNXT_FW_RESET_CRASHDUMP; + if (!*flags) + return 0; /* done, skip VF check */ + } + } else if (req == BNXT_FW_RESET_CRASHDUMP) { + return -EOPNOTSUPP; /* only request, fail hard */ + } + } + + if (pci_vfs_assigned(bp->pdev) && + !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) { + netdev_err(dev, + "Reset not allowed when VFs are assigned to VMs\n"); + return -EBUSY; + } + + if ((req & BNXT_FW_RESET_CHIP) == BNXT_FW_RESET_CHIP) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code >= 0x10803) { + if (!bnxt_firmware_reset_chip(dev)) { + netdev_info(dev, "Firmware reset request successful.\n"); + if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) + netdev_info(dev, "Reload driver to complete reset\n"); + *flags &= ~BNXT_FW_RESET_CHIP; + } + } else if (req == BNXT_FW_RESET_CHIP) { + return -EOPNOTSUPP; /* only request, fail hard */ + } + } + + if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code >= 0x10803) { + if 
(!bnxt_firmware_reset_ap(dev)) { + netdev_info(dev, "Reset application processor successful.\n"); + *flags &= ~BNXT_FW_RESET_AP; + } + } else if (req == BNXT_FW_RESET_AP) { + return -EOPNOTSUPP; /* only request, fail hard */ + } + } + + return 0; +} +#endif + +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) +static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) +{ + struct bnxt *bp = netdev_priv(dev); + + if (dump->flag > BNXT_DUMP_DRIVER_WITH_CTX_MEM) { + netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2), Driver with CTX MEM(3) dumps.\n"); + return -EINVAL; + } + + if (dump->flag == BNXT_DUMP_CRASH) { + if (bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_SOC && + (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) { + netdev_info(dev, + "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n"); + return -EOPNOTSUPP; + } else if (!(bp->fw_dbg_cap & BNXT_FW_DBG_CAP_CRASHDUMP_HOST)) { + netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n"); + return -EOPNOTSUPP; + } + } + + bp->dump_flag = dump->flag; + return 0; +} + +static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->hwrm_spec_code < 0x10801) + return -EOPNOTSUPP; + + dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 | + bp->ver_resp.hwrm_fw_min_8b << 16 | + bp->ver_resp.hwrm_fw_bld_8b << 8 | + bp->ver_resp.hwrm_fw_rsvd_8b; + + dump->flag = bp->dump_flag; + dump->len = bnxt_get_coredump_length(bp, bp->dump_flag); + return 0; +} + +static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, + void *buf) +{ + struct bnxt *bp = netdev_priv(dev); + + if (bp->hwrm_spec_code < 0x10801) + return -EOPNOTSUPP; + + memset(buf, 0, dump->len); + + dump->flag = bp->dump_flag; + return bnxt_get_coredump(bp, dump->flag, buf, &dump->len); +} +#endif /* ETHTOOL_GET_DUMP_FLAG */ + +void bnxt_ethtool_init(struct bnxt *bp) +{ + struct 
hwrm_selftest_qlist_output *resp; + struct hwrm_selftest_qlist_input *req; + struct bnxt_test_info *test_info; + struct net_device *dev = bp->dev; + int i, rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER)) + bnxt_get_pkgver(dev); + + bp->num_tests = 0; + if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) + return; + + test_info = bp->test_info; + if (!test_info) { + test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); + if (!test_info) + return; + bp->test_info = test_info; + } + + if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) + return; + + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + if (rc != -EACCES) + netdev_warn(bp->dev, + "Expected firmware self tests unavailable (err: %d)\n", + rc); + goto ethtool_init_exit; + } + + bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; + if (bp->num_tests > BNXT_MAX_TEST) + bp->num_tests = BNXT_MAX_TEST; + + test_info->offline_mask = resp->offline_tests; + test_info->timeout = le16_to_cpu(resp->test_timeout); + if (!test_info->timeout) + test_info->timeout = HWRM_CMD_TIMEOUT; + for (i = 0; i < bp->num_tests; i++) { + char *str = test_info->string[i]; + char *fw_str = resp->test_name[i]; + + if (i == BNXT_MACLPBK_TEST_IDX) { + strcpy(str, "Mac loopback test (offline)"); + } else if (i == BNXT_PHYLPBK_TEST_IDX) { + strcpy(str, "Phy loopback test (offline)"); + } else if (i == BNXT_EXTLPBK_TEST_IDX) { + strcpy(str, "Ext loopback test (offline)"); + } else if (i == BNXT_IRQ_TEST_IDX) { + strcpy(str, "Interrupt_test (online)"); + } else { + snprintf(str, ETH_GSTRING_LEN, "%s test (%s)", + fw_str, test_info->offline_mask & (1 << i) ? 
+ "offline" : "online"); + } + } + +ethtool_init_exit: + hwrm_req_drop(bp, req); +} + +#ifdef ETHTOOL_RMON_HIST_MAX +static void bnxt_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + phy_stats->SymbolErrorDuringCarrier = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err)); +} + +static void bnxt_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + mac_stats->FramesReceivedOK = + BNXT_GET_RX_PORT_STATS64(rx, rx_good_frames); + mac_stats->FramesTransmittedOK = + BNXT_GET_TX_PORT_STATS64(tx, tx_good_frames); + mac_stats->FrameCheckSequenceErrors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + mac_stats->AlignmentErrors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + mac_stats->OutOfRangeLengthField = + BNXT_GET_RX_PORT_STATS64(rx, rx_oor_len_frames); +} + +static void bnxt_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + ctrl_stats->MACControlFramesReceived = + BNXT_GET_RX_PORT_STATS64(rx, rx_ctrl_frames); +} + +static const struct ethtool_rmon_hist_range bnxt_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 2047 }, + { 2048, 4095 }, + { 4096, 9216 }, + { 9217, 16383 }, + {} +}; + +static void bnxt_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range 
**ranges) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx, *tx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS)) + return; + + rx = bp->port_stats.sw_stats; + tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + + rmon_stats->jabbers = + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + rmon_stats->oversize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames); + rmon_stats->undersize_pkts = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames); + + rmon_stats->hist[0] = BNXT_GET_RX_PORT_STATS64(rx, rx_64b_frames); + rmon_stats->hist[1] = BNXT_GET_RX_PORT_STATS64(rx, rx_65b_127b_frames); + rmon_stats->hist[2] = BNXT_GET_RX_PORT_STATS64(rx, rx_128b_255b_frames); + rmon_stats->hist[3] = BNXT_GET_RX_PORT_STATS64(rx, rx_256b_511b_frames); + rmon_stats->hist[4] = + BNXT_GET_RX_PORT_STATS64(rx, rx_512b_1023b_frames); + rmon_stats->hist[5] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1024b_1518b_frames); + rmon_stats->hist[6] = + BNXT_GET_RX_PORT_STATS64(rx, rx_1519b_2047b_frames); + rmon_stats->hist[7] = + BNXT_GET_RX_PORT_STATS64(rx, rx_2048b_4095b_frames); + rmon_stats->hist[8] = + BNXT_GET_RX_PORT_STATS64(rx, rx_4096b_9216b_frames); + rmon_stats->hist[9] = + BNXT_GET_RX_PORT_STATS64(rx, rx_9217b_16383b_frames); + + rmon_stats->hist_tx[0] = + BNXT_GET_TX_PORT_STATS64(tx, tx_64b_frames); + rmon_stats->hist_tx[1] = + BNXT_GET_TX_PORT_STATS64(tx, tx_65b_127b_frames); + rmon_stats->hist_tx[2] = + BNXT_GET_TX_PORT_STATS64(tx, tx_128b_255b_frames); + rmon_stats->hist_tx[3] = + BNXT_GET_TX_PORT_STATS64(tx, tx_256b_511b_frames); + rmon_stats->hist_tx[4] = + BNXT_GET_TX_PORT_STATS64(tx, tx_512b_1023b_frames); + rmon_stats->hist_tx[5] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1024b_1518b_frames); + rmon_stats->hist_tx[6] = + BNXT_GET_TX_PORT_STATS64(tx, tx_1519b_2047b_frames); + rmon_stats->hist_tx[7] = + BNXT_GET_TX_PORT_STATS64(tx, tx_2048b_4095b_frames); + rmon_stats->hist_tx[8] = + BNXT_GET_TX_PORT_STATS64(tx, tx_4096b_9216b_frames); + rmon_stats->hist_tx[9] = + 
BNXT_GET_TX_PORT_STATS64(tx, tx_9217b_16383b_frames); + + *ranges = bnxt_rmon_ranges; +} +#endif + +static int bnxt_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct bnxt *bp = netdev_priv(dev); + bool reload = false; + int rc = 0; + + if (flags >> ARRAY_SIZE(bnxt_priv_flags)) + return -EINVAL; + + if (flags & (1 << BNXT_PRIV_FLAG_CORE_RESET_TX_TIMEOUT)) { + if (!bnxt_core_reset_avail(bp)) + return -EOPNOTSUPP; + bp->flags |= BNXT_FLAG_CORE_RESET_TX_TIMEOUT; + } else { + bp->flags &= ~BNXT_FLAG_CORE_RESET_TX_TIMEOUT; + } + + if (flags & (1 << BNXT_PRIV_FLAG_NUMA_DIRECT)) { + if (~bp->flags & BNXT_FLAG_MULTI_ROOT) + return -EOPNOTSUPP; + reload |= !(bp->flags & BNXT_FLAG_NUMA_DIRECT); + bp->flags |= BNXT_FLAG_NUMA_DIRECT; + } else { + reload |= !!(bp->flags & BNXT_FLAG_NUMA_DIRECT); + bp->flags &= ~BNXT_FLAG_NUMA_DIRECT; + } + + /* Check this flag while configuring ipv6 n-tuple */ + if (flags & (1 << BNXT_PRIV_FLAG_RSS_IPV6_FLOW_LABEL_EN)) { + /* Reject if HW in-capable */ + if (!(bp->rss_cap & BNXT_RSS_CAP_IPV6_FLOW_LABEL_CAP)) + return -EOPNOTSUPP; + bp->ipv6_flow_lbl_rss_en = 1; + } else { + bp->ipv6_flow_lbl_rss_en = 0; + } + + if (reload && netif_running(dev)) { + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + } + + return rc; +} + +static u32 bnxt_get_priv_flags(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + u32 flags = 0; + + if (bp->flags & BNXT_FLAG_NUMA_DIRECT) + flags |= 1 << BNXT_PRIV_FLAG_NUMA_DIRECT; + + if (bp->flags & BNXT_FLAG_CORE_RESET_TX_TIMEOUT) + flags |= 1 << BNXT_PRIV_FLAG_CORE_RESET_TX_TIMEOUT; + + if (bp->ipv6_flow_lbl_rss_en) + flags |= 1 << BNXT_PRIV_FLAG_RSS_IPV6_FLOW_LABEL_EN; + + return flags; +} + +#ifdef HAVE_ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT +static void bnxt_get_link_ext_stats(struct net_device *dev, + struct ethtool_link_ext_stats *stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = 
bp->rx_port_stats_ext.sw_stats; + stats->link_down_events = + *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); +} +#endif + +void bnxt_ethtool_free(struct bnxt *bp) +{ + kfree(bp->test_info); + bp->test_info = NULL; +} + +const struct ethtool_ops bnxt_ethtool_ops = { +#ifdef HAVE_ETHTOOL_LANES + .cap_link_lanes_supported = 1, +#endif +#ifdef ETHTOOL_COALESCE_USECS + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USECS_IRQ | + ETHTOOL_COALESCE_MAX_FRAMES_IRQ | + ETHTOOL_COALESCE_STATS_BLOCK_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX | + ETHTOOL_COALESCE_USE_CQE, +#endif +#ifdef HAVE_ETHTOOL_LINK_KSETTINGS + .get_link_ksettings = bnxt_get_link_ksettings, + .set_link_ksettings = bnxt_set_link_ksettings, +#else + .get_settings = bnxt_get_settings, + .set_settings = bnxt_set_settings, +#endif +#ifdef ETHTOOL_MAX_LANES + .get_fec_stats = bnxt_get_fec_stats, +#endif +#ifdef ETHTOOL_GFECPARAM + .get_fecparam = bnxt_get_fecparam, + .set_fecparam = bnxt_set_fecparam, +#endif +#ifdef HAVE_GET_PAUSE_STATS + .get_pause_stats = bnxt_get_pause_stats, +#endif + .get_pauseparam = bnxt_get_pauseparam, + .set_pauseparam = bnxt_set_pauseparam, + .get_drvinfo = bnxt_get_drvinfo, + .get_regs_len = bnxt_get_regs_len, + .get_regs = bnxt_get_regs, + .get_wol = bnxt_get_wol, + .set_wol = bnxt_set_wol, + .get_coalesce = bnxt_get_coalesce, + .set_coalesce = bnxt_set_coalesce, +#ifdef HAVE_ETHTOOL_GET_PER_QUEUE_COAL + .get_per_queue_coalesce = bnxt_get_per_queue_coalesce, +#endif + .get_msglevel = bnxt_get_msglevel, + .set_msglevel = bnxt_set_msglevel, + .get_sset_count = bnxt_get_sset_count, + .get_strings = bnxt_get_strings, + .get_ethtool_stats = bnxt_get_ethtool_stats, + .set_ringparam = bnxt_set_ringparam, + .get_ringparam = bnxt_get_ringparam, +#if defined(ETHTOOL_GCHANNELS) && !defined(GET_ETHTOOL_OP_EXT) + .get_channels = bnxt_get_channels, + .set_channels = bnxt_set_channels, +#endif +#ifdef HAVE_RXNFC + .get_rxnfc = 
bnxt_get_rxnfc, + .set_rxnfc = bnxt_set_rxnfc, +#endif +#if defined(HAVE_RXFH_INDIR_SIZE) && !defined(GET_ETHTOOL_OP_EXT) + .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, +#endif +#if defined(HAVE_GET_RXFH_KEY_SIZE) && !defined(GET_ETHTOOL_OP_EXT) + .get_rxfh_key_size = bnxt_get_rxfh_key_size, + .get_rxfh = bnxt_get_rxfh, +#endif +#if defined(HAVE_SET_RXFH) && defined(ETH_RSS_HASH_TOP) && !defined(GET_ETHTOOL_OP_EXT) + .set_rxfh = bnxt_set_rxfh, +#endif + .flash_device = bnxt_flash_device, + .get_eeprom_len = bnxt_get_eeprom_len, + .get_eeprom = bnxt_get_eeprom, + .set_eeprom = bnxt_set_eeprom, + .get_link = bnxt_get_link, +#ifdef HAVE_ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT + .get_link_ext_stats = bnxt_get_link_ext_stats, +#endif +#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) + .get_eee = bnxt_get_eee, + .set_eee = bnxt_set_eee, +#endif +#if defined(ETHTOOL_GMODULEEEPROM) && !defined(GET_ETHTOOL_OP_EXT) + .get_module_info = bnxt_get_module_info, + .get_module_eeprom = bnxt_get_module_eeprom, +#endif +#if defined(HAVE_MODULE_EEPROM_BY_PAGE) + .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, +#endif + .nway_reset = bnxt_nway_reset, +#if (LINUX_VERSION_CODE < 0x30000) + .phys_id = bnxt_phys_id, +#else +#if defined(HAVE_SET_PHYS_ID) && !defined(GET_ETHTOOL_OP_EXT) + .set_phys_id = bnxt_set_phys_id, +#endif +#endif + .self_test = bnxt_self_test, +#if defined(ETHTOOL_GET_TS_INFO) && defined(HAVE_IEEE1588_SUPPORT) + .get_ts_info = bnxt_get_ts_info, +#endif +#if defined(ETHTOOL_RESET) && !defined(GET_ETHTOOL_OP_EXT) + .reset = bnxt_reset, +#endif +#if defined(ETHTOOL_GET_DUMP_FLAG) && !defined(GET_ETHTOOL_OP_EXT) + .set_dump = bnxt_set_dump, + .get_dump_flag = bnxt_get_dump_flag, + .get_dump_data = bnxt_get_dump_data, +#endif +#ifdef ETHTOOL_RMON_HIST_MAX + .get_eth_phy_stats = bnxt_get_eth_phy_stats, + .get_eth_mac_stats = bnxt_get_eth_mac_stats, + .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, + .get_rmon_stats = bnxt_get_rmon_stats, +#endif + 
.get_priv_flags = bnxt_get_priv_flags, + .set_priv_flags = bnxt_set_priv_flags, +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) && defined(ETH_RSS_HASH_TOP) && \ + !defined(GET_ETHTOOL_OP_EXT) + .set_rxfh_context = bnxt_set_rxfh_context, + .get_rxfh_context = bnxt_get_rxfh_context, +#endif +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.h new file mode 100644 index 000000000000..e39d4855e094 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool.h @@ -0,0 +1,95 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_ETHTOOL_H +#define BNXT_ETHTOOL_H + +#include + +struct bnxt_led_cfg { + u8 led_id; + u8 led_state; + u8 led_color; + u8 unused; + __le16 led_blink_on; + __le16 led_blink_off; + u8 led_group_id; + u8 rsvd; +}; + +#define BNXT_LED_DFLT_ENA \ + (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \ + PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \ + PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \ + PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID) + +#define BNXT_LED_DFLT_ENA_SHIFT 6 + +#define BNXT_LED_DFLT_ENABLES(x) \ + cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) + +#define BNXT_FW_RESET_CRASHDUMP (ETH_RESET_CRASHDUMP << ETH_RESET_SHARED_SHIFT) +#define BNXT_FW_RESET_AP (ETH_RESET_AP << ETH_RESET_SHARED_SHIFT) +#define BNXT_FW_RESET_CHIP ((ETH_RESET_MGMT | ETH_RESET_IRQ | \ + ETH_RESET_DMA | ETH_RESET_FILTER | \ + ETH_RESET_OFFLOAD | ETH_RESET_MAC | \ + ETH_RESET_PHY | ETH_RESET_RAM) \ + << ETH_RESET_SHARED_SHIFT) + +#define BNXT_PXP_REG_LEN 0x3110 + +#define BNXT_IP_PROTO_FULL_MASK 0xFF + +extern const 
struct ethtool_ops bnxt_ethtool_ops; + +u32 bnxt_get_rxfh_indir_size(struct net_device *dev); +#ifdef HAVE_ETHTOOL_KEEE +void _bnxt_fw_to_linkmode(unsigned long *mode, u16 fw_speeds); +#else +u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); +#endif +u32 bnxt_fw_to_ethtool_speed(u16); +#ifdef HAVE_ETHTOOL_KEEE +u16 bnxt_get_fw_auto_link_speeds(const unsigned long *mode); +#else +u16 bnxt_get_fw_auto_link_speeds(u32); +#endif +int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, + struct hwrm_nvm_get_dev_info_output *nvm_dev_info); +int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, + u32 install_type, struct netlink_ext_ack *extack); +int bnxt_flash_package_from_file(struct net_device *dev, const char *filename, + u32 install_type, struct netlink_ext_ack *extack); +int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, + u8 self_reset, u8 flags); +int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size); +int bnxt_sync_firmware(struct bnxt *bp); +int bnxt_hwrm_get_fw_sync_status(struct bnxt *bp, u16 *fw_status); +int bnxt_hwrm_fw_sync(struct bnxt *bp, u16 fw_status); +void bnxt_ethtool_init(struct bnxt *bp); +void bnxt_ethtool_free(struct bnxt *bp); +int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length); +int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length); +int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len); +int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, + u32 length, u8 *data); +int bnxt_firmware_reset_chip(struct net_device *dev); +int bnxt_firmware_reset_ap(struct net_device *dev); + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool_compat.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool_compat.c 
new file mode 100644 index 000000000000..f6bce060e797 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ethtool_compat.c @@ -0,0 +1,458 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2021 Broacom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include "bnxt_ethtool.c" + +#ifndef HAVE_ETHTOOL_LINK_KSETTINGS +int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnxt *bp = netdev_priv(dev); + struct ethtool_link_ksettings ks; + int rc; + + memset(&ks, 0, sizeof(ks)); + rc = bnxt_get_link_ksettings(dev, &ks); + if (rc) + return rc; + + cmd->supported = ks.link_modes.supported[0]; + cmd->advertising = ks.link_modes.advertising[0]; + cmd->lp_advertising = ks.link_modes.lp_advertising[0]; + ethtool_cmd_speed_set(cmd, ks.base.speed); + cmd->duplex = ks.base.duplex; + cmd->autoneg = ks.base.autoneg; + cmd->port = ks.base.port; + cmd->phy_address = ks.base.phy_address; + if (bp->link_info.transceiver == + PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) + cmd->transceiver = XCVR_INTERNAL; + else + cmd->transceiver = XCVR_EXTERNAL; + + return 0; +} + +static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *ks) +{ + u16 fw_speeds = link_info->support_speeds; + u32 supported; + + supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + ks->link_modes.supported[0] = supported | SUPPORTED_Pause | + SUPPORTED_Asym_Pause; +} + +int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct bnxt *bp = netdev_priv(dev); + struct ethtool_link_ksettings ks; + + memset(&ks, 0, sizeof(ks)); + if (cmd->autoneg == AUTONEG_ENABLE) { + bnxt_fw_to_ethtool_support_spds(&bp->link_info, &ks); + + if (!ks.link_modes.supported) { + netdev_err(dev, "Autoneg not supported\n"); + return -EINVAL; + } + if 
(cmd->advertising & ~(ks.link_modes.supported[0] | + ADVERTISED_Autoneg | + ADVERTISED_TP | ADVERTISED_FIBRE)) { + netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", + cmd->advertising); + return -EINVAL; + } + } else { + /* If received a request for an unknown duplex, assume full*/ + if (cmd->duplex == DUPLEX_UNKNOWN) + cmd->duplex = DUPLEX_FULL; + } + + ks.link_modes.advertising[0] = cmd->advertising; + ks.base.speed = ethtool_cmd_speed(cmd); + ks.base.duplex = cmd->duplex; + ks.base.autoneg = cmd->autoneg; + return bnxt_set_link_ksettings(dev, &ks); +} +#endif + +#ifndef HAVE_ETHTOOL_PARAMS_FROM_LINK_MODE +#define ETHTOOL_LINK_MODE(speed, type, duplex) \ + ETHTOOL_LINK_MODE_ ## speed ## base ## type ## _ ## duplex ## _BIT + +#include "bnxt_compat_link_modes.c" + +void +ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, + enum ethtool_link_mode_bit_indices link_mode) +{ + const struct link_mode_info *link_info; + + if (WARN_ON_ONCE(link_mode >= ARRAY_SIZE(link_mode_params))) + return; + + link_info = &link_mode_params[link_mode]; + link_ksettings->base.speed = link_info->speed; +#ifdef HAVE_ETHTOOL_LANES + link_ksettings->lanes = link_info->lanes; +#endif + link_ksettings->base.duplex = link_info->duplex; +#ifdef HAVE_ETHTOOL_LINK_MODE + link_ksettings->link_mode = link_mode; +#endif +} +#endif + +#if !defined(HAVE_ETHTOOL_RXFH_PARAM) +#if defined(HAVE_ETH_RXFH_CONTEXT_ALLOC) +int bnxt_set_rxfh_context(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + bool modify = false; + int bit_id; + int rc; + + if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) + return -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EAGAIN; + + if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { + rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); + if (!rss_ctx) + return -EINVAL; + if (delete) { + 
bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; + } + modify = true; + vnic = &rss_ctx->vnic; + goto modify_context; + } + + if (hfunc && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) + return -EINVAL; + + if (!bnxt_rfs_capable(bp, true)) + return -ENOMEM; + + rss_ctx = bnxt_alloc_rss_ctx(bp); + if (!rss_ctx) + return -ENOMEM; + + vnic = &rss_ctx->vnic; + vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; + vnic->vnic_id = BNXT_VNIC_ID_INVALID; + rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + if (rc) + goto out; + + rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); + if (rc) + goto out; + + bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); + + rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); + if (rc) + goto out; + + rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA); + if (rc) + goto out; +modify_context: + if (indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + + for (i = 0; i < tbl_size; i++) + rss_ctx->rss_indir_tbl[i] = indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&rss_ctx->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } + + if (key) + memcpy(vnic->rss_hash_key, key, HW_HASH_KEY_SIZE); + + if (modify) + return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + + rc = __bnxt_setup_vnic_p5(bp, vnic); + if (rc) + goto out; + + bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, + BNXT_RSS_CTX_BMAP_LEN, 0); + if (bit_id < 0) { + rc = -ENOMEM; + goto out; + } + rss_ctx->index = (u16)bit_id; + *rss_context = rss_ctx->index; + + return 0; +out: + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return rc; +} + +int bnxt_get_rxfh_context(struct net_device *dev, u32 *indir, u8 *key, + u8 *hfunc, u32 rss_context) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + int i; + + rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); + if (!rss_ctx) + return -EINVAL; + + vnic = 
&rss_ctx->vnic; + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + for (i = 0; i < bnxt_get_rxfh_indir_size(bp->dev); i++) + indir[i] = rss_ctx->rss_indir_tbl[i]; + if (key) + memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); + return 0; +} +#endif /* HAVE_ETH_RXFH_CONTEXT_ALLOC */ + +int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vnic_info *vnic; + u32 i, tbl_size; + + /* WIP: Return HWRM_VNIC_RSS_QCFG response, instead of driver cache */ + if (hfunc) + *hfunc = bp->rss_hfunc; + + if (!bp->vnic_info) + return 0; + + vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; + if (indir && bp->rss_indir_tbl) { + tbl_size = bnxt_get_rxfh_indir_size(dev); + for (i = 0; i < tbl_size; i++) + indir[i] = bp->rss_indir_tbl[i]; + } + + if (key && vnic->rss_hash_key) + memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); + + return 0; +} + +int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + bool skip_key = false; + int rc = 0; + + /* Check HW cap and cache hash func details */ + switch (hfunc) { + case ETH_RSS_HASH_XOR: + if (!(bp->rss_cap & BNXT_RSS_CAP_XOR_CAP)) + return -EOPNOTSUPP; + /* hkey not needed in XOR mode */ + skip_key = true; + break; + case ETH_RSS_HASH_TOP: + if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CAP)) + return -EOPNOTSUPP; + break; + case ETH_RSS_HASH_CRC32: + /* default keys/indir */ + if (!(bp->rss_cap & BNXT_RSS_CAP_TOEPLITZ_CHKSM_CAP)) + return -EOPNOTSUPP; + skip_key = true; + break; + case ETH_RSS_HASH_NO_CHANGE: + break; + default: + return -EOPNOTSUPP; + } + + /* Repeat of same hfunc with no key or weight */ + if (bp->rss_hfunc == hfunc && !key && !indir) + return -EINVAL; + + /* for xor and crc32 block hkey config */ + if (key && skip_key) + return -EINVAL; + + if (key) { + memcpy(bp->rss_hash_key, key, HW_HASH_KEY_SIZE); + bp->rss_hash_key_updated = true; + } + + bp->rss_hfunc = hfunc; + if 
(indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + + for (i = 0; i < tbl_size; i++) + bp->rss_indir_tbl[i] = indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } + bnxt_clear_usr_fltrs(bp, false); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } + return rc; +} +#endif /* !HAVE_ETHTOOL_RXFH_PARAM */ + +#if !defined(HAVE_ETHTOOL_KEEE) || !defined(HAVE_ETHTOOL_LINK_KSETTINGS) +u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) +{ + u32 speed_mask = 0; + + /* TODO: support 25GB, 40GB, 50GB with different cable type */ + /* set the advertised speeds */ + if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) + speed_mask |= ADVERTISED_100baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) + speed_mask |= ADVERTISED_1000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) + speed_mask |= ADVERTISED_2500baseX_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) + speed_mask |= ADVERTISED_10000baseT_Full; + if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) + speed_mask |= ADVERTISED_40000baseCR4_Full; + + if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) + speed_mask |= ADVERTISED_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_TX) + speed_mask |= ADVERTISED_Asym_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_RX) + speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + + return speed_mask; +} + +u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +{ + u16 fw_speed_mask = 0; + + /* only support autoneg at speed 100, 1000, and 10000 */ + if (advertising & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB; + } + if (advertising & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { + fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB; + } + if (advertising & ADVERTISED_10000baseT_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; + + if (advertising & 
ADVERTISED_40000baseCR4_Full) + fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB; + + return fw_speed_mask; +} +#endif /* !HAVE_ETHTOOL_KEEE || !HAVE_ETHTOOL_LINK_KSETTINGS */ + +#if defined(ETHTOOL_GEEE) && !defined(GET_ETHTOOL_OP_EXT) && !defined(HAVE_ETHTOOL_KEEE) +int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + struct ethtool_eee *eee = (struct ethtool_eee *)&bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + u32 advertising; + int rc = 0; + + if (!BNXT_PHY_CFG_ABLE(bp)) + return -EOPNOTSUPP; + + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) + return -EOPNOTSUPP; + + mutex_lock(&bp->link_lock); + advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + if (!edata->eee_enabled) + goto eee_ok; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + netdev_warn(dev, "EEE requires autoneg\n"); + rc = -EINVAL; + goto eee_exit; + } + if (edata->tx_lpi_enabled) { + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || + edata->tx_lpi_timer < bp->lpi_tmr_lo)) { + netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", + bp->lpi_tmr_lo, bp->lpi_tmr_hi); + rc = -EINVAL; + goto eee_exit; + } else if (!bp->lpi_tmr_hi) { + edata->tx_lpi_timer = eee->tx_lpi_timer; + } + } + if (!edata->advertised) { + edata->advertised = advertising & eee->supported; + } else if (edata->advertised & ~advertising) { + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", + edata->advertised, advertising); + rc = -EINVAL; + goto eee_exit; + } + + eee->advertised = edata->advertised; + eee->tx_lpi_enabled = edata->tx_lpi_enabled; + eee->tx_lpi_timer = edata->tx_lpi_timer; +eee_ok: + eee->eee_enabled = edata->eee_enabled; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, false, true); + +eee_exit: + mutex_unlock(&bp->link_lock); + return rc; +} + +int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = 
netdev_priv(dev); + + if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP)) + return -EOPNOTSUPP; + + memcpy(edata, &bp->eee, sizeof(*edata)); + if (!bp->eee.eee_enabled) { + /* Preserve tx_lpi_timer so that the last value will be used + * by default when it is re-enabled. + */ + edata->advertised = 0; + edata->tx_lpi_enabled = 0; + } + + if (!bp->eee.eee_active) + edata->lp_advertised = 0; + + return 0; +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_extra_ver.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_extra_ver.h new file mode 100644 index 000000000000..30e15e414af1 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_extra_ver.h @@ -0,0 +1,18 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2021 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_EXTRA_VER_H +#define BNXT_EXTRA_VER_H + +#ifndef DRV_MODULE_EXTRA_VER +#define DRV_MODULE_EXTRA_VER "-230.2.52.0" +#endif + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_fw_hdr.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_fw_hdr.h new file mode 100644 index 000000000000..b94d804c2adb --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_fw_hdr.h @@ -0,0 +1,120 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef __BNXT_FW_HDR_H__ +#define __BNXT_FW_HDR_H__ + +#define BNXT_FIRMWARE_BIN_SIGNATURE 0x1a4d4342 /* "BCM"+0x1a */ +#define BNXT_UCODE_TRAILER_SIGNATURE 0x726c7254 /* "Trlr" */ + +enum SUPPORTED_FAMILY { + DEVICE_5702_3_4_FAMILY, /* 0 - Denali, Vinson, K2 */ + DEVICE_5705_FAMILY, /* 1 - Bachelor */ + DEVICE_SHASTA_FAMILY, /* 2 - 5751 */ + DEVICE_5706_FAMILY, /* 3 - Teton */ + DEVICE_5714_FAMILY, /* 4 - Hamilton */ + DEVICE_STANFORD_FAMILY, /* 5 - 5755 */ + DEVICE_STANFORD_ME_FAMILY, /* 6 - 5756 */ + DEVICE_SOLEDAD_FAMILY, /* 7 - 5761[E] */ + DEVICE_CILAI_FAMILY, /* 8 - 57780/60/90/91 */ + DEVICE_ASPEN_FAMILY, /* 9 - 57781/85/61/65/91/95 */ + DEVICE_ASPEN_PLUS_FAMILY, /* 10 - 57786 */ + DEVICE_LOGAN_FAMILY, /* 11 - Any device in the Logan family + */ + DEVICE_LOGAN_5762, /* 12 - Logan Enterprise (aka Columbia) + */ + DEVICE_LOGAN_57767, /* 13 - Logan Client */ + DEVICE_LOGAN_57787, /* 14 - Logan Consumer */ + DEVICE_LOGAN_5725, /* 15 - Logan Server (TruManage-enabled) + */ + DEVICE_SAWTOOTH_FAMILY, /* 16 - 5717/18 */ + DEVICE_COTOPAXI_FAMILY, /* 17 - 5719 */ + DEVICE_SNAGGLETOOTH_FAMILY, /* 18 - 5720 */ + DEVICE_CUMULUS_FAMILY, /* 19 - Cumulus/Whitney */ + MAX_DEVICE_FAMILY +}; + +enum SUPPORTED_CODE { + CODE_ASF1, /* 0 - ASF VERSION 1.03 */ + CODE_ASF2, /* 1 - ASF VERSION 2.00 */ + CODE_PASSTHRU, /* 2 - PassThru */ + CODE_PT_SEC, /* 3 - PassThru with security */ + CODE_UMP, /* 4 - UMP */ + CODE_BOOT, /* 5 - Bootcode */ + CODE_DASH, /* 6 - TruManage (DASH + ASF + PMCI) + * Management firmwares + */ + CODE_MCTP_PASSTHRU, /* 7 - NCSI / MCTP Passt-hrough firmware */ + CODE_PM_OFFLOAD, /* 8 - Power-Management Proxy Offload firmwares + */ + CODE_MDNS_SD_OFFLOAD, /* 9 - Multicast DNS Service Discovery Proxys + * Offload firmware + */ + CODE_DISC_OFFLOAD, /* 10 - Discovery Offload firmware */ + CODE_MUSTANG, /* 11 - I2C Error reporting APE firmwares + * + */ + CODE_ARP_BATCH, /* 12 - ARP Batch firmware */ + CODE_SMASH, /* 13 - TruManage (SMASH + DCMI/IPMI + 
PMCI) + * Management firmware + */ + CODE_APE_DIAG, /* 14 - APE Test Diag firmware */ + CODE_APE_PATCH, /* 15 - APE Patch firmware */ + CODE_TANG_PATCH, /* 16 - TANG Patch firmware */ + CODE_KONG_FW, /* 17 - KONG firmware */ + CODE_KONG_PATCH, /* 18 - KONG Patch firmware */ + CODE_BONO_FW, /* 19 - BONO firmware */ + CODE_BONO_PATCH, /* 20 - BONO Patch firmware */ + CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */ + + MAX_CODE_TYPE, +}; + +enum SUPPORTED_MEDIA { + MEDIA_COPPER, /* 0 */ + MEDIA_FIBER, /* 1 */ + MEDIA_NONE, /* 2 */ + MEDIA_COPPER_FIBER, /* 3 */ + MAX_MEDIA_TYPE, +}; + +struct bnxt_fw_header { + __le32 signature; /* constains the constant value of + * BNXT_FIRMWARE_BIN_SIGNATURE + */ + u8 flags; /* reserved for ChiMP use */ + u8 code_type; /* enum SUPPORTED_CODE */ + u8 device; /* enum SUPPORTED_FAMILY */ + u8 media; /* enum SUPPORTED_MEDIA */ + u8 version[16]; /* the null terminated version string to + * indicate the version of the + * file, this will be copied from the binary + * file version string + */ + u8 build; + u8 revision; + u8 minor_ver; + u8 major_ver; +}; + +/* Microcode and pre-boot software/firmware trailer: */ +struct bnxt_ucode_trailer { + u8 rsa_sig[256]; + __le16 flags; + u8 version_format; + u8 version_length; + u8 version[16]; + __le16 dir_type; + __le16 trailer_length; + __le32 sig; /* BNXT_UCODE_TRAILER_SIGNATURE */ + __le32 chksum; /* CRC-32 */ +}; + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.c new file mode 100644 index 000000000000..718c66c185ac --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.c @@ -0,0 +1,588 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
 */

/*
 * NOTE(review): the four <linux/...> header names below were lost during
 * patch extraction (angle-bracketed text stripped) — restore them from the
 * original bnxt_hdbr.c before building.
 */
#include
#include
#include
#include

#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hdbr.h"

/*
 * Map DB type to DB copy group type
 *
 * Returns the HDBR copy group (DBC_GROUP_SQ/RQ/SRQ/CQ) for the doorbell
 * whose type bits are encoded in @db_val, or DBC_GROUP_MAX for doorbell
 * types that do not participate in doorbell recovery.
 */
int bnxt_hdbr_get_grp(u64 db_val)
{
	/* Only the type field (bits 63:60) selects the group */
	db_val &= DBC_TYPE_MASK;

	switch (db_val) {
	case DBR_TYPE_SQ:
		return DBC_GROUP_SQ;

	case DBR_TYPE_RQ:
		return DBC_GROUP_RQ;

	case DBR_TYPE_SRQ:
	case DBR_TYPE_SRQ_ARM:
	case DBR_TYPE_SRQ_ARMENA:
		return DBC_GROUP_SRQ;

	case DBR_TYPE_CQ:
	case DBR_TYPE_CQ_ARMSE:
	case DBR_TYPE_CQ_ARMALL:
	case DBR_TYPE_CQ_ARMENA:
	case DBR_TYPE_CQ_CUTOFF_ACK:
		return DBC_GROUP_CQ;

	default:
		break;
	}

	return DBC_GROUP_MAX;
}

/*
 * Caller of this function is debugfs knob. It dumps the kernel memory table
 * main structure value to caller.
 * Additionally, dump page content to dmesg. Since we may have many pages, it
 * is too large to output to debugfs.
 *
 * Returns a kasprintf()-allocated string; the caller owns it and must
 * kfree() it (debugfs read path).  Returns the "ktbl is NULL" string (or
 * NULL on allocation failure) when no table exists.
 */
char *bnxt_hdbr_ktbl_dump(struct bnxt_hdbr_ktbl *ktbl)
{
	struct dbc_drk64 *slot;
	char *buf;
	int i, j;

	if (!ktbl) {
		buf = kasprintf(GFP_KERNEL, "ktbl is NULL\n");
		return buf;
	}

	/* Structure data to debugfs console */
	buf = kasprintf(GFP_KERNEL,
			"group_type = %d\n"
			"first_avail = %d\n"
			"first_empty = %d\n"
			"last_entry = %d\n"
			"slot_avail = %d\n"
			"num_4k_pages = %d\n"
			"daddr = 0x%016llX\n"
			"link_slot = 0x%016llX\n",
			ktbl->group_type,
			ktbl->first_avail,
			ktbl->first_empty,
			ktbl->last_entry,
			ktbl->slot_avail,
			ktbl->num_4k_pages,
			ktbl->daddr,
			(u64)ktbl->link_slot);

	/* Page content dump to dmesg console */
	pr_info("====== Dumping ktbl info ======\n%s", buf);
	for (i = 0; i < ktbl->num_4k_pages; i++) {
		slot = ktbl->pages[i];
		pr_info("ktbl->pages[%d]: 0x%016llX\n", i, (u64)slot);
		for (j = 0; j < 256; j++) {
			/*
			 * Always print the first and last slot of each page;
			 * skip interior slots that are completely empty.
			 */
			if (j && j < 255 && !slot[j].flags && !slot[j].memptr)
				continue;
			pr_info("pages[%2d][%3d], 0x%016llX, 0x%016llX\n",
				i, j, le64_to_cpu(slot[j].flags),
				le64_to_cpu(slot[j].memptr));
		}
	}

	return buf;
}

/*
 * This function is called during L2 driver context memory allocation time.
 * It is on the path of nic open.
 * The initialization is allocating the memory for main data structure and
 * setup initial values.
 * pg_ptr and da are pointing to the first page allocated in
 * bnxt_setup_ctxm_pg_tbls()
 *
 * Returns 0 on success or -ENOMEM.  On success the table is linked into
 * bp->hdbr_info.ktbl[group] and owns no pages itself: the first page
 * belongs to the backing-store context memory.
 */
int bnxt_hdbr_ktbl_init(struct bnxt *bp, int group, void *pg_ptr, dma_addr_t da)
{
	struct bnxt_hdbr_ktbl *ktbl;
	int i;

	ktbl = kzalloc(sizeof(*ktbl), GFP_KERNEL);
	if (!ktbl)
		return -ENOMEM;

	memset(pg_ptr, 0, PAGE_SIZE_4K);
	ktbl->pdev = bp->pdev;
	spin_lock_init(&ktbl->hdbr_kmem_lock);
	ktbl->group_type = group;
	ktbl->first_avail = 0;
	ktbl->first_empty = 0;
	ktbl->last_entry = -1; /* There isn't last entry at first */
	ktbl->slot_avail = NSLOT_PER_4K_PAGE;
	ktbl->num_4k_pages = 1;
	ktbl->pages[0] = pg_ptr;
	ktbl->daddr = da;
	/* The last 16-byte slot of each page is reserved as a link slot */
	ktbl->link_slot = pg_ptr + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE;
	/*
	 * NOTE(review): num_4k_pages is fixed to 1 just above, so this loop
	 * body never executes today; it only matters if the initial page
	 * count ever becomes > 1.  Extra pages are chained via link slots.
	 */
	for (i = 1; i < ktbl->num_4k_pages; i++) {
		pg_ptr += PAGE_SIZE_4K;
		ktbl->pages[i] = pg_ptr;
		da += PAGE_SIZE_4K;
		bnxt_hdbr_set_link(ktbl->link_slot, da);
		ktbl->link_slot += PAGE_SIZE_4K;
	}

	/* Link to main bnxt structure */
	bp->hdbr_info.ktbl[group] = ktbl;

	return 0;
}

/*
 * This function is called during L2 driver context memory free time. It is on
 * the path of nic close.
+ */ +void bnxt_hdbr_ktbl_uninit(struct bnxt *bp, int group) +{ + struct bnxt_hdbr_ktbl *ktbl; + struct dbc_drk64 *slot; + dma_addr_t da; + void *ptr; + int i; + + /* Tear off from bp structure first */ + ktbl = bp->hdbr_info.ktbl[group]; + bp->hdbr_info.ktbl[group] = NULL; + if (!ktbl) + return; + + /* Free attached pages(first page will be freed by bnxt_free_ctx_pg_tbls() */ + for (i = ktbl->num_4k_pages - 1; i >= 1; i--) { + ptr = ktbl->pages[i]; + slot = ktbl->pages[i - 1] + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE; + da = (dma_addr_t)le64_to_cpu(slot->memptr); + dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, ptr, da); + } + + /* Free the control structure at last */ + kfree(ktbl); +} + +/* + * This function is called when dbnxt_hdbr_reg_apg() run out of memory slots. + * hdbr_kmem_lock is held in caller, so it is safe to alter the kernel page + * chain. + */ +static int bnxt_hdbr_alloc_ktbl_pg(struct bnxt_hdbr_ktbl *ktbl) +{ + dma_addr_t da; + void *ptr; + + /* Development stage guard */ + if (ktbl->num_4k_pages >= MAX_KMEM_4K_PAGES) { + pr_err("Must fix: need more than MAX_KMEM_4K_PAGES\n"); + return -ENOMEM; + } + + /* Alloc one page */ + ptr = dma_alloc_coherent(&ktbl->pdev->dev, PAGE_SIZE_4K, &da, GFP_KERNEL | __GFP_ZERO); + if (!ptr) + return -ENOMEM; + + /* Chain up with existing pages */ + ktbl->pages[ktbl->num_4k_pages] = ptr; + bnxt_hdbr_set_link(ktbl->link_slot, da); + ktbl->link_slot = ptr + PAGE_SIZE_4K - DBC_KERNEL_ENTRY_SIZE; + ktbl->num_4k_pages += 1; + ktbl->slot_avail += NSLOT_PER_4K_PAGE; + + return 0; +} + +/* + * This function is called when L2 driver, RoCE driver or RoCE driver on + * behalf of rocelib need to register its application memory page. + * Each application memory page is linked in kernel memory table with a + * 16 bytes memory slot. 
+ */ +int bnxt_hdbr_reg_apg(struct bnxt_hdbr_ktbl *ktbl, dma_addr_t ap_da, int *idx, u16 pi) +{ + struct dbc_drk64 *slot; + int rc = 0; + + spin_lock(&ktbl->hdbr_kmem_lock); + + /* Add into kernel talbe */ + if (ktbl->slot_avail == 0) { + rc = bnxt_hdbr_alloc_ktbl_pg(ktbl); + if (rc) + goto exit; + } + + /* Fill up the new entry */ + slot = get_slot(ktbl, ktbl->first_avail); + bnxt_hdbr_set_slot(slot, ap_da, pi, ktbl->first_avail == ktbl->first_empty); + *idx = ktbl->first_avail; + ktbl->slot_avail--; + + /* Clear last flag of previous and advance first_avail index */ + if (ktbl->first_avail == ktbl->first_empty) { + if (ktbl->last_entry >= 0) { + slot = get_slot(ktbl, ktbl->last_entry); + slot->flags &= cpu_to_le64(~DBC_DRK64_LAST); + } + ktbl->last_entry = ktbl->first_avail; + ktbl->first_avail++; + ktbl->first_empty++; + } else { + while (++ktbl->first_avail < ktbl->first_empty) { + slot = get_slot(ktbl, ktbl->first_avail); + if (slot->flags & cpu_to_le64(DBC_DRK64_VALID)) + continue; + break; + } + } + +exit: + spin_unlock(&ktbl->hdbr_kmem_lock); + return rc; +} +EXPORT_SYMBOL(bnxt_hdbr_reg_apg); + +/* + * This function is called when L2 driver, RoCE driver or RoCE driver on + * behalf of rocelib need to unregister its application memory page. + * The corresponding memory slot need to be cleared. + * Kernel memory table will reuse that slot for later application page. 
+ */ +void bnxt_hdbr_unreg_apg(struct bnxt_hdbr_ktbl *ktbl, int idx) +{ + struct dbc_drk64 *slot; + + spin_lock(&ktbl->hdbr_kmem_lock); + if (idx == ktbl->last_entry) { + /* Find the new last_entry index, and mark last */ + while (--ktbl->last_entry >= 0) { + slot = get_slot(ktbl, ktbl->last_entry); + if (slot->flags & cpu_to_le64(DBC_DRK64_VALID)) + break; + } + if (ktbl->last_entry >= 0) { + slot = get_slot(ktbl, ktbl->last_entry); + slot->flags |= cpu_to_le64(DBC_DRK64_LAST); + } + ktbl->first_empty = ktbl->last_entry + 1; + } + + /* unregister app page entry */ + bnxt_hdbr_clear_slot(get_slot(ktbl, idx)); + + /* update first_avail index to lower possible */ + if (idx < ktbl->first_avail) + ktbl->first_avail = idx; + ktbl->slot_avail++; + spin_unlock(&ktbl->hdbr_kmem_lock); +} +EXPORT_SYMBOL(bnxt_hdbr_unreg_apg); + +/* + * Map L2 ring type to DB copy group type + */ +int bnxt_hdbr_r2g(u32 ring_type) +{ + switch (ring_type) { + case HWRM_RING_ALLOC_TX: + return DBC_GROUP_SQ; + + case HWRM_RING_ALLOC_RX: + case HWRM_RING_ALLOC_AGG: + return DBC_GROUP_SRQ; + + case HWRM_RING_ALLOC_CMPL: + return DBC_GROUP_CQ; + + default: + break; + } + + return DBC_GROUP_MAX; +} + +/* + * Allocate a 4K page for L2 DB copies. This is called when running out of + * available DB copy blocks during DB registering. 
+ */ +static int bnxt_hdbr_l2_alloc_page(struct bnxt *bp, int group) +{ + struct bnxt_hdbr_l2_pgs *app_pgs; + dma_addr_t da = 0; + int ktbl_idx; + __le64 *ptr; + int rc; + + app_pgs = bp->hdbr_pgs[group]; + if (app_pgs->alloced_pages >= app_pgs->max_pages) { + dev_err(&bp->pdev->dev, "Max reserved HDBR pages exceeded\n"); + return -EINVAL; + } + ptr = dma_zalloc_coherent(&bp->pdev->dev, PAGE_SIZE_4K, &da, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + ptr[0] = cpu_to_le64(DBC_VALUE_LAST); + wmb(); /* Make sure HW see this slot when page linked in */ + /* Register to kernel table */ + rc = bnxt_hdbr_reg_apg(bp->hdbr_info.ktbl[group], da, &ktbl_idx, 0); + if (rc) { + dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, ptr, da); + return rc; + } + app_pgs->pages[app_pgs->alloced_pages].ptr = ptr; + app_pgs->pages[app_pgs->alloced_pages].da = da; + app_pgs->pages[app_pgs->alloced_pages].ktbl_idx = ktbl_idx; + app_pgs->alloced_pages++; + return 0; +} + +/* + * The l2 init function is called after L2 driver configured backing store + * context memory and bnxt_hwrm_func_resc_qcaps. + * The initialization is allocating the management structure and initialize + * it with the proper values. + * + * Inside L2 DB copy app page, DBs are grouped by group type. 
+ * DBC_GROUP_SQ : grp_size = 1, + * offset 0: SQ producer index doorbell + * DBC_GROUP_SRQ : grp_size = 1, + * offset 0: SRQ producer index doorbell + * DBC_GROUP_CQ : grp_size = 3, + * offset 0: CQ consumer index doorbell + * offset 1: CQ_ARMALL/CQ_ARMASE (share slot) + * offset 2: CUTOFF_ACK + */ +static int bnxt_hdbr_l2_init_group(struct bnxt *bp, int group) +{ + struct bnxt_hdbr_l2_pgs *app_pgs = NULL; + int grp_size, entries_per_pg, entries, max_pgs; + + switch (group) { + case DBC_GROUP_SQ: + grp_size = HDBR_L2_SQ_BLK_SIZE; + entries_per_pg = HDBR_L2_SQ_ENTRY_PER_PAGE; + entries = bp->hw_resc.max_tx_rings; + break; + case DBC_GROUP_SRQ: + grp_size = HDBR_L2_SRQ_BLK_SIZE; + entries_per_pg = HDBR_L2_SRQ_ENTRY_PER_PAGE; + entries = bp->hw_resc.max_rx_rings; + break; + case DBC_GROUP_CQ: + grp_size = HDBR_L2_CQ_BLK_SIZE; + entries_per_pg = HDBR_L2_CQ_ENTRY_PER_PAGE; + entries = bp->hw_resc.max_cp_rings; + break; + default: + /* Other group/DB types are not needed */ + goto exit; + } + max_pgs = DIV_ROUND_UP(entries, entries_per_pg); + + app_pgs = kzalloc(struct_size(app_pgs, pages, max_pgs), GFP_KERNEL); + if (!app_pgs) + return -ENOMEM; + app_pgs->max_pages = max_pgs; + app_pgs->grp_size = grp_size; + app_pgs->entries_per_pg = entries_per_pg; + +exit: + /* Link to main bnxt structure */ + bp->hdbr_pgs[group] = app_pgs; + + return 0; +} + +int bnxt_hdbr_l2_init(struct bnxt *bp) +{ + int rc, group; + + if (!bp->hdbr_info.hdbr_enabled) + return 0; + + for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) { + rc = bnxt_hdbr_l2_init_group(bp, group); + if (rc) + return rc; + } + + return 0; +} + +/* + * This function is called during L2 driver context memory free time. It is on + * the path of nic close. 
+ */ +void bnxt_hdbr_l2_uninit(struct bnxt *bp, int group) +{ + struct bnxt_hdbr_l2_pgs *pgs; + struct hdbr_l2_pg *p; + int i; + + /* Cut off from main structure */ + pgs = bp->hdbr_pgs[group]; + bp->hdbr_pgs[group] = NULL; + + if (!pgs) + return; + + for (i = 0; i < pgs->alloced_pages; i++) { + p = &pgs->pages[i]; + /* Unregister from kernel table */ + bnxt_hdbr_unreg_apg(bp->hdbr_info.ktbl[group], p->ktbl_idx); + /* Free memory up */ + dma_free_coherent(&bp->pdev->dev, PAGE_SIZE_4K, p->ptr, p->da); + } + kfree(pgs); +} + +/* + * This function is called when a new db is created. + * It finds a memoty slot in the DB copy application page, and return the + * address. + * Not all DB type need a copy, for those DB types don't need a copy, we + * simply return NULL. + */ +__le64 *bnxt_hdbr_reg_db(struct bnxt *bp, int group) +{ + struct bnxt_hdbr_l2_pgs *pgs; + struct hdbr_l2_pg *p; + int rc, i, n, idx; + + if (group >= DBC_GROUP_MAX) + return NULL; + + pgs = bp->hdbr_pgs[group]; + if (!pgs) + return NULL; + + if (pgs->next_page == pgs->alloced_pages) { + rc = bnxt_hdbr_l2_alloc_page(bp, group); + if (rc) + return NULL; + } + + n = pgs->grp_size; + p = &pgs->pages[pgs->next_page]; + idx = pgs->next_entry * n; /* This is what we'll return */ + for (i = 0; i < n; i++) + p->ptr[idx + i] = cpu_to_le64(DBC_VALUE_INIT); + pgs->next_entry++; + if (pgs->next_entry == pgs->entries_per_pg) { + pgs->next_page++; + pgs->next_entry = 0; + } else { + p->ptr[pgs->next_entry * n] = cpu_to_le64(DBC_VALUE_LAST); + } + + return &p->ptr[idx]; +} + +/* + * This function is called when all L2 rings are freed. + * Driver is still running, but rings are freed, so that all DB copy slots should be + * reclaimed for later newly created rings' DB. 
 */
void bnxt_hdbr_reset_l2pgs(struct bnxt *bp)
{
	struct bnxt_hdbr_l2_pgs *pgs;
	struct hdbr_l2_pg *p;
	int group, i;

	for (group = DBC_GROUP_SQ; group < DBC_GROUP_MAX; group++) {
		pgs = bp->hdbr_pgs[group];
		if (!pgs)
			continue;

		/* Wipe every page back to an empty, terminated state */
		for (i = 0; i < pgs->alloced_pages; i++) {
			p = &pgs->pages[i];
			memset(p->ptr, 0, PAGE_SIZE_4K);
			p->ptr[0] = cpu_to_le64(DBC_VALUE_LAST);
		}
		/* Restart allocation cursor; pages themselves stay mapped */
		pgs->next_page = 0;
		pgs->next_entry = 0;
	}
}

/*
 * Caller of this function is debugfs knob. It dumps the main structure value
 * of L2 driver DB copy region to caller.
 * Additionally, dump page content to dmesg. Since we may have many pages, it
 * is too large to output to debugfs.
 *
 * Returns a kasprintf()-allocated string owned by the caller (kfree()).
 */
char *bnxt_hdbr_l2pg_dump(struct bnxt_hdbr_l2_pgs *app_pgs)
{
	struct hdbr_l2_pg *p;
	int used_entries = 0;
	u64 dbc_val;
	char *buf;
	int pi, i;

	if (!app_pgs) {
		buf = kasprintf(GFP_KERNEL, "No data available!\n");
		return buf;
	}

	if (app_pgs->alloced_pages)
		used_entries = app_pgs->next_page * app_pgs->entries_per_pg + app_pgs->next_entry;
	/* Structure data to debugfs console */
	buf = kasprintf(GFP_KERNEL,
			"max_pages = %d\n"
			"alloced_pages = %d\n"
			"group_size = %d\n"
			"entries_per_pg = %d\n"
			"used entries = %d\n"
			"used db slots = %d\n",
			app_pgs->max_pages,
			app_pgs->alloced_pages,
			app_pgs->grp_size,
			app_pgs->entries_per_pg,
			used_entries,
			used_entries * app_pgs->grp_size);

	pr_info("====== Dumping pages info ======\n%s", buf);
	for (pi = 0; pi < app_pgs->alloced_pages; pi++) {
		p = &app_pgs->pages[pi];
		/* Page content dump to dmesg console */
		pr_info("page[%d].kernel addr = 0x%016llX\n"
			"page[%d].dma addr = 0x%016llX\n"
			"page[%d].Kernel index = %d\n",
			pi, (u64)p->ptr,
			pi, p->da,
			pi, p->ktbl_idx);
		for (i = 0; i < 512; i++) {
			/* Always print slot 0 and 511; skip empty interiors */
			if (i && i < 511 && !p->ptr[i])
				continue;
			dbc_val = le64_to_cpu(p->ptr[i]);
			/* Decode the 64-bit DB copy per the header's layout */
			pr_info("page[%d][%3d] 0x%016llX : type=%llx "
				"debug_trace=%d valid=%d path=%llx xID=0x%05llx "
				"toggle=%llx epoch=%d index=0x%06llx\n",
				pi, i, dbc_val,
				(dbc_val & DBC_DBC64_TYPE_MASK) >> DBC_DBC64_TYPE_SFT,
				(dbc_val & DBC_DBC64_DEBUG_TRACE) ? 1 : 0,
				(dbc_val & DBC_DBC64_VALID) ? 1 : 0,
				(dbc_val & DBC_DBC64_PATH_MASK) >> DBC_DBC64_PATH_SFT,
				(dbc_val & DBC_DBC64_XID_MASK) >> DBC_DBC64_XID_SFT,
				(dbc_val & DBC_DBC64_TOGGLE_MASK) >> DBC_DBC64_TOGGLE_SFT,
				(dbc_val & DBC_DBC64_EPOCH) ? 1 : 0,
				(dbc_val & DBC_DBC64_INDEX_MASK));
		}
	}

	return buf;
}
diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.h
new file mode 100644
index 000000000000..c5f55e2c5b76
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hdbr.h
@@ -0,0 +1,141 @@
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2022-2023 Broadcom Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#ifndef __BNXT_HDBR_H__
#define __BNXT_HDBR_H__

/*
 * 64-bit doorbell
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 * |Offset|63,60| 59| 58|57,56| (4) |51,32|31,,27| 26,25| 24| 23,0|
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 * | | | | | |unused| |unused|toggle|epoch| |
 * | 0x0 | type| unused |valid| path|------| xID |------+------+-----+index|
 * | | | | | | pi-hi| | pi-lo | |
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 *
 * 64-bit doorbell copy format for HW DBR recovery
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 * |Offset|63,60| 59| 58|57,56| (4) |51,32| (5) | 26,25| 24| 23,0|
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 * |0x0 | type|debug_trace|valid| path|unused| xID |unused|toggle|epoch|index|
 * +------+-----+-----------+-----+-----+------+-----+------+------+-----+-----+
 */

#define DBC_TYPE_MASK (0xfULL << 60)

/* Initial value of an active DB copy slot / terminator of a copy page */
#define DBC_VALUE_INIT DBR_INDEX_MASK
#define DBC_VALUE_LAST (DBC_TYPE_MASK | DBR_VALID)

/* Doorbell Recovery Kernel Memory Structures
 * +------+------+-----+------+-----+------+------+---------+------+----+-----+
 * |Offset| 63,48|47,32| 31,12|11,10| 9,8| 7,4| 3| 2| 1| 0|
 * +------+------+-----+------+-----+------+------+---------+------+----+-----+
 * |0x0 |unused| pi |unused| size|stride|unused|db_format|linked|last|valid|
 * +------+------+-----+------+-----+------+------+---------+------+----+-----+
 * |0x8 | memptr |
 * +------+-------------------------------------------------------------------+
 */
#define DBC_KERNEL_ENTRY_SIZE 16

#define PAGE_SIZE_4K 4096
#define MAX_KMEM_4K_PAGES 1029
/* One slot per page is reserved as the link slot to the next page */
#define NSLOT_PER_4K_PAGE (PAGE_SIZE_4K / DBC_KERNEL_ENTRY_SIZE - 1)

/*
 * Per-group kernel memory table: a chain of 4K pages of dbc_drk64 slots,
 * each slot describing one registered application DB-copy page.
 */
struct bnxt_hdbr_ktbl {
	struct pci_dev *pdev;
	/* protect this main DB copy kernel memory table data structure */
	spinlock_t hdbr_kmem_lock;
	int group_type;		/* DBC_GROUP_* this table serves */
	int first_avail;	/* lowest free slot index */
	int first_empty;	/* first slot past all ever-used slots */
	int last_entry;		/* index of the slot flagged LAST, -1 if none */
	int num_4k_pages;	/* pages currently chained */
	int slot_avail;		/* free slots remaining across all pages */
	void *pages[MAX_KMEM_4K_PAGES];
	dma_addr_t daddr;	/* DMA address of the first page */
	struct dbc_drk64 *link_slot;	/* tail link slot of the last page */
};

/* Translate a flat slot index to its page and in-page position */
static inline struct dbc_drk64 *get_slot(struct bnxt_hdbr_ktbl *ktbl, int idx)
{
	return ((struct dbc_drk64 *)ktbl->pages[idx / NSLOT_PER_4K_PAGE]) +
		idx % NSLOT_PER_4K_PAGE;
}

static inline void bnxt_hdbr_clear_slot(struct dbc_drk64 *slt)
{
	slt->flags = 0;
	wmb(); /* Sync flags before clear memory pointer */
	slt->memptr = 0;
}

/* Publish memptr before flags so HW never sees VALID with a stale pointer */
static inline void bnxt_hdbr_set_slot(struct dbc_drk64 *slt, dma_addr_t da,
				      u16 pi, bool last)
{
	u64 flags;

	flags = DBC_DRK64_VALID | DBC_DRK64_DB_FORMAT_B64 |
		DBC_DRK64_STRIDE_OFF;
	flags |= ((u64)pi << DBC_DRK64_PI_SFT);
	if (last)
		flags |= DBC_DRK64_LAST;

	slt->memptr = cpu_to_le64(da);
	wmb(); /* Sync memory pointer before setting flags */
	slt->flags = cpu_to_le64(flags);
}

static inline void bnxt_hdbr_set_link(struct dbc_drk64 *ls, dma_addr_t da)
{
	ls->memptr = cpu_to_le64(da);
	wmb(); /* Sync memory pointer before setting flags */
	ls->flags = cpu_to_le64(DBC_DRK64_VALID | DBC_DRK64_LINKED);
}

/* L2 driver part HW based doorbell drop recovery definition */
#define HDBR_DB_SIZE 8
#define HDBR_L2_SQ_BLK_SIZE 1
#define HDBR_L2_SRQ_BLK_SIZE 1
#define HDBR_L2_CQ_BLK_SIZE 3
#define HDBR_DB_PER_PAGE (PAGE_SIZE_4K / HDBR_DB_SIZE)
#define HDBR_L2_SQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_SQ_BLK_SIZE)
#define HDBR_L2_SRQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_SRQ_BLK_SIZE)
#define HDBR_L2_CQ_ENTRY_PER_PAGE (HDBR_DB_PER_PAGE / HDBR_L2_CQ_BLK_SIZE)

/* One 4K DB-copy page and its registration index in the kernel table */
struct hdbr_l2_pg {
	__le64 *ptr;
	dma_addr_t da;
	int ktbl_idx;
};

/* Per-group manager for the L2 DB-copy pages */
struct bnxt_hdbr_l2_pgs {
	int max_pages;
	int alloced_pages;
	int grp_size;		/* DB slots per ring (HDBR_L2_*_BLK_SIZE) */
	int entries_per_pg;
	int next_page;		/* allocation cursor: page ... */
	int next_entry;		/* ... and entry within that page */
	struct hdbr_l2_pg pages[] __counted_by(max_pages);
};

int bnxt_hdbr_r2g(u32 ring_type);
int bnxt_hdbr_get_grp(u64 db_val);
int bnxt_hdbr_ktbl_init(struct bnxt *bp, int group, void *pg_ptr, dma_addr_t da);
void bnxt_hdbr_ktbl_uninit(struct bnxt *bp, int group);
int bnxt_hdbr_reg_apg(struct bnxt_hdbr_ktbl *ktbl, dma_addr_t ap_da, int *idx, u16 pi);
void bnxt_hdbr_unreg_apg(struct bnxt_hdbr_ktbl *ktbl, int idx);
char *bnxt_hdbr_ktbl_dump(struct bnxt_hdbr_ktbl *ktbl);
int bnxt_hdbr_l2_init(struct bnxt *bp);
void bnxt_hdbr_l2_uninit(struct bnxt *bp, int group);
__le64 *bnxt_hdbr_reg_db(struct bnxt *bp, int group);
void bnxt_hdbr_reset_l2pgs(struct bnxt *bp);
char *bnxt_hdbr_l2pg_dump(struct bnxt_hdbr_l2_pgs *app_pgs);

#endif
diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hsi.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_hsi.h
new file mode 100644
index 000000000000..bb1868dee9fd
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hsi.h
@@ -0,0 +1,21048 @@
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2014-2018 Broadcom Limited
 * Copyright (c) 2018-2024 Broadcom Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * DO NOT MODIFY!!! This file is automatically generated.
+ */ + +#ifndef _BNXT_HSI_H_ +#define _BNXT_HSI_H_ + +/* hwrm_cmd_hdr (size:128b/16B) */ +struct hwrm_cmd_hdr { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_resp_hdr (size:64b/8B) */ +struct hwrm_resp_hdr { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +#define CMD_DISCR_TLV_ENCAP 0x8000UL +#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP + + +#define TLV_TYPE_HWRM_REQUEST 0x1UL +#define TLV_TYPE_HWRM_RESPONSE 0x2UL +#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL +#define TLV_TYPE_QUERY_ROCE_CC_GEN1 0x4UL +#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 0x5UL +#define TLV_TYPE_QUERY_ROCE_CC_GEN2 0x6UL +#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 0x7UL +#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY 0x8001UL +#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL +#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL +#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL +#define TLV_TYPE_ENGINE_CKV_HOST_ALGORITHMS 0x8006UL +#define TLV_TYPE_ENGINE_CKV_HOST_ECC_PUBLIC_KEY 0x8007UL +#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL +#define TLV_TYPE_ENGINE_CKV_FW_ECC_PUBLIC_KEY 0x8009UL +#define TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS 0x800aUL +#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_FW_ALGORITHMS + + +/* tlv (size:64b/8B) */ +struct tlv { + __le16 cmd_discr; + u8 reserved_8b; + u8 flags; + #define TLV_FLAGS_MORE 0x1UL + #define TLV_FLAGS_MORE_LAST 0x0UL + #define TLV_FLAGS_MORE_NOT_LAST 0x1UL + #define TLV_FLAGS_REQUIRED 0x2UL + #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1) + #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1) + #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES + __le16 tlv_type; + __le16 length; +}; + +/* input (size:128b/16B) */ +struct input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* output (size:64b/8B) */ +struct output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; +}; + +/* hwrm_short_input (size:128b/16B) */ 
+struct hwrm_short_input { + __le16 req_type; + __le16 signature; + #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL + #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD + __le16 target_id; + #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL + #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL + #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS + __le16 size; + __le64 req_addr; +}; + +/* cmd_nums (size:64b/8B) */ +struct cmd_nums { + __le16 req_type; + #define HWRM_VER_GET 0x0UL + #define HWRM_FUNC_ECHO_RESPONSE 0xbUL + #define HWRM_ERROR_RECOVERY_QCFG 0xcUL + #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL + #define HWRM_FUNC_BUF_UNRGTR 0xeUL + #define HWRM_FUNC_VF_CFG 0xfUL + #define HWRM_RESERVED1 0x10UL + #define HWRM_FUNC_RESET 0x11UL + #define HWRM_FUNC_GETFID 0x12UL + #define HWRM_FUNC_VF_ALLOC 0x13UL + #define HWRM_FUNC_VF_FREE 0x14UL + #define HWRM_FUNC_QCAPS 0x15UL + #define HWRM_FUNC_QCFG 0x16UL + #define HWRM_FUNC_CFG 0x17UL + #define HWRM_FUNC_QSTATS 0x18UL + #define HWRM_FUNC_CLR_STATS 0x19UL + #define HWRM_FUNC_DRV_UNRGTR 0x1aUL + #define HWRM_FUNC_VF_RESC_FREE 0x1bUL + #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL + #define HWRM_FUNC_DRV_RGTR 0x1dUL + #define HWRM_FUNC_DRV_QVER 0x1eUL + #define HWRM_FUNC_BUF_RGTR 0x1fUL + #define HWRM_PORT_PHY_CFG 0x20UL + #define HWRM_PORT_MAC_CFG 0x21UL + #define HWRM_PORT_TS_QUERY 0x22UL + #define HWRM_PORT_QSTATS 0x23UL + #define HWRM_PORT_LPBK_QSTATS 0x24UL + #define HWRM_PORT_CLR_STATS 0x25UL + #define HWRM_PORT_LPBK_CLR_STATS 0x26UL + #define HWRM_PORT_PHY_QCFG 0x27UL + #define HWRM_PORT_MAC_QCFG 0x28UL + #define HWRM_PORT_MAC_PTP_QCFG 0x29UL + #define HWRM_PORT_PHY_QCAPS 0x2aUL + #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL + #define HWRM_PORT_PHY_I2C_READ 0x2cUL + #define HWRM_PORT_LED_CFG 0x2dUL + #define HWRM_PORT_LED_QCFG 0x2eUL + #define HWRM_PORT_LED_QCAPS 0x2fUL + #define HWRM_QUEUE_QPORTCFG 0x30UL + #define HWRM_QUEUE_QCFG 0x31UL + #define HWRM_QUEUE_CFG 0x32UL + #define HWRM_FUNC_VLAN_CFG 0x33UL + #define 
HWRM_FUNC_VLAN_QCFG 0x34UL + #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL + #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL + #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL + #define HWRM_QUEUE_PRI2COS_CFG 0x38UL + #define HWRM_QUEUE_COS2BW_QCFG 0x39UL + #define HWRM_QUEUE_COS2BW_CFG 0x3aUL + #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL + #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL + #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL + #define HWRM_VNIC_ALLOC 0x40UL + #define HWRM_VNIC_FREE 0x41UL + #define HWRM_VNIC_CFG 0x42UL + #define HWRM_VNIC_QCFG 0x43UL + #define HWRM_VNIC_TPA_CFG 0x44UL + #define HWRM_VNIC_TPA_QCFG 0x45UL + #define HWRM_VNIC_RSS_CFG 0x46UL + #define HWRM_VNIC_RSS_QCFG 0x47UL + #define HWRM_VNIC_PLCMODES_CFG 0x48UL + #define HWRM_VNIC_PLCMODES_QCFG 0x49UL + #define HWRM_VNIC_QCAPS 0x4aUL + #define HWRM_VNIC_UPDATE 0x4bUL + #define HWRM_RING_ALLOC 0x50UL + #define HWRM_RING_FREE 0x51UL + #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL + #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL + #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_SCHQ_ALLOC 0x55UL + #define HWRM_RING_SCHQ_CFG 0x56UL + #define HWRM_RING_SCHQ_FREE 0x57UL + #define HWRM_RING_RESET 0x5eUL + #define HWRM_RING_GRP_ALLOC 0x60UL + #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RING_CFG 0x62UL + #define HWRM_RING_QCFG 0x63UL + #define HWRM_RESERVED5 0x64UL + #define HWRM_RESERVED6 0x65UL + #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL + #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL + #define HWRM_QUEUE_MPLS_QCAPS 0x80UL + #define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL + #define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL + #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL + #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL + #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL + #define HWRM_QUEUE_GLOBAL_CFG 0x86UL + #define HWRM_QUEUE_GLOBAL_QCFG 0x87UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL + #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL + #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL + #define 
HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL + #define HWRM_QUEUE_QCAPS 0x8cUL + #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG 0x8dUL + #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG 0x8eUL + #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG 0x8fUL + #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL + #define HWRM_CFA_L2_FILTER_FREE 0x91UL + #define HWRM_CFA_L2_FILTER_CFG 0x92UL + #define HWRM_CFA_L2_SET_RX_MASK 0x93UL + #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL + #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL + #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL + #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL + #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL + #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL + #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL + #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL + #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL + #define HWRM_CFA_EM_FLOW_FREE 0x9dUL + #define HWRM_CFA_EM_FLOW_CFG 0x9eUL + #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL + #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL + #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL + #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG 0xa3UL + #define HWRM_STAT_CTX_ENG_QUERY 0xafUL + #define HWRM_STAT_CTX_ALLOC 0xb0UL + #define HWRM_STAT_CTX_FREE 0xb1UL + #define HWRM_STAT_CTX_QUERY 0xb2UL + #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL + #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL + #define HWRM_PORT_PHY_MDIO_READ 0xb6UL + #define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL + #define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL + #define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL + #define HWRM_RESERVED7 0xbaUL + #define HWRM_PORT_TX_FIR_CFG 0xbbUL + #define HWRM_PORT_TX_FIR_QCFG 0xbcUL + #define HWRM_PORT_ECN_QSTATS 0xbdUL + #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL + #define HWRM_FW_LIVEPATCH 0xbfUL + #define HWRM_FW_RESET 0xc0UL + #define HWRM_FW_QSTATUS 0xc1UL + #define HWRM_FW_HEALTH_CHECK 0xc2UL + #define HWRM_FW_SYNC 0xc3UL + #define HWRM_FW_STATE_QCAPS 0xc4UL + #define HWRM_FW_STATE_QUIESCE 0xc5UL + #define HWRM_FW_STATE_BACKUP 0xc6UL + #define 
HWRM_FW_STATE_RESTORE 0xc7UL + #define HWRM_FW_SET_TIME 0xc8UL + #define HWRM_FW_GET_TIME 0xc9UL + #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL + #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL + #define HWRM_FW_IPC_MAILBOX 0xccUL + #define HWRM_FW_ECN_CFG 0xcdUL + #define HWRM_FW_ECN_QCFG 0xceUL + #define HWRM_FW_SECURE_CFG 0xcfUL + #define HWRM_EXEC_FWD_RESP 0xd0UL + #define HWRM_REJECT_FWD_RESP 0xd1UL + #define HWRM_FWD_RESP 0xd2UL + #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL + #define HWRM_PORT_PRBS_TEST 0xd5UL + #define HWRM_PORT_SFP_SIDEBAND_CFG 0xd6UL + #define HWRM_PORT_SFP_SIDEBAND_QCFG 0xd7UL + #define HWRM_FW_STATE_UNQUIESCE 0xd8UL + #define HWRM_PORT_DSC_DUMP 0xd9UL + #define HWRM_PORT_EP_TX_QCFG 0xdaUL + #define HWRM_PORT_EP_TX_CFG 0xdbUL + #define HWRM_PORT_CFG 0xdcUL + #define HWRM_PORT_QCFG 0xddUL + #define HWRM_PORT_MAC_QCAPS 0xdfUL + #define HWRM_TEMP_MONITOR_QUERY 0xe0UL + #define HWRM_REG_POWER_QUERY 0xe1UL + #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL + #define HWRM_REG_POWER_HISTOGRAM 0xe3UL + #define HWRM_WOL_FILTER_ALLOC 0xf0UL + #define HWRM_WOL_FILTER_FREE 0xf1UL + #define HWRM_WOL_FILTER_QCFG 0xf2UL + #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_QCAPS 0xf4UL + #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL + #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL + #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL + #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL + #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL + #define HWRM_CFA_METER_INSTANCE_CFG 0xfaUL + #define HWRM_CFA_VFR_ALLOC 0xfdUL + #define HWRM_CFA_VFR_FREE 0xfeUL + #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL + #define HWRM_CFA_VF_PAIR_FREE 0x101UL + #define HWRM_CFA_VF_PAIR_INFO 0x102UL + #define HWRM_CFA_FLOW_ALLOC 0x103UL + #define HWRM_CFA_FLOW_FREE 0x104UL + #define HWRM_CFA_FLOW_FLUSH 0x105UL + #define HWRM_CFA_FLOW_STATS 0x106UL + #define HWRM_CFA_FLOW_INFO 0x107UL + #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL + #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL + #define 
HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL + #define HWRM_CFA_PAIR_ALLOC 0x10dUL + #define HWRM_CFA_PAIR_FREE 0x10eUL + #define HWRM_CFA_PAIR_INFO 0x10fUL + #define HWRM_FW_IPC_MSG 0x110UL + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL + #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL + #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL + #define HWRM_CFA_FLOW_AGING_CFG 0x114UL + #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL + #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL + #define HWRM_CFA_CTX_MEM_RGTR 0x117UL + #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL + #define HWRM_CFA_CTX_MEM_QCTX 0x119UL + #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL + #define HWRM_CFA_COUNTER_QCAPS 0x11bUL + #define HWRM_CFA_COUNTER_CFG 0x11cUL + #define HWRM_CFA_COUNTER_QCFG 0x11dUL + #define HWRM_CFA_COUNTER_QSTATS 0x11eUL + #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL + #define HWRM_CFA_EEM_QCAPS 0x120UL + #define HWRM_CFA_EEM_CFG 0x121UL + #define HWRM_CFA_EEM_QCFG 0x122UL + #define HWRM_CFA_EEM_OP 0x123UL + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL + #define HWRM_CFA_TFLIB 0x125UL + #define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL + #define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL + #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL + #define HWRM_CFA_TLS_FILTER_FREE 0x129UL + #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL + #define HWRM_ENGINE_CKV_STATUS 0x12eUL + #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL + #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL + #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL + #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL + #define HWRM_ENGINE_CKV_FLUSH 0x133UL + #define HWRM_ENGINE_CKV_RNG_GET 0x134UL + #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL + #define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL + #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL + #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL + #define HWRM_ENGINE_QG_QUERY 0x13dUL + #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL + #define 
HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL + #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL + #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL + #define HWRM_ENGINE_QG_METER_QUERY 0x142UL + #define HWRM_ENGINE_QG_METER_BIND 0x143UL + #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL + #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL + #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL + #define HWRM_ENGINE_SG_QUERY 0x147UL + #define HWRM_ENGINE_SG_METER_QUERY 0x148UL + #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL + #define HWRM_ENGINE_SG_QG_BIND 0x14aUL + #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL + #define HWRM_ENGINE_CONFIG_QUERY 0x154UL + #define HWRM_ENGINE_STATS_CONFIG 0x155UL + #define HWRM_ENGINE_STATS_CLEAR 0x156UL + #define HWRM_ENGINE_STATS_QUERY 0x157UL + #define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR 0x158UL + #define HWRM_ENGINE_RQ_ALLOC 0x15eUL + #define HWRM_ENGINE_RQ_FREE 0x15fUL + #define HWRM_ENGINE_CQ_ALLOC 0x160UL + #define HWRM_ENGINE_CQ_FREE 0x161UL + #define HWRM_ENGINE_NQ_ALLOC 0x162UL + #define HWRM_ENGINE_NQ_FREE 0x163UL + #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL + #define HWRM_ENGINE_FUNC_QCFG 0x165UL + #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL + #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL + #define HWRM_FUNC_BACKING_STORE_QCAPS 0x192UL + #define HWRM_FUNC_BACKING_STORE_CFG 0x193UL + #define HWRM_FUNC_BACKING_STORE_QCFG 0x194UL + #define HWRM_FUNC_VF_BW_CFG 0x195UL + #define HWRM_FUNC_VF_BW_QCFG 0x196UL + #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL + #define HWRM_FUNC_QSTATS_EXT 0x198UL + #define HWRM_STAT_EXT_CTX_QUERY 0x199UL + #define HWRM_FUNC_SPD_CFG 0x19aUL + #define HWRM_FUNC_SPD_QCFG 0x19bUL + #define HWRM_FUNC_PTP_PIN_QCFG 0x19cUL + #define HWRM_FUNC_PTP_PIN_CFG 0x19dUL + #define HWRM_FUNC_PTP_CFG 0x19eUL + #define HWRM_FUNC_PTP_TS_QUERY 0x19fUL + #define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL + #define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL + #define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL + #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL + #define 
HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL + #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL + #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL + #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL + #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL + #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL + #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL + #define HWRM_FUNC_SYNCE_CFG 0x1abUL + #define HWRM_FUNC_SYNCE_QCFG 0x1acUL + #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL + #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL + #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL + #define HWRM_FUNC_LAG_CREATE 0x1b0UL + #define HWRM_FUNC_LAG_UPDATE 0x1b1UL + #define HWRM_FUNC_LAG_FREE 0x1b2UL + #define HWRM_FUNC_LAG_QCFG 0x1b3UL + #define HWRM_SELFTEST_QLIST 0x200UL + #define HWRM_SELFTEST_EXEC 0x201UL + #define HWRM_SELFTEST_IRQ 0x202UL + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL + #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL + #define HWRM_MFG_TIMERS_QUERY 0x206UL + #define HWRM_MFG_OTP_CFG 0x207UL + #define HWRM_MFG_OTP_QCFG 0x208UL + #define HWRM_MFG_HDMA_TEST 0x209UL + #define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL + #define HWRM_MFG_FRU_EEPROM_READ 0x20bUL + #define HWRM_MFG_SOC_IMAGE 0x20cUL + #define HWRM_MFG_SOC_QSTATUS 0x20dUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE 0x20eUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_READ 0x20fUL + #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH 0x210UL + #define HWRM_MFG_PRVSN_EXPORT_CSR 0x211UL + #define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL + #define HWRM_MFG_PRVSN_GET_STATE 0x213UL + #define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL + #define HWRM_MFG_PSOC_QSTATUS 0x215UL + #define HWRM_MFG_SELFTEST_QLIST 0x216UL + #define HWRM_MFG_SELFTEST_EXEC 0x217UL + #define HWRM_STAT_GENERIC_QSTATS 0x218UL + #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL + #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL + #define HWRM_UDCC_QCAPS 0x258UL + #define HWRM_UDCC_CFG 0x259UL + #define HWRM_UDCC_QCFG 0x25aUL + #define HWRM_UDCC_SESSION_CFG 0x25bUL + #define 
HWRM_UDCC_SESSION_QCFG 0x25cUL + #define HWRM_UDCC_SESSION_QUERY 0x25dUL + #define HWRM_UDCC_COMP_CFG 0x25eUL + #define HWRM_UDCC_COMP_QCFG 0x25fUL + #define HWRM_UDCC_COMP_QUERY 0x260UL + #define HWRM_TF 0x2bcUL + #define HWRM_TF_VERSION_GET 0x2bdUL + #define HWRM_TF_SESSION_OPEN 0x2c6UL + #define HWRM_TF_SESSION_REGISTER 0x2c8UL + #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL + #define HWRM_TF_SESSION_CLOSE 0x2caUL + #define HWRM_TF_SESSION_QCFG 0x2cbUL + #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL + #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL + #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL + #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL + #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL + #define HWRM_TF_TBL_TYPE_GET 0x2daUL + #define HWRM_TF_TBL_TYPE_SET 0x2dbUL + #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL + #define HWRM_TF_EM_INSERT 0x2eaUL + #define HWRM_TF_EM_DELETE 0x2ebUL + #define HWRM_TF_EM_HASH_INSERT 0x2ecUL + #define HWRM_TF_EM_MOVE 0x2edUL + #define HWRM_TF_TCAM_SET 0x2f8UL + #define HWRM_TF_TCAM_GET 0x2f9UL + #define HWRM_TF_TCAM_MOVE 0x2faUL + #define HWRM_TF_TCAM_FREE 0x2fbUL + #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL + #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL + #define HWRM_TF_IF_TBL_SET 0x2feUL + #define HWRM_TF_IF_TBL_GET 0x2ffUL + #define HWRM_TF_RESC_USAGE_SET 0x300UL + #define HWRM_TF_RESC_USAGE_QUERY 0x301UL + #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL + #define HWRM_TF_TBL_TYPE_FREE 0x303UL + #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL + #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL + #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL + #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL + #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL + #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL + #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL + #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL + #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL + #define HWRM_TFC_SESSION_FID_ADD 0x389UL + #define HWRM_TFC_SESSION_FID_REM 
0x38aUL + #define HWRM_TFC_IDENT_ALLOC 0x38bUL + #define HWRM_TFC_IDENT_FREE 0x38cUL + #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL + #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL + #define HWRM_TFC_IDX_TBL_SET 0x38fUL + #define HWRM_TFC_IDX_TBL_GET 0x390UL + #define HWRM_TFC_IDX_TBL_FREE 0x391UL + #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL + #define HWRM_TFC_TCAM_SET 0x393UL + #define HWRM_TFC_TCAM_GET 0x394UL + #define HWRM_TFC_TCAM_ALLOC 0x395UL + #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL + #define HWRM_TFC_TCAM_FREE 0x397UL + #define HWRM_TFC_IF_TBL_SET 0x398UL + #define HWRM_TFC_IF_TBL_GET 0x399UL + #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL + #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL + #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL + #define HWRM_SV 0x400UL + #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL + #define HWRM_DBG_READ_DIRECT 0xff10UL + #define HWRM_DBG_READ_INDIRECT 0xff11UL + #define HWRM_DBG_WRITE_DIRECT 0xff12UL + #define HWRM_DBG_WRITE_INDIRECT 0xff13UL + #define HWRM_DBG_DUMP 0xff14UL + #define HWRM_DBG_ERASE_NVM 0xff15UL + #define HWRM_DBG_CFG 0xff16UL + #define HWRM_DBG_COREDUMP_LIST 0xff17UL + #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL + #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL + #define HWRM_DBG_FW_CLI 0xff1aUL + #define HWRM_DBG_I2C_CMD 0xff1bUL + #define HWRM_DBG_RING_INFO_GET 0xff1cUL + #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL + #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL + #define HWRM_DBG_DRV_TRACE 0xff1fUL + #define HWRM_DBG_QCAPS 0xff20UL + #define HWRM_DBG_QCFG 0xff21UL + #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL + #define HWRM_DBG_USEQ_ALLOC 0xff23UL + #define HWRM_DBG_USEQ_FREE 0xff24UL + #define HWRM_DBG_USEQ_FLUSH 0xff25UL + #define HWRM_DBG_USEQ_QCAPS 0xff26UL + #define HWRM_DBG_USEQ_CW_CFG 0xff27UL + #define HWRM_DBG_USEQ_SCHED_CFG 0xff28UL + #define HWRM_DBG_USEQ_RUN 0xff29UL + #define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL + #define 
HWRM_DBG_USEQ_RESP_HDR 0xff2bUL + #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL + #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL + #define HWRM_NVM_DEFRAG 0xffecUL + #define HWRM_NVM_REQ_ARBITRATION 0xffedUL + #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL + #define HWRM_NVM_VALIDATE_OPTION 0xffefUL + #define HWRM_NVM_FLUSH 0xfff0UL + #define HWRM_NVM_GET_VARIABLE 0xfff1UL + #define HWRM_NVM_SET_VARIABLE 0xfff2UL + #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL + #define HWRM_NVM_MODIFY 0xfff4UL + #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL + #define HWRM_NVM_GET_DEV_INFO 0xfff6UL + #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL + #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL + #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL + #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL + #define HWRM_NVM_GET_DIR_INFO 0xfffbUL + #define HWRM_NVM_RAW_DUMP 0xfffcUL + #define HWRM_NVM_READ 0xfffdUL + #define HWRM_NVM_WRITE 0xfffeUL + #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL + #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK + __le16 unused_0[3]; +}; + +/* ret_codes (size:64b/8B) */ +struct ret_codes { + __le16 error_code; + #define HWRM_ERR_CODE_SUCCESS 0x0UL + #define HWRM_ERR_CODE_FAIL 0x1UL + #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL + #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL + #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL + #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL + #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL + #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL + #define HWRM_ERR_CODE_NO_BUFFER 0x8UL + #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL + #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL + #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL + #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL + #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL + #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL + #define HWRM_ERR_CODE_BUSY 0x10UL + #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL + #define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL + #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL + 
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL + #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL + #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL + #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED + __le16 unused_0[3]; +}; + +/* hwrm_err_output (size:128b/16B) */ +struct hwrm_err_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque_0; + __le16 opaque_1; + u8 cmd_err; + u8 valid; +}; +#define HWRM_NA_SIGNATURE ((__le32)(-1)) +#define HWRM_MAX_REQ_LEN 128 +#define HWRM_MAX_RESP_LEN 704 +#define HW_HASH_INDEX_SIZE 0x80 +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 +#define HWRM_TARGET_ID_BONO 0xFFF8 +#define HWRM_TARGET_ID_KONG 0xFFF9 +#define HWRM_TARGET_ID_APE 0xFFFA +#define HWRM_TARGET_ID_TOOLS 0xFFFD +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 10 +#define HWRM_VERSION_UPDATE 3 +#define HWRM_VERSION_RSVD 44 +#define HWRM_VERSION_STR "1.10.3.44" + +/* hwrm_ver_get_input (size:192b/24B) */ +struct hwrm_ver_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 hwrm_intf_maj; + u8 hwrm_intf_min; + u8 hwrm_intf_upd; + u8 unused_0[5]; +}; + +/* hwrm_ver_get_output (size:1408b/176B) */ +struct hwrm_ver_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hwrm_intf_maj_8b; + u8 hwrm_intf_min_8b; + u8 hwrm_intf_upd_8b; + u8 hwrm_intf_rsvd_8b; + u8 hwrm_fw_maj_8b; + u8 hwrm_fw_min_8b; + u8 hwrm_fw_bld_8b; + u8 hwrm_fw_rsvd_8b; + u8 mgmt_fw_maj_8b; + u8 mgmt_fw_min_8b; + u8 mgmt_fw_bld_8b; + u8 mgmt_fw_rsvd_8b; + u8 netctrl_fw_maj_8b; + u8 netctrl_fw_min_8b; + u8 netctrl_fw_bld_8b; + u8 netctrl_fw_rsvd_8b; + __le32 dev_caps_cfg; + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL + #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL + #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL + 
#define VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED 0x10UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED 0x20UL + #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL + #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL + #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL + #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL + #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL + u8 roce_fw_maj_8b; + u8 roce_fw_min_8b; + u8 roce_fw_bld_8b; + u8 roce_fw_rsvd_8b; + char hwrm_fw_name[16]; + char mgmt_fw_name[16]; + char netctrl_fw_name[16]; + char active_pkg_name[16]; + char roce_fw_name[16]; + __le16 chip_num; + u8 chip_rev; + u8 chip_metal; + u8 chip_bond_id; + u8 chip_platform_type; + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL + #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM + __le16 max_req_win_len; + __le16 max_resp_len; + __le16 def_req_timeout; + u8 flags; + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL + #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL + #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL + u8 unused_0[2]; + u8 always_1; + __le16 hwrm_intf_major; + __le16 hwrm_intf_minor; + __le16 hwrm_intf_build; + __le16 hwrm_intf_patch; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 
mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 max_ext_req_len; + __le16 max_req_timeout; + u8 unused_1[3]; + u8 valid; +}; + +/* eject_cmpl (size:128b/16B) */ +struct eject_cmpl { + __le16 type; + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK 0xffc0UL + #define EJECT_CMPL_FLAGS_SFT 6 + #define EJECT_CMPL_FLAGS_ERROR 0x40UL + __le16 len; + __le32 opaque; + __le16 v; + #define EJECT_CMPL_V 0x1UL + #define EJECT_CMPL_ERRORS_MASK 0xfffeUL + #define EJECT_CMPL_ERRORS_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + __le16 reserved16; + __le32 unused_2; +}; + +/* hwrm_cmpl (size:128b/16B) */ +struct hwrm_cmpl { + __le16 type; + #define CMPL_TYPE_MASK 0x3fUL + #define CMPL_TYPE_SFT 0 + #define CMPL_TYPE_HWRM_DONE 0x20UL + #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE + __le16 sequence_id; + __le32 unused_1; + __le32 v; + #define CMPL_V 0x1UL + __le32 unused_3; +}; + +/* hwrm_fwd_req_cmpl (size:128b/16B) */ +struct hwrm_fwd_req_cmpl { + __le16 req_len_type; + #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL + #define FWD_REQ_CMPL_TYPE_SFT 0 + #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL + #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ + #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL + #define FWD_REQ_CMPL_REQ_LEN_SFT 6 + __le16 
source_id; + __le32 unused0; + __le32 req_buf_addr_v[2]; + #define FWD_REQ_CMPL_V 0x1UL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 +}; + +/* hwrm_fwd_resp_cmpl (size:128b/16B) */ +struct hwrm_fwd_resp_cmpl { + __le16 type; + #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL + #define FWD_RESP_CMPL_TYPE_SFT 0 + #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL + #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP + __le16 source_id; + __le16 resp_len; + __le16 unused_1; + __le32 resp_buf_addr_v[2]; + #define FWD_RESP_CMPL_V 0x1UL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL + #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1 +}; + +/* hwrm_async_event_cmpl (size:128b/16B) */ +struct hwrm_async_event_cmpl { + __le16 type; + #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define 
ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL + #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL + #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL + #define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL + #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL + #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL + #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL + 
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_V 0x1UL + #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4 + #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK 0xff00000UL + 
#define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT 20 +}; + +/* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_mtu_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1 + u8 
timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_dcb_config_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_LAST 
ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_DSCP 0x8UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24 + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24) + #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE +}; + +/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_conn_not_allowed { + __le16 type; 
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_not_allowed (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + 
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL +}; + +/* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_port_phy_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL +}; + +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + __le16 type; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + #define 
ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + __le32 event_data2; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION (0x5UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST 
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 +}; + +/* hwrm_async_event_cmpl_error_recovery (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_recovery { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY 0x9UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_ID_ERROR_RECOVERY + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL +}; + +/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */ +struct hwrm_async_event_cmpl_ring_monitor_msg { + __le16 type; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL + #define 
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG + __le32 event_data2; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_func_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define 
ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_func_flr_proc_cmplt (size:128b/16B) */ +struct hwrm_async_event_cmpl_func_flr_proc_cmplt { + __le16 type; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_LAST ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1 + u8 
timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_unload { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_pf_drvr_load (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_drvr_load { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD + __le32 
event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL + #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_flr (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_flr { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_LAST ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16 +}; + +/* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_mac_addr_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT + 
__le16 event_id; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_pf_vf_comm_status_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + +/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA2_VF_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL + #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL +}; + +/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_llfc_pfc_change { + __le16 type; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE 0x34UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_ID_LLFC_PFC_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_MASK 0x3UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_SFT 0 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LLFC 0x1UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC 0x2UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_LAST ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_LLFC_PFC_PFC + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_MASK 0x1cUL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_SFT 2 + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_MASK 0x1fffe0UL + #define ASYNC_EVENT_CMPL_LLFC_PFC_CHANGE_EVENT_DATA1_PORT_ID_SFT 5 +}; + +/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_default_vnic_change { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK 0xffc0UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT 6 + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION 0x35UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V 0x1UL + #define 
ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK 0x3UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC 0x1UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE 0x2UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK 0x3fcUL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT 2 + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK 0x3fffc00UL + #define ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT 10 +}; + +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 + 
#define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_req { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_done { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST 
ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0 +}; + +/* hwrm_async_event_cmpl_tcp_flag_action_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_tcp_flag_action_change { + __le16 type; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_EVENT_ID_TCP_FLAG_ACTION_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_TCP_FLAG_ACTION_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_eem_flow_active (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_flow_active { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define 
ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_ID_EEM_FLOW_ACTIVE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_MASK 0x3fffffffUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_GLOBAL_ID_2_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION 0x40000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_RX (0x0UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX (0x1UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA2_FLOW_DIRECTION_TX + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_MASK 0x3fffffffUL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_GLOBAL_ID_1_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION 0x40000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 30) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_FLOW_DIRECTION_TX + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE 0x80000000UL + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_0 (0x0UL << 31) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 (0x1UL << 31) + #define ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_LAST ASYNC_EVENT_CMPL_EEM_FLOW_ACTIVE_EVENT_DATA1_MODE_1 +}; + +/* hwrm_async_event_cmpl_eem_cfg_change (size:128b/16B) */ +struct 
hwrm_async_event_cmpl_eem_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE 0x3cUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_ID_EEM_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_TX_ENABLE 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CFG_CHANGE_EVENT_DATA1_EEM_RX_ENABLE 0x2UL +}; + +/* hwrm_async_event_cmpl_quiesce_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_quiesce_done { + __le16 type; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE 0x3fUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_ID_QUIESCE_DONE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SFT 0 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_SUCCESS 0x0UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_TIMEOUT 0x1UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR 0x2UL + #define 
ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_LAST ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_QUIESCE_STATUS_ERROR + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_SFT 8 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_SFT 16 + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_INCOMPLETE_NQ 0x10000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_1 0x20000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_2 0x40000UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_IDLE_STATUS_3 0x80000UL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_V 0x1UL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */ +struct hwrm_async_event_cmpl_deferred_response { + __le16 type; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL + 
#define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_pfc_watchdog_cfg_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_pfc_watchdog_cfg_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_MASK 0xffUL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_SFT 0 + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS0 0x1UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS1 0x2UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS2 0x4UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS3 0x8UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS4 0x10UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS5 0x20UL + #define 
ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS6 0x40UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PFC_WD_COS_PFC_WD_COS7 0x80UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff00UL + #define ASYNC_EVENT_CMPL_PFC_WATCHDOG_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 8 +}; + +/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */ +struct hwrm_async_event_cmpl_echo_request { + __le16 type; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */ +struct hwrm_async_event_cmpl_phc_update { + __le16 type; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE + __le32 event_data2; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0 + #define 
ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4 +}; + +/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */ +struct hwrm_async_event_cmpl_pps_timestamp { + __le16 type; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP 0x44UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_LAST ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_ID_PPS_TIMESTAMP + __le32 event_data2; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL 0x0UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_LAST 
ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK 0xeUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT 1 + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK 0xffff0UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT 4 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_V 0x1UL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT 0 +}; + +/* hwrm_async_event_cmpl_error_report (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_EVENT_DATA1_ERROR_TYPE_SFT 0 +}; + +/* hwrm_async_event_cmpl_doorbell_pacing_threshold (size:128b/16B) */ +struct hwrm_async_event_cmpl_doorbell_pacing_threshold { + __le16 type; + #define 
ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_EVENT_ID_DOORBELL_PACING_THRESHOLD + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_V 0x1UL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_THRESHOLD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_rss_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_rss_change { + __le16 type; + #define ASYNC_EVENT_CMPL_RSS_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RSS_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RSS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RSS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_RSS_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RSS_CHANGE_EVENT_ID_RSS_CHANGE 0x47UL + #define ASYNC_EVENT_CMPL_RSS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_RSS_CHANGE_EVENT_ID_RSS_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_RSS_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_RSS_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RSS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_doorbell_pacing_nq_update (size:128b/16B) */ +struct hwrm_async_event_cmpl_doorbell_pacing_nq_update { + __le16 type; + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_EVENT_ID_DOORBELL_PACING_NQ_UPDATE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_V 0x1UL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DOORBELL_PACING_NQ_UPDATE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_hw_doorbell_recovery_read_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_doorbell_recovery_read_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define 
ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_MASK 0xfUL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_SFT 0 + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_SQ_ERR 0x1UL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_RQ_ERR 0x2UL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_SRQ_ERR 0x4UL + #define ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_CQ_ERR 0x8UL +}; + +/* hwrm_async_event_cmpl_ctx_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_ctx_error { + __le16 type; + #define ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR 0x4aUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE 0x1UL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_ALLOC 0x0UL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE 0x1UL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_LAST ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_MASK 0xfffeUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_SFT 1 + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_SFT 16 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_CTX_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 
event_data1; + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_SFT 0 +}; + +/* hwrm_async_event_udcc_session_change (size:128b/16B) */ +struct hwrm_async_event_udcc_session_change { + __le16 type; + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_LAST ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_LAST ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE + __le32 event_data2; + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_MASK 0xffUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_SFT 0 + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_CREATED 0x0UL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED 0x1UL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_LAST ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED + u8 opaque_v; + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_V 0x1UL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_MASK 0xffffUL + #define ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_SFT 0 +}; + +/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */ +struct hwrm_async_event_cmpl_dbg_buf_producer { + __le16 type; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0 + #define 
ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER + __le32 event_data2; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V 0x1UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE 0x0UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE 0x1UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE 0x2UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE 0x3UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE 0x4UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE 0x5UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE 0x6UL + #define ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE +}; + +/* hwrm_async_event_cmpl_peer_mmap_change (size:128b/16B) */ +struct hwrm_async_event_cmpl_peer_mmap_change { + __le16 type; + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define 
ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_PEER_MMAP_CHANGE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_V 0x1UL + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */ +struct hwrm_async_event_cmpl_fw_trace_msg { + __le16 type; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG 0xfeUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_ID_FW_TRACE_MSG + __le32 event_data2; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_MASK 0xffUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE0_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE1_SFT 8 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE2_SFT 16 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA2_BYTE3_SFT 24 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_V 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_OPAQUE_SFT 1 + u8 timestamp_lo; + #define 
ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_COMPLETE 0x0UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL 0x1UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_STRING_PARTIAL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE 0x2UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_PRIMARY (0x0UL << 1) + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY (0x1UL << 1) + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_LAST ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_LO_FIRMWARE_SECONDARY + __le16 timestamp_hi; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_MASK 0xffUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE4_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_TIMESTAMP_HI_BYTE5_SFT 8 + __le32 event_data1; + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_MASK 0xffUL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE6_SFT 0 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE7_SFT 8 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_MASK 0xff0000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE8_SFT 16 + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_MASK 0xff000000UL + #define ASYNC_EVENT_CMPL_FW_TRACE_MSG_EVENT_DATA1_BYTE9_SFT 24 +}; + +/* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */ +struct hwrm_async_event_cmpl_hwrm_error { + __le16 type; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define 
ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR + __le32 event_data2; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0 + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL +}; + +/* hwrm_async_event_cmpl_error_report_base (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_base { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + +/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_pause_storm { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_PAUSE_STORM_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM +}; + +/* hwrm_async_event_cmpl_error_report_invalid_signal (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_invalid_signal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL +}; + +/* 
hwrm_async_event_cmpl_error_report_nvm (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_nvm { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK 0xffffffffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT 0 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR 0x3UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_ERROR_TYPE_NVM_ERROR + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE (0x2UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE +}; + +/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */ +struct 
hwrm_async_event_cmpl_error_report_doorbell_drop_threshold { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8 +}; + +/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_thermal { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT + __le32 event_data2; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8 + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8) + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11) + #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING +}; + +/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */ +struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported { + __le16 type; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define 
ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL + #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED +}; + +/* metadata_base_msg (size:64b/8B) */ +struct metadata_base_msg { + __le16 md_type_link; + #define METADATA_BASE_MSG_MD_TYPE_MASK 0x1fUL + #define METADATA_BASE_MSG_MD_TYPE_SFT 0 + #define METADATA_BASE_MSG_MD_TYPE_NONE 0x0UL + #define METADATA_BASE_MSG_MD_TYPE_TLS_INSYNC 0x1UL + #define METADATA_BASE_MSG_MD_TYPE_TLS_RESYNC 0x2UL + #define METADATA_BASE_MSG_MD_TYPE_QUIC 0x3UL + #define METADATA_BASE_MSG_MD_TYPE_ILLEGAL 0x1fUL + #define METADATA_BASE_MSG_MD_TYPE_LAST METADATA_BASE_MSG_MD_TYPE_ILLEGAL + #define METADATA_BASE_MSG_LINK_MASK 0x1e0UL + #define METADATA_BASE_MSG_LINK_SFT 5 + __le16 unused0; + __le32 unused1; +}; + +/* tls_metadata_base_msg (size:64b/8B) */ +struct tls_metadata_base_msg { + __le32 md_type_link_flags_kid_lo; + #define TLS_METADATA_BASE_MSG_MD_TYPE_MASK 0x1fUL + #define TLS_METADATA_BASE_MSG_MD_TYPE_SFT 0 + #define TLS_METADATA_BASE_MSG_MD_TYPE_TLS_INSYNC 0x1UL + #define TLS_METADATA_BASE_MSG_MD_TYPE_TLS_RESYNC 0x2UL + #define TLS_METADATA_BASE_MSG_MD_TYPE_LAST TLS_METADATA_BASE_MSG_MD_TYPE_TLS_RESYNC + #define TLS_METADATA_BASE_MSG_LINK_MASK 0x1e0UL + #define TLS_METADATA_BASE_MSG_LINK_SFT 5 + #define TLS_METADATA_BASE_MSG_FLAGS_MASK 0x1fffe00UL + #define TLS_METADATA_BASE_MSG_FLAGS_SFT 9 + #define TLS_METADATA_BASE_MSG_FLAGS_DECRYPTED 0x200UL + #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_MASK 0xc00UL + #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_SFT 10 + #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_NOT_VALID (0x0UL << 10) 
+ #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_CUR_REC (0x1UL << 10) + #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_PRIOR_REC (0x2UL << 10) + #define TLS_METADATA_BASE_MSG_FLAGS_GHASH_LAST TLS_METADATA_BASE_MSG_FLAGS_GHASH_PRIOR_REC + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_MASK 0x3000UL + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_SFT 12 + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_NOT_CHECKED (0x0UL << 12) + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_SUCCESS (0x1UL << 12) + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_FAILURE (0x2UL << 12) + #define TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_LAST TLS_METADATA_BASE_MSG_FLAGS_TAG_AUTH_STATUS_FAILURE + #define TLS_METADATA_BASE_MSG_FLAGS_HEADER_FLDS_VALID 0x4000UL + #define TLS_METADATA_BASE_MSG_FLAGS_CTX_LOAD_ERR 0x8000UL + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_MASK 0x70000UL + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_SFT 16 + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_IN_ORDER (0x0UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_OUT_OF_ORDER (0x1UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_HEADER_SEARCH (0x2UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC (0x3UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT (0x4UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT_PARTIAL (0x5UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS (0x6UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT (0x7UL << 16) + #define TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_LAST TLS_METADATA_BASE_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT + #define TLS_METADATA_BASE_MSG_KID_LO_MASK 0xfe000000UL + #define TLS_METADATA_BASE_MSG_KID_LO_SFT 25 + __le16 kid_hi; + #define TLS_METADATA_BASE_MSG_KID_HI_MASK 0x1fffUL + #define TLS_METADATA_BASE_MSG_KID_HI_SFT 0 + 
__le16 unused0; +}; + +/* tls_metadata_insync_msg (size:192b/24B) */ +struct tls_metadata_insync_msg { + __le32 md_type_link_flags_kid_lo; + #define TLS_METADATA_INSYNC_MSG_MD_TYPE_MASK 0x1fUL + #define TLS_METADATA_INSYNC_MSG_MD_TYPE_SFT 0 + #define TLS_METADATA_INSYNC_MSG_MD_TYPE_TLS_INSYNC 0x1UL + #define TLS_METADATA_INSYNC_MSG_MD_TYPE_LAST TLS_METADATA_INSYNC_MSG_MD_TYPE_TLS_INSYNC + #define TLS_METADATA_INSYNC_MSG_LINK_MASK 0x1e0UL + #define TLS_METADATA_INSYNC_MSG_LINK_SFT 5 + #define TLS_METADATA_INSYNC_MSG_FLAGS_MASK 0x1fffe00UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_SFT 9 + #define TLS_METADATA_INSYNC_MSG_FLAGS_DECRYPTED 0x200UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_MASK 0xc00UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_SFT 10 + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_NOT_VALID (0x0UL << 10) + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_CUR_REC (0x1UL << 10) + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_PRIOR_REC (0x2UL << 10) + #define TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_LAST TLS_METADATA_INSYNC_MSG_FLAGS_GHASH_PRIOR_REC + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_MASK 0x3000UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_SFT 12 + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_NOT_CHECKED (0x0UL << 12) + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_SUCCESS (0x1UL << 12) + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_FAILURE (0x2UL << 12) + #define TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_LAST TLS_METADATA_INSYNC_MSG_FLAGS_TAG_AUTH_STATUS_FAILURE + #define TLS_METADATA_INSYNC_MSG_FLAGS_HEADER_FLDS_VALID 0x4000UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_CTX_LOAD_ERR 0x8000UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_MASK 0x70000UL + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_SFT 16 + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_IN_ORDER (0x0UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_OUT_OF_ORDER (0x1UL << 16) + 
#define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_HEADER_SEARCH (0x2UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC (0x3UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT (0x4UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT_PARTIAL (0x5UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS (0x6UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT (0x7UL << 16) + #define TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_LAST TLS_METADATA_INSYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT + #define TLS_METADATA_INSYNC_MSG_KID_LO_MASK 0xfe000000UL + #define TLS_METADATA_INSYNC_MSG_KID_LO_SFT 25 + __le16 kid_hi; + #define TLS_METADATA_INSYNC_MSG_KID_HI_MASK 0x1fffUL + #define TLS_METADATA_INSYNC_MSG_KID_HI_SFT 0 + __le16 tls_header_offset; + __le64 record_seq_num; + u8 partial_ghash[8]; +}; + +/* tls_metadata_resync_msg (size:256b/32B) */ +struct tls_metadata_resync_msg { + __le32 md_type_link_flags_kid_lo; + #define TLS_METADATA_RESYNC_MSG_MD_TYPE_MASK 0x1fUL + #define TLS_METADATA_RESYNC_MSG_MD_TYPE_SFT 0 + #define TLS_METADATA_RESYNC_MSG_MD_TYPE_TLS_RESYNC 0x2UL + #define TLS_METADATA_RESYNC_MSG_MD_TYPE_LAST TLS_METADATA_RESYNC_MSG_MD_TYPE_TLS_RESYNC + #define TLS_METADATA_RESYNC_MSG_LINK_MASK 0x1e0UL + #define TLS_METADATA_RESYNC_MSG_LINK_SFT 5 + #define TLS_METADATA_RESYNC_MSG_FLAGS_MASK 0x1fffe00UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_SFT 9 + #define TLS_METADATA_RESYNC_MSG_FLAGS_DECRYPTED 0x200UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_GHASH_MASK 0xc00UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_GHASH_SFT 10 + #define TLS_METADATA_RESYNC_MSG_FLAGS_GHASH_NOT_VALID (0x0UL << 10) + #define TLS_METADATA_RESYNC_MSG_FLAGS_GHASH_LAST TLS_METADATA_RESYNC_MSG_FLAGS_GHASH_NOT_VALID + #define TLS_METADATA_RESYNC_MSG_FLAGS_TAG_AUTH_STATUS_MASK 0x3000UL + #define 
TLS_METADATA_RESYNC_MSG_FLAGS_TAG_AUTH_STATUS_SFT 12 + #define TLS_METADATA_RESYNC_MSG_FLAGS_TAG_AUTH_STATUS_NOT_CHECKED (0x0UL << 12) + #define TLS_METADATA_RESYNC_MSG_FLAGS_TAG_AUTH_STATUS_LAST TLS_METADATA_RESYNC_MSG_FLAGS_TAG_AUTH_STATUS_NOT_CHECKED + #define TLS_METADATA_RESYNC_MSG_FLAGS_HEADER_FLDS_VALID 0x4000UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_CTX_LOAD_ERR 0x8000UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_MASK 0x70000UL + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_SFT 16 + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_IN_ORDER (0x0UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_OUT_OF_ORDER (0x1UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_HEADER_SEARCH (0x2UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC (0x3UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT (0x4UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_WAIT_PARTIAL (0x5UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS (0x6UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT (0x7UL << 16) + #define TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_LAST TLS_METADATA_RESYNC_MSG_FLAGS_PKT_OPERATION_STATE_RESYNC_SUCCESS_WAIT + #define TLS_METADATA_RESYNC_MSG_KID_LO_MASK 0xfe000000UL + #define TLS_METADATA_RESYNC_MSG_KID_LO_SFT 25 + __le16 kid_hi; + #define TLS_METADATA_RESYNC_MSG_KID_HI_MASK 0x1fffUL + #define TLS_METADATA_RESYNC_MSG_KID_HI_SFT 0 + __le16 metadata_0; + __le32 resync_record_tcp_seq_num; + __le32 unused0; + __le64 metadata_2; + __le64 metadata_3; +}; + +/* hwrm_func_reset_input (size:192b/24B) */ +struct hwrm_func_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL + __le16 vf_id; + u8 func_reset_level; + 
#define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL + #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF + u8 unused_0; +}; + +/* hwrm_func_reset_output (size:128b/16B) */ +struct hwrm_func_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_getfid_input (size:192b/24B) */ +struct hwrm_func_getfid_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL + __le16 pci_id; + u8 unused_0[2]; +}; + +/* hwrm_func_getfid_output (size:128b/16B) */ +struct hwrm_func_getfid_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_alloc_input (size:192b/24B) */ +struct hwrm_func_vf_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_alloc_output (size:128b/16B) */ +struct hwrm_func_vf_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_vf_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_vf_free_input (size:192b/24B) */ +struct hwrm_func_vf_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL + __le16 first_vf_id; + __le16 num_vfs; +}; + +/* hwrm_func_vf_free_output (size:128b/16B) */ +struct hwrm_func_vf_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_cfg_input (size:576b/72B) */ +struct hwrm_func_vf_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL + #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL + #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL + __le16 mtu; + __le16 guest_vlan; + __le16 async_event_cr; + u8 dflt_mac_addr[6]; + __le32 flags; + #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL + #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL + #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL + #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL + #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL + #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL + #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL + #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + 
__le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le16 num_msix; + u8 unused[2]; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_qcaps_input (size:192b/24B) */ +struct hwrm_func_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcaps_output (size:1088b/136B) */ +struct hwrm_func_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le32 flags; + #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_ADOPTED_PF_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 
0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_COREDUMP_CMD_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_CRASHDUMP_CMD_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_PFC_WD_STATS_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED 0x80000000UL + u8 mac_address[6]; + __le16 max_rsscos_ctx; + __le16 max_cmpl_rings; + __le16 max_tx_rings; + __le16 max_rx_rings; + __le16 max_l2_ctxs; + __le16 max_vnics; + __le16 first_vf_id; + __le16 max_vfs; + __le16 max_stat_ctx; + __le32 max_encap_records; + __le32 max_decap_records; + __le32 max_tx_em_flows; + __le32 max_tx_wm_flows; + __le32 max_rx_em_flows; + __le32 max_rx_wm_flows; + __le32 max_mcast_filters; + __le32 max_flow_id; + __le32 max_hw_ring_grps; + __le16 max_sp_tx_rings; + __le16 max_msix_vfs; + __le32 flags_ext; + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL + #define 
FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL + u8 max_schqs; + u8 mpc_chnls_cap; + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL + #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL + __le16 max_key_ctxs_alloc; + __le32 flags_ext2; + #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL + #define 
FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED 0x10000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED 0x20000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED 0x40000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED 0x80000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL + __le16 tunnel_disable_flag; + #define 
FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL + #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL + __le16 xid_partition_cap; + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL + #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL + u8 device_serial_number[8]; + __le16 ctxs_per_partition; + __le16 max_tso_segs; + __le32 roce_vf_max_av; + __le32 roce_vf_max_cq; + __le32 roce_vf_max_mrw; + __le32 roce_vf_max_qp; + __le32 roce_vf_max_srq; + __le32 roce_vf_max_gid; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_func_qcfg_input (size:192b/24B) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_qcfg_output (size:1280b/160B) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL + #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL + #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL + #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL + #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL + #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL + #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL + #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL + 
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL + #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL + #define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL + #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 admin_mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2 0x5UL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN + u8 port_pf_cnt; + #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL + #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL + __le16 dflt_vnic_id; + __le16 max_mtu_configured; + __le32 min_bw; + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + 
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL + #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL + #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL + #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define 
FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_QCFG_RESP_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 4 + __le16 alloc_vfs; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + __le16 alloc_sp_tx_rings; + __le16 alloc_stat_ctx; + __le16 alloc_msix; + __le16 registered_vfs; + __le16 l2_doorbell_bar_size_kb; + u8 active_endpoints; + u8 always_1; + __le32 reset_addr_poll; + __le16 legacy_l2_db_size_kb; + __le16 svif_info; + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0 + #define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL + u8 mpc_chnls; + #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL + #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL + #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL + #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL + #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL + u8 db_page_size; + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB + __le16 roce_vnic_id; + __le32 partition_min_bw; + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE 0x10000000UL + #define 
FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 + __le32 partition_max_bw; + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE 0x10000000UL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_SCALE_BYTES + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 + __le16 host_mtu; + u8 unused_3[2]; + u8 unused_4[2]; + u8 port_kdnet_mode; + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL + #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED + u8 kdnet_pcie_function; + __le16 port_kdnet_fid; + u8 unused_5[2]; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + u8 lag_id; + u8 parif; + u8 fw_lag_id; + u8 unused_6; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 roce_max_av_per_vf; + __le32 roce_max_cq_per_vf; + __le32 roce_max_mrw_per_vf; + __le32 roce_max_qp_per_vf; + __le32 
roce_max_srq_per_vf; + __le32 roce_max_gid_per_vf; + __le16 xid_partition_cfg; + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL + u8 unused_7; + u8 valid; +}; + +/* hwrm_func_cfg_input (size:1280b/160B) */ +struct hwrm_func_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 num_msix; + __le32 flags; + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL + #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL + #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL + #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2 + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL + #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL + #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL + #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL + #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL + #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL + #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL + #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL + #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL + #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL + #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL + #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL + #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL + #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL + #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL + #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL + #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL + #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL + __le32 enables; + #define 
FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL + #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL + #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL + #define FUNC_CFG_REQ_ENABLES_PARTITION_MIN_BW 0x4000000UL + #define FUNC_CFG_REQ_ENABLES_PARTITION_MAX_BW 0x8000000UL + #define FUNC_CFG_REQ_ENABLES_TPID 0x10000000UL + #define FUNC_CFG_REQ_ENABLES_HOST_MTU 0x20000000UL + #define FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS 0x40000000UL + #define FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS 0x80000000UL + __le16 admin_mtu; + __le16 mru; + __le16 num_rsscos_ctxs; + __le16 num_cmpl_rings; + __le16 num_tx_rings; + __le16 num_rx_rings; + __le16 num_l2_ctxs; + __le16 num_vnics; + __le16 num_stat_ctxs; + __le16 num_hw_ring_grps; + u8 dflt_mac_addr[6]; + __le16 dflt_vlan; + __be32 dflt_ip_addr[4]; + 
__le32 min_bw; + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 max_bw; + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + __le16 async_event_cr; + u8 vlan_antispoof_mode; + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL + 
#define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL + #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN + u8 allowed_vlan_pris; + u8 evb_mode; + #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL + #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL + #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL + #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_MASK 0xcUL + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_SFT 2 + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (0x0UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (0x1UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO (0x2UL << 2) + #define FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_LAST FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 + __le16 num_mcast_filters; + __le16 schq_id; + __le16 mpc_chnls; + #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL + #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL + #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL + #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL + #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL + #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL + #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL + #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL + #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL + #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL + __le32 
partition_min_bw; + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_SCALE_BYTES + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 + __le32 partition_max_bw; + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_SFT 0 + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE 0x10000000UL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BITS (0x0UL << 28) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_SCALE_BYTES + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 + __be16 tpid; + __le16 host_mtu; + __le32 flags2; + #define FUNC_CFG_REQ_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST 0x1UL + #define FUNC_CFG_REQ_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST 0x2UL + __le32 enables2; + #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL + #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL + #define FUNC_CFG_REQ_ENABLES2_QUIC_TX_KEY_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES2_QUIC_RX_KEY_CTXS 0x8UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF 0x10UL + #define 
FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF 0x20UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF 0x40UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF 0x80UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF 0x100UL + #define FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF 0x200UL + #define FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG 0x400UL + u8 port_kdnet_mode; + #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL + #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED + u8 db_page_size; + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB + u8 unused_1[2]; + __le32 num_ktls_tx_key_ctxs; + __le32 num_ktls_rx_key_ctxs; + __le32 num_quic_tx_key_ctxs; + __le32 num_quic_rx_key_ctxs; + __le32 roce_max_av_per_vf; + __le32 roce_max_cq_per_vf; + __le32 roce_max_mrw_per_vf; + __le32 roce_max_qp_per_vf; + __le32 roce_max_srq_per_vf; + __le32 roce_max_gid_per_vf; + __le16 xid_partition_cfg; + #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL + #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL + __le16 unused_2; +}; + +/* hwrm_func_cfg_output (size:128b/16B) */ +struct hwrm_func_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_cfg_cmd_err (size:64b/8B) */ +struct hwrm_func_cfg_cmd_err { + u8 code; + #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define 
FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL + #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL + #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT + u8 unused_0[7]; +}; + +/* hwrm_func_qstats_input (size:192b/24B) */ +struct hwrm_func_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL + u8 unused_0[5]; +}; + +/* hwrm_func_qstats_output (size:1408b/176B) */ +struct hwrm_func_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_drop_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_drop_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 clear_seq; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_func_qstats_ext_input (size:256b/32B) */ +struct hwrm_func_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 flags; + #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL + u8 unused_0[1]; + __le32 enables; + #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL + __le16 schq_id; + __le16 traffic_class; + u8 unused_1[4]; +}; + +/* hwrm_func_qstats_ext_output (size:1536b/192B) */ +struct hwrm_func_qstats_ext_output { + __le16 error_code; + 
__le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + __le64 rx_tpa_events; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_clr_stats_input (size:192b/24B) */ +struct hwrm_func_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_clr_stats_output (size:128b/16B) */ +struct hwrm_func_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_resc_free_input (size:192b/24B) */ +struct hwrm_func_vf_resc_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_func_vf_resc_free_output (size:128b/16B) */ +struct hwrm_func_vf_resc_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ +struct hwrm_func_drv_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL + #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + #define 
FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL + #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL + #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL + #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL + __le32 enables; + #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL + #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL + #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL + #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL + __le16 os_type; + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le32 timestamp; + u8 unused_1[4]; + __le32 vf_req_fwd[8]; + __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; +}; + +/* hwrm_func_drv_rgtr_output (size:128b/16B) */ +struct hwrm_func_drv_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED 0x1UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_drv_unrgtr_input (size:192b/24B) */ +struct 
hwrm_func_drv_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_func_drv_unrgtr_output (size:128b/16B) */ +struct hwrm_func_drv_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_rgtr_input (size:1024b/128B) */ +struct hwrm_func_buf_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL + #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL + __le16 vf_id; + __le16 req_buf_num_pages; + __le16 req_buf_page_size; + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G + __le16 req_buf_len; + __le16 resp_buf_len; + u8 unused_0[2]; + __le64 req_buf_page_addr0; + __le64 req_buf_page_addr1; + __le64 req_buf_page_addr2; + __le64 req_buf_page_addr3; + __le64 req_buf_page_addr4; + __le64 req_buf_page_addr5; + __le64 req_buf_page_addr6; + __le64 req_buf_page_addr7; + __le64 req_buf_page_addr8; + __le64 req_buf_page_addr9; + __le64 error_buf_addr; + __le64 resp_buf_addr; +}; + +/* hwrm_func_buf_rgtr_output (size:128b/16B) */ +struct hwrm_func_buf_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_buf_unrgtr_input (size:192b/24B) */ +struct hwrm_func_buf_unrgtr_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_BUF_UNRGTR_REQ_ENABLES_VF_ID 0x1UL + __le16 vf_id; + u8 unused_0[2]; +}; + +/* hwrm_func_buf_unrgtr_output (size:128b/16B) */ +struct hwrm_func_buf_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_qver_input (size:192b/24B) */ +struct hwrm_func_drv_qver_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 reserved; + __le16 fid; + u8 driver_type; + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL + #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE + u8 unused_0; +}; + +/* hwrm_func_drv_qver_output (size:256b/32B) */ +struct hwrm_func_drv_qver_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 os_type; + #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL + #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL + #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; + u8 unused_0[3]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_resource_qcaps_input (size:192b/24B) */ +struct hwrm_func_resource_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + 
__le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_resource_qcaps_output (size:704b/88B) */ +struct hwrm_func_resource_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_vfs; + __le16 max_msix; + __le16 vf_reservation_strategy; + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 max_tx_scheduler_inputs; + __le16 flags; + #define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */ +struct hwrm_func_vf_resource_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 max_msix; + __le16 min_rsscos_ctx; + __le16 max_rsscos_ctx; + __le16 min_cmpl_rings; + __le16 max_cmpl_rings; + __le16 min_tx_rings; + __le16 max_tx_rings; + __le16 min_rx_rings; + __le16 max_rx_rings; + __le16 min_l2_ctxs; + __le16 max_l2_ctxs; + __le16 min_vnics; + __le16 max_vnics; + __le16 min_stat_ctx; + __le16 max_stat_ctx; + __le16 
min_hw_ring_grps; + __le16 max_hw_ring_grps; + __le16 flags; + #define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL + __le16 min_msix; + __le32 min_ktls_tx_key_ctxs; + __le32 max_ktls_tx_key_ctxs; + __le32 min_ktls_rx_key_ctxs; + __le32 max_ktls_rx_key_ctxs; + __le32 min_quic_tx_key_ctxs; + __le32 max_quic_tx_key_ctxs; + __le32 min_quic_rx_key_ctxs; + __le32 max_quic_rx_key_ctxs; +}; + +/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */ +struct hwrm_func_vf_resource_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reserved_rsscos_ctx; + __le16 reserved_cmpl_rings; + __le16 reserved_tx_rings; + __le16 reserved_rx_rings; + __le16 reserved_l2_ctxs; + __le16 reserved_vnics; + __le16 reserved_stat_ctx; + __le16 reserved_hw_ring_grps; + __le32 reserved_ktls_tx_key_ctxs; + __le32 reserved_ktls_rx_key_ctxs; + __le32 reserved_quic_tx_key_ctxs; + __le32 reserved_quic_rx_key_ctxs; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcaps_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcaps_output (size:832b/104B) */ +struct hwrm_func_backing_store_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 qp_max_entries; + __le16 qp_min_qp1_entries; + __le16 qp_max_l2_entries; + __le16 qp_entry_size; + __le16 srq_max_l2_entries; + __le32 srq_max_entries; + __le16 srq_entry_size; + __le16 cq_max_l2_entries; + __le32 cq_max_entries; + __le16 cq_entry_size; + __le16 vnic_max_vnic_entries; + __le16 vnic_max_ring_table_entries; + __le16 vnic_entry_size; + __le32 stat_max_entries; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le32 tqm_min_entries_per_ring; + __le32 tqm_max_entries_per_ring; + __le32 mrav_max_entries; + __le16 mrav_entry_size; + __le16 tim_entry_size; + __le32 tim_max_entries; + __le16 
mrav_num_entries_units; + u8 tqm_entries_multiple; + u8 ctx_kind_initializer; + __le16 ctx_init_mask; + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_TKC 0x40UL + #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_RKC 0x80UL + u8 qp_init_offset; + u8 srq_init_offset; + u8 cq_init_offset; + u8 vnic_init_offset; + u8 tqm_fp_rings_count; + u8 stat_init_offset; + u8 mrav_init_offset; + u8 tqm_fp_rings_count_ext; + u8 tkc_init_offset; + u8 rkc_init_offset; + __le16 tkc_entry_size; + __le16 rkc_entry_size; + __le32 tkc_max_entries; + __le32 rkc_max_entries; + __le16 fast_qpmd_qp_num_entries; + u8 rsvd1[5]; + u8 valid; +}; + +/* tqm_fp_ring_cfg (size:128b/16B) */ +struct tqm_fp_ring_cfg { + u8 tqm_ring_pg_size_tqm_ring_lvl; + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4 + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define 
TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G + u8 unused[3]; + __le32 tqm_ring_num_entries; + __le64 tqm_ring_page_dir; +}; + +/* hwrm_func_backing_store_cfg_input (size:2688b/336B) */ +struct hwrm_func_backing_store_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL + __le32 enables; + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL + #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL + #define 
FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + 
#define FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_LAST 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_SFT 4 + #define 
FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le32 qp_num_entries; + __le32 srq_num_entries; + 
__le32 cq_num_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le16 qp_entry_size; + __le16 srq_num_l2_entries; + __le16 srq_entry_size; + __le16 cq_num_l2_entries; + __le16 cq_entry_size; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le16 vnic_entry_size; + __le16 stat_entry_size; + __le16 tqm_entry_size; + __le16 mrav_entry_size; + __le16 tim_entry_size; + u8 tqm_ring8_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G + u8 
ring8_unused[3]; + __le32 tqm_ring8_num_entries; + __le64 tqm_ring8_page_dir; + u8 tqm_ring9_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G + u8 ring9_unused[3]; + __le32 tqm_ring9_num_entries; + __le64 tqm_ring9_page_dir; + u8 tqm_ring10_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL + #define 
FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G + u8 ring10_unused[3]; + __le32 tqm_ring10_num_entries; + __le64 tqm_ring10_page_dir; + __le32 tkc_num_entries; + __le32 rkc_num_entries; + __le64 tkc_page_dir; + __le64 rkc_page_dir; + __le16 tkc_entry_size; + __le16 rkc_entry_size; + u8 tkc_pg_size_tkc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_TKC_PG_SIZE_PG_1G + u8 rkc_pg_size_rkc_lvl; + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_MASK 0xfUL + 
#define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G + __le16 qp_num_fast_qpmd_entries; +}; + +/* hwrm_func_backing_store_cfg_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcfg_input (size:128b/16B) */ +struct hwrm_func_backing_store_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_backing_store_qcfg_output (size:2496b/312B) */ +struct hwrm_func_backing_store_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_BACKING_STORE_QCFG_RESP_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL + __le32 enables; + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_QP 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_SRQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_CQ 0x4UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_VNIC 0x8UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_ENABLES_STAT 0x10UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_SP 0x20UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING0 0x40UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING1 0x80UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING2 0x100UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING3 0x200UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING4 0x400UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING5 0x800UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING6 0x1000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING7 0x2000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_MRAV 0x4000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TIM 0x8000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING8 0x10000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING9 0x20000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TQM_RING10 0x40000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_TKC 0x80000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_RKC 0x100000UL + #define FUNC_BACKING_STORE_QCFG_RESP_ENABLES_QP_FAST_QPMD 0x200000UL + u8 qpc_pg_size_qpc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_2M (0x3UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_QPC_PG_SIZE_PG_1G + u8 srq_pg_size_srq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_SRQ_PG_SIZE_PG_1G + u8 cq_pg_size_cq_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_64K 
(0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_CQ_PG_SIZE_PG_1G + u8 vnic_pg_size_vnic_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_VNIC_PG_SIZE_PG_1G + u8 stat_pg_size_stat_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_4K (0x0UL 
<< 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_STAT_PG_SIZE_PG_1G + u8 tqm_sp_pg_size_tqm_sp_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_SP_PG_SIZE_PG_1G + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LAST 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_LVL_LVL_2 + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_MASK 0xfUL + #define 
FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TQM_RING7_PG_SIZE_PG_1G + u8 mrav_pg_size_mrav_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_8M (0x4UL << 4) + #define 
FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_MRAV_PG_SIZE_PG_1G + u8 tim_pg_size_tim_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TIM_PG_SIZE_PG_1G + __le64 qpc_page_dir; + __le64 srq_page_dir; + __le64 cq_page_dir; + __le64 vnic_page_dir; + __le64 stat_page_dir; + __le64 tqm_sp_page_dir; + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le64 mrav_page_dir; + __le64 tim_page_dir; + __le16 qp_num_qp1_entries; + __le16 qp_num_l2_entries; + __le32 qp_num_entries; + __le32 srq_num_entries; + __le16 srq_num_l2_entries; + __le16 cq_num_l2_entries; + __le32 cq_num_entries; + __le16 vnic_num_vnic_entries; + __le16 vnic_num_ring_table_entries; + __le32 stat_num_entries; + __le32 tqm_sp_num_entries; + __le32 tqm_ring0_num_entries; + __le32 
tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le32 mrav_num_entries; + __le32 tim_num_entries; + u8 tqm_ring8_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_RING8_TQM_RING_PG_SIZE_PG_1G + u8 ring8_unused[3]; + __le32 tqm_ring8_num_entries; + __le64 tqm_ring8_page_dir; + u8 tqm_ring9_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_LVL_2 0x2UL + #define 
FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_RING9_TQM_RING_PG_SIZE_PG_1G + u8 ring9_unused[3]; + __le32 tqm_ring9_num_entries; + __le64 tqm_ring9_page_dir; + u8 tqm_ring10_pg_size_tqm_ring_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL 
<< 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_RING10_TQM_RING_PG_SIZE_PG_1G + u8 ring10_unused[3]; + __le32 tqm_ring10_num_entries; + __le64 tqm_ring10_page_dir; + __le32 tkc_num_entries; + __le32 rkc_num_entries; + __le64 tkc_page_dir; + __le64 rkc_page_dir; + u8 tkc_pg_size_tkc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_TKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_TKC_PG_SIZE_PG_1G + u8 rkc_pg_size_rkc_lvl; + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_LAST FUNC_BACKING_STORE_QCFG_RESP_RKC_LVL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_SFT 4 + #define 
FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_QCFG_RESP_RKC_PG_SIZE_PG_1G + __le16 qp_num_fast_qpmd_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_error_recovery_qcfg_input (size:192b/24B) */ +struct hwrm_error_recovery_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU 0x2UL + __le32 driver_polling_freq; + __le32 master_func_wait_period; + __le32 normal_func_wait_period; + __le32 master_func_wait_period_after_reset; + __le32 max_bailout_time_after_reset; + __le32 fw_health_status_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_MASK 0xfffffffcUL + #define 
ERROR_RECOVERY_QCFG_RESP_FW_HEALTH_STATUS_REG_ADDR_SFT 2 + __le32 fw_heartbeat_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_HEARTBEAT_REG_ADDR_SFT 2 + __le32 fw_reset_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_FW_RESET_CNT_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg; + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR0 0x2UL + #define 
ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_INPROGRESS_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg_mask; + u8 unused_0[3]; + u8 reg_array_cnt; + __le32 reset_reg[16]; + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_RESET_REG_ADDR_SFT 2 + __le32 reset_reg_val[16]; + u8 delay_after_reset[16]; + __le32 err_recovery_cnt_reg; + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_SFT 0 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_LAST ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SPACE_BAR1 + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ERROR_RECOVERY_QCFG_RESP_ERR_RECOVERY_CNT_REG_ADDR_SFT 2 + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_func_echo_response_input 
(size:192b/24B) */ +struct hwrm_func_echo_response_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 event_data1; + __le32 event_data2; +}; + +/* hwrm_func_echo_response_output (size:128b/16B) */ +struct hwrm_func_echo_response_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_pin_qcfg_input (size:192b/24B) */ +struct hwrm_func_ptp_pin_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_ptp_pin_qcfg_output (size:128b/16B) */ +struct hwrm_func_ptp_pin_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_pins; + u8 state; + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN0_ENABLED 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN1_ENABLED 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN2_ENABLED 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_STATE_PIN3_ENABLED 0x8UL + u8 pin0_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN0_USAGE_SYNC_OUT + u8 pin1_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT + u8 pin2_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL + #define 
FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 pin3_usage; + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 unused_0; + u8 valid; +}; + +/* hwrm_func_ptp_pin_cfg_input (size:256b/32B) */ +struct hwrm_func_ptp_pin_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_STATE 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN1_USAGE 0x8UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_STATE 0x10UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN2_USAGE 0x20UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_STATE 0x40UL + #define FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN3_USAGE 0x80UL + u8 pin0_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_STATE_ENABLED + u8 pin0_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_NONE 0x0UL + #define 
FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN0_USAGE_SYNC_OUT + u8 pin1_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_STATE_ENABLED + u8 pin1_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN1_USAGE_SYNC_OUT + u8 pin2_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED + u8 pin2_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 pin3_state; + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED + u8 pin3_usage; + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 
0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT + u8 unused_0[4]; +}; + +/* hwrm_func_ptp_pin_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_pin_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_cfg_input (size:384b/48B) */ +struct hwrm_func_ptp_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT 0x1UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_SOURCE 0x2UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_DLL_PHASE 0x4UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL + #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL + u8 ptp_pps_event; + #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL + u8 ptp_freq_adj_dll_source; + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_NONE 0x0UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_0 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_1 0x2UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_2 0x3UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_TSIO_3 0x4UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_0 0x5UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_1 0x6UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_2 0x7UL + #define 
FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_PORT_3 0x8UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID 0xffUL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_SOURCE_INVALID + u8 ptp_freq_adj_dll_phase; + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_NONE 0x0UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_4K 0x1UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_8K 0x2UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_10M 0x3UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M 0x4UL + #define FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_LAST FUNC_PTP_CFG_REQ_PTP_FREQ_ADJ_DLL_PHASE_25M + u8 unused_0[3]; + __le32 ptp_freq_adj_ext_period; + __le32 ptp_freq_adj_ext_up; + __le32 ptp_freq_adj_ext_phase_lower; + __le32 ptp_freq_adj_ext_phase_upper; + __le64 ptp_set_time; +}; + +/* hwrm_func_ptp_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_ts_query_input (size:192b/24B) */ +struct hwrm_func_ptp_ts_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PPS_TIME 0x1UL + #define FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME 0x2UL + u8 unused_0[4]; +}; + +/* hwrm_func_ptp_ts_query_output (size:320b/40B) */ +struct hwrm_func_ptp_ts_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 pps_event_ts; + __le64 ptm_local_ts; + __le64 ptm_system_ts; + __le32 ptm_link_delay; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */ +struct hwrm_func_ptp_ext_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL + #define 
FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL + #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL + __le16 phc_master_fid; + __le16 phc_sec_fid; + u8 phc_sec_mode; + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL + #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY + u8 unused_0; + __le32 failover_timer; + u8 unused_1[4]; +}; + +/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */ +struct hwrm_func_ptp_ext_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */ +struct hwrm_func_ptp_ext_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */ +struct hwrm_func_ptp_ext_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 phc_master_fid; + __le16 phc_sec_fid; + __le16 phc_active_fid0; + __le16 phc_active_fid1; + __le32 last_failover_event; + __le16 from_fid; + __le16 to_fid; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_key_ctx_alloc_input (size:384b/48B) */ +struct hwrm_func_key_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 num_key_ctxs; + __le32 dma_bufr_size_bytes; + u8 key_ctx_type; + #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_TX 0x0UL + #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_RX 0x1UL + #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL + #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL + #define FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_QUIC_RX + u8 unused_0[7]; + __le64 host_dma_addr; + __le32 partition_start_xid; + u8 unused_1[4]; +}; + +/* 
hwrm_func_key_ctx_alloc_output (size:192b/24B) */ +struct hwrm_func_key_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 num_key_ctxs_allocated; + u8 flags; + #define FUNC_KEY_CTX_ALLOC_RESP_FLAGS_KEY_CTXS_CONTIGUOUS 0x1UL + u8 unused_0; + __le32 partition_start_xid; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_func_key_ctx_free_input (size:256b/32B) */ +struct hwrm_func_key_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 key_ctx_type; + #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_TX 0x0UL + #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_RX 0x1UL + #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_TX 0x2UL + #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX 0x3UL + #define FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_LAST FUNC_KEY_CTX_FREE_REQ_KEY_CTX_TYPE_QUIC_RX + u8 unused_0; + __le32 partition_start_xid; + __le16 num_entries; + u8 unused_1[6]; +}; + +/* hwrm_func_key_ctx_free_output (size:128b/16B) */ +struct hwrm_func_key_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 rsvd0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */ +struct hwrm_func_backing_store_cfg_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL + #define 
FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID + __le16 instance; + __le32 flags; + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL + __le64 page_dir; + __le32 num_entries; + __le16 entry_size; + u8 page_size_pbl_level; + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4 + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K 
(0x0UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G + u8 subtype_valid_cnt; + __le32 split_entry_0; + __le32 split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; +}; + +/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */ +struct hwrm_func_backing_store_cfg_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 rsvd0[7]; + u8 valid; +}; + +/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */ +struct hwrm_func_backing_store_qcfg_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + 
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID + __le16 instance; + u8 rsvd[4]; +}; + +/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */ +struct hwrm_func_backing_store_qcfg_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define 
FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID + __le16 instance; + __le32 flags; + __le64 page_dir; + __le32 num_entries; + u8 page_size_pbl_level; + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4 + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4) + #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G + u8 subtype_valid_cnt; + u8 rsvd[2]; + __le32 split_entry_0; + __le32 split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; + u8 rsvd2[7]; + u8 valid; +}; + +/* qpc_split_entries (size:128b/16B) */ +struct qpc_split_entries { + __le32 qp_num_l2_entries; + __le32 qp_num_qp1_entries; + __le32 qp_num_fast_qpmd_entries; + __le32 
rsvd; +}; + +/* srq_split_entries (size:128b/16B) */ +struct srq_split_entries { + __le32 srq_num_l2_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* cq_split_entries (size:128b/16B) */ +struct cq_split_entries { + __le32 cq_num_l2_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* vnic_split_entries (size:128b/16B) */ +struct vnic_split_entries { + __le32 vnic_num_vnic_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* mrav_split_entries (size:128b/16B) */ +struct mrav_split_entries { + __le32 mrav_num_av_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* ts_split_entries (size:128b/16B) */ +struct ts_split_entries { + __le32 region_num_entries; + u8 tsid; + u8 lkup_static_bkt_cnt_exp[2]; + u8 rsvd; + __le32 rsvd2[2]; +}; + +/* ck_split_entries (size:128b/16B) */ +struct ck_split_entries { + __le32 num_quic_entries; + __le32 rsvd; + __le32 rsvd2[2]; +}; + +/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */ +struct hwrm_func_backing_store_qcaps_v2_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID + u8 rsvd[6]; +}; + +/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */ +struct hwrm_func_backing_store_qcaps_v2_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL + #define 
FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID + __le16 entry_size; + __le32 flags; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL + __le32 instance_bit_map; + u8 ctx_init_value; + u8 ctx_init_offset; + u8 entry_multiple; + u8 rsvd; + __le32 max_num_entries; + __le32 min_num_entries; + __le16 next_valid_type; + u8 subtype_valid_cnt; + u8 exact_cnt_bit_map; + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL + #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4 + __le32 split_entry_0; + __le32 
split_entry_1; + __le32 split_entry_2; + __le32 split_entry_3; + u8 rsvd3[3]; + u8 valid; +}; + +/* hwrm_func_dbr_pacing_cfg_input (size:320b/40B) */ +struct hwrm_func_dbr_pacing_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define FUNC_DBR_PACING_CFG_REQ_FLAGS_DBR_NQ_EVENT_ENABLE 0x1UL + #define FUNC_DBR_PACING_CFG_REQ_FLAGS_DBR_NQ_EVENT_DISABLE 0x2UL + u8 unused_0[7]; + __le32 enables; + #define FUNC_DBR_PACING_CFG_REQ_ENABLES_PRIMARY_NQ_ID_VALID 0x1UL + #define FUNC_DBR_PACING_CFG_REQ_ENABLES_PACING_THRESHOLD_VALID 0x2UL + __le32 primary_nq_id; + __le32 pacing_threshold; + u8 unused_1[4]; +}; + +/* hwrm_func_dbr_pacing_cfg_output (size:128b/16B) */ +struct hwrm_func_dbr_pacing_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */ +struct hwrm_func_dbr_pacing_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */ +struct hwrm_func_dbr_pacing_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL + u8 unused_0[7]; + __le32 dbr_stat_db_fifo_reg; + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST 
FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2 + __le32 dbr_stat_db_fifo_reg_watermark_mask; + u8 dbr_stat_db_fifo_reg_watermark_shift; + u8 unused_1[3]; + __le32 dbr_stat_db_fifo_reg_fifo_room_mask; + u8 dbr_stat_db_fifo_reg_fifo_room_shift; + u8 unused_2[3]; + __le32 dbr_throttling_aeq_arm_reg; + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL + #define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2 + u8 dbr_throttling_aeq_arm_reg_val; + u8 unused_3[3]; + __le32 dbr_stat_db_max_fifo_depth; + __le32 primary_nq_id; + __le32 pacing_threshold; + u8 unused_4[7]; + u8 valid; +}; + +/* hwrm_func_dbr_pacing_broadcast_event_input (size:128b/16B) */ +struct hwrm_func_dbr_pacing_broadcast_event_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_broadcast_event_output (size:128b/16B) */ +struct hwrm_func_dbr_pacing_broadcast_event_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_dbr_pacing_nqlist_query_input (size:128b/16B) */ +struct 
hwrm_func_dbr_pacing_nqlist_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_dbr_pacing_nqlist_query_output (size:384b/48B) */ +struct hwrm_func_dbr_pacing_nqlist_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 nq_ring_id0; + __le16 nq_ring_id1; + __le16 nq_ring_id2; + __le16 nq_ring_id3; + __le16 nq_ring_id4; + __le16 nq_ring_id5; + __le16 nq_ring_id6; + __le16 nq_ring_id7; + __le16 nq_ring_id8; + __le16 nq_ring_id9; + __le16 nq_ring_id10; + __le16 nq_ring_id11; + __le16 nq_ring_id12; + __le16 nq_ring_id13; + __le16 nq_ring_id14; + __le16 nq_ring_id15; + __le32 num_nqs; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_dbr_recovery_completed_input (size:192b/24B) */ +struct hwrm_func_dbr_recovery_completed_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 epoch; + #define FUNC_DBR_RECOVERY_COMPLETED_REQ_EPOCH_VALUE_MASK 0xffffffUL + #define FUNC_DBR_RECOVERY_COMPLETED_REQ_EPOCH_VALUE_SFT 0 + u8 unused_0[4]; +}; + +/* hwrm_func_dbr_recovery_completed_output (size:128b/16B) */ +struct hwrm_func_dbr_recovery_completed_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_synce_cfg_input (size:192b/24B) */ +struct hwrm_func_synce_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 enables; + #define FUNC_SYNCE_CFG_REQ_ENABLES_FREQ_PROFILE 0x1UL + #define FUNC_SYNCE_CFG_REQ_ENABLES_PRIMARY_CLOCK 0x2UL + #define FUNC_SYNCE_CFG_REQ_ENABLES_SECONDARY_CLOCK 0x4UL + u8 freq_profile; + #define FUNC_SYNCE_CFG_REQ_FREQ_PROFILE_INVALID 0x0UL + #define FUNC_SYNCE_CFG_REQ_FREQ_PROFILE_25MHZ 0x1UL + #define FUNC_SYNCE_CFG_REQ_FREQ_PROFILE_LAST FUNC_SYNCE_CFG_REQ_FREQ_PROFILE_25MHZ + u8 primary_clock_state; + #define 
FUNC_SYNCE_CFG_REQ_PRIMARY_CLOCK_STATE_DISABLE 0x0UL + #define FUNC_SYNCE_CFG_REQ_PRIMARY_CLOCK_STATE_ENABLE 0x1UL + #define FUNC_SYNCE_CFG_REQ_PRIMARY_CLOCK_STATE_LAST FUNC_SYNCE_CFG_REQ_PRIMARY_CLOCK_STATE_ENABLE + u8 secondary_clock_state; + #define FUNC_SYNCE_CFG_REQ_SECONDARY_CLOCK_STATE_DISABLE 0x0UL + #define FUNC_SYNCE_CFG_REQ_SECONDARY_CLOCK_STATE_ENABLE 0x1UL + #define FUNC_SYNCE_CFG_REQ_SECONDARY_CLOCK_STATE_LAST FUNC_SYNCE_CFG_REQ_SECONDARY_CLOCK_STATE_ENABLE + u8 unused_0[4]; +}; + +/* hwrm_func_synce_cfg_output (size:128b/16B) */ +struct hwrm_func_synce_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_synce_qcfg_input (size:192b/24B) */ +struct hwrm_func_synce_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_synce_qcfg_output (size:128b/16B) */ +struct hwrm_func_synce_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 freq_profile; + #define FUNC_SYNCE_QCFG_RESP_FREQ_PROFILE_INVALID 0x0UL + #define FUNC_SYNCE_QCFG_RESP_FREQ_PROFILE_25MHZ 0x1UL + #define FUNC_SYNCE_QCFG_RESP_FREQ_PROFILE_LAST FUNC_SYNCE_QCFG_RESP_FREQ_PROFILE_25MHZ + u8 state; + #define FUNC_SYNCE_QCFG_RESP_STATE_PRIMARY_CLOCK_ENABLED 0x1UL + #define FUNC_SYNCE_QCFG_RESP_STATE_SECONDARY_CLOCK_ENABLED 0x2UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_func_lag_create_input (size:192b/24B) */ +struct hwrm_func_lag_create_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 enables; + #define FUNC_LAG_CREATE_REQ_ENABLES_ACTIVE_PORT_MAP 0x1UL + #define FUNC_LAG_CREATE_REQ_ENABLES_MEMBER_PORT_MAP 0x2UL + #define FUNC_LAG_CREATE_REQ_ENABLES_AGGR_MODE 0x4UL + #define FUNC_LAG_CREATE_REQ_ENABLES_RSVD1_MASK 0xf8UL + #define FUNC_LAG_CREATE_REQ_ENABLES_RSVD1_SFT 3 + u8 active_port_map; + #define 
FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_RSVD3_MASK 0xf0UL + #define FUNC_LAG_CREATE_REQ_ACTIVE_PORT_MAP_RSVD3_SFT 4 + u8 member_port_map; + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_RSVD4_MASK 0xf0UL + #define FUNC_LAG_CREATE_REQ_MEMBER_PORT_MAP_RSVD4_SFT 4 + u8 link_aggr_mode; + #define FUNC_LAG_CREATE_REQ_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define FUNC_LAG_CREATE_REQ_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define FUNC_LAG_CREATE_REQ_AGGR_MODE_BALANCE_XOR 0x3UL + #define FUNC_LAG_CREATE_REQ_AGGR_MODE_802_3_AD 0x4UL + #define FUNC_LAG_CREATE_REQ_AGGR_MODE_LAST FUNC_LAG_CREATE_REQ_AGGR_MODE_802_3_AD + u8 unused_0[4]; +}; + +/* hwrm_func_lag_create_output (size:128b/16B) */ +struct hwrm_func_lag_create_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 fw_lag_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_func_lag_update_input (size:192b/24B) */ +struct hwrm_func_lag_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 fw_lag_id; + u8 enables; + #define FUNC_LAG_UPDATE_REQ_ENABLES_ACTIVE_PORT_MAP 0x1UL + #define FUNC_LAG_UPDATE_REQ_ENABLES_MEMBER_PORT_MAP 0x2UL + #define FUNC_LAG_UPDATE_REQ_ENABLES_AGGR_MODE 0x4UL + #define FUNC_LAG_UPDATE_REQ_ENABLES_RSVD1_MASK 0xf8UL + #define FUNC_LAG_UPDATE_REQ_ENABLES_RSVD1_SFT 3 + u8 active_port_map; + #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_PORT_2 0x4UL 
+ #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_RSVD3_MASK 0xf0UL + #define FUNC_LAG_UPDATE_REQ_ACTIVE_PORT_MAP_RSVD3_SFT 4 + u8 member_port_map; + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_RSVD4_MASK 0xf0UL + #define FUNC_LAG_UPDATE_REQ_MEMBER_PORT_MAP_RSVD4_SFT 4 + u8 link_aggr_mode; + #define FUNC_LAG_UPDATE_REQ_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define FUNC_LAG_UPDATE_REQ_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define FUNC_LAG_UPDATE_REQ_AGGR_MODE_BALANCE_XOR 0x3UL + #define FUNC_LAG_UPDATE_REQ_AGGR_MODE_802_3_AD 0x4UL + #define FUNC_LAG_UPDATE_REQ_AGGR_MODE_LAST FUNC_LAG_UPDATE_REQ_AGGR_MODE_802_3_AD + u8 unused_0[3]; +}; + +/* hwrm_func_lag_update_output (size:128b/16B) */ +struct hwrm_func_lag_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_lag_free_input (size:192b/24B) */ +struct hwrm_func_lag_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 fw_lag_id; + u8 unused_0[7]; +}; + +/* hwrm_func_lag_free_output (size:128b/16B) */ +struct hwrm_func_lag_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_lag_qcfg_input (size:192b/24B) */ +struct hwrm_func_lag_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 fw_lag_id; + u8 unused_0[7]; +}; + +/* hwrm_func_lag_qcfg_output (size:128b/16B) */ +struct hwrm_func_lag_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 active_port_map; + #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_PORT_0 0x1UL 
+ #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_RSVD3_MASK 0xf0UL + #define FUNC_LAG_QCFG_RESP_ACTIVE_PORT_MAP_RSVD3_SFT 4 + u8 member_port_map; + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_RSVD4_MASK 0xf0UL + #define FUNC_LAG_QCFG_RESP_MEMBER_PORT_MAP_RSVD4_SFT 4 + u8 link_aggr_mode; + #define FUNC_LAG_QCFG_RESP_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define FUNC_LAG_QCFG_RESP_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define FUNC_LAG_QCFG_RESP_AGGR_MODE_BALANCE_XOR 0x3UL + #define FUNC_LAG_QCFG_RESP_AGGR_MODE_802_3_AD 0x4UL + #define FUNC_LAG_QCFG_RESP_AGGR_MODE_LAST FUNC_LAG_QCFG_RESP_AGGR_MODE_802_3_AD + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_func_lag_mode_cfg_input (size:192b/24B) */ +struct hwrm_func_lag_mode_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 enables; + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_FLAGS 0x1UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_ACTIVE_PORT_MAP 0x2UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_MEMBER_PORT_MAP 0x4UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_AGGR_MODE 0x8UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_LAG_ID 0x10UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_RSVD1_MASK 0xe0UL + #define FUNC_LAG_MODE_CFG_REQ_ENABLES_RSVD1_SFT 5 + u8 flags; + #define FUNC_LAG_MODE_CFG_REQ_FLAGS_AGGR_DISABLE 0x1UL + #define FUNC_LAG_MODE_CFG_REQ_FLAGS_AGGR_ENABLE 0x2UL + #define FUNC_LAG_MODE_CFG_REQ_FLAGS_RSVD2_MASK 0xfcUL + #define FUNC_LAG_MODE_CFG_REQ_FLAGS_RSVD2_SFT 2 + u8 active_port_map; + #define FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_PORT_0 0x1UL + #define 
FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_RSVD3_MASK 0xf0UL + #define FUNC_LAG_MODE_CFG_REQ_ACTIVE_PORT_MAP_RSVD3_SFT 4 + u8 member_port_map; + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_RSVD4_MASK 0xf0UL + #define FUNC_LAG_MODE_CFG_REQ_MEMBER_PORT_MAP_RSVD4_SFT 4 + u8 link_aggr_mode; + #define FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_BALANCE_XOR 0x3UL + #define FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_802_3_AD 0x4UL + #define FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_LAST FUNC_LAG_MODE_CFG_REQ_AGGR_MODE_802_3_AD + u8 lag_id; + u8 unused_0[2]; +}; + +/* hwrm_func_lag_mode_cfg_output (size:128b/16B) */ +struct hwrm_func_lag_mode_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 lag_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_func_lag_mode_qcfg_input (size:192b/24B) */ +struct hwrm_func_lag_mode_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_func_lag_mode_qcfg_output (size:128b/16B) */ +struct hwrm_func_lag_mode_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 aggr_enabled; + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_ENABLED 0x1UL + #define FUNC_LAG_MODE_QCFG_RESP_RSVD1_MASK 0xfeUL + #define FUNC_LAG_MODE_QCFG_RESP_RSVD1_SFT 1 + u8 active_port_map; + #define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_PORT_1 0x2UL + 
#define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_RSVD2_MASK 0xf0UL + #define FUNC_LAG_MODE_QCFG_RESP_ACTIVE_PORT_MAP_RSVD2_SFT 4 + u8 member_port_map; + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_PORT_0 0x1UL + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_PORT_1 0x2UL + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_PORT_2 0x4UL + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_PORT_3 0x8UL + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_RSVD3_MASK 0xf0UL + #define FUNC_LAG_MODE_QCFG_RESP_MEMBER_PORT_MAP_RSVD3_SFT 4 + u8 link_aggr_mode; + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_ACTIVE_ACTIVE 0x1UL + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_ACTIVE_BACKUP 0x2UL + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_BALANCE_XOR 0x3UL + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_802_3_AD 0x4UL + #define FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_LAST FUNC_LAG_MODE_QCFG_RESP_AGGR_MODE_802_3_AD + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_vlan_qcfg_input (size:192b/24B) */ +struct hwrm_func_vlan_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_func_vlan_qcfg_output (size:320b/40B) */ +struct hwrm_func_vlan_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 unused_0; + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd2; + __le32 rsvd3; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_func_vlan_cfg_input (size:384b/48B) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + 
#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; + u8 unused_1; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_2; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + u8 unused_3[4]; +}; + +/* hwrm_func_vlan_cfg_output (size:128b/16B) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */ +struct hwrm_func_vf_vnic_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + u8 unused_0[2]; + __le32 max_vnic_id_cnt; + __le64 vnic_id_tbl_addr; +}; + +/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */ +struct hwrm_func_vf_vnic_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id_cnt; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_vf_bw_cfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_CFG_REQ_VFN_VFID_SFT 0 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_SFT 12 + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_40 (0x6UL << 12) + #define 
FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_CFG_REQ_VFN_RATE_LAST FUNC_VF_BW_CFG_REQ_VFN_RATE_PCT_100 +}; + +/* hwrm_func_vf_bw_cfg_output (size:128b/16B) */ +struct hwrm_func_vf_bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_vf_bw_qcfg_input (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_QCFG_REQ_VFN_VFID_SFT 0 +}; + +/* hwrm_func_vf_bw_qcfg_output (size:960b/120B) */ +struct hwrm_func_vf_bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 num_vfs; + __le16 unused[3]; + __le16 vfn[48]; + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_MASK 0xfffUL + #define FUNC_VF_BW_QCFG_RESP_VFN_VFID_SFT 0 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_MASK 0xf000UL + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_SFT 12 + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_0 (0x0UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_6_66 (0x1UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_13_33 (0x2UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_20 (0x3UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_26_66 (0x4UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_33_33 (0x5UL << 12) + #define 
FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_40 (0x6UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_46_66 (0x7UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_53_33 (0x8UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_60 (0x9UL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_66_66 (0xaUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_73_33 (0xbUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_80 (0xcUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_86_66 (0xdUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_93_33 (0xeUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 (0xfUL << 12) + #define FUNC_VF_BW_QCFG_RESP_VFN_RATE_LAST FUNC_VF_BW_QCFG_RESP_VFN_RATE_PCT_100 + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_drv_if_change_input (size:192b/24B) */ +struct hwrm_func_drv_if_change_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP 0x1UL + __le32 unused; +}; + +/* hwrm_func_drv_if_change_output (size:128b/16B) */ +struct hwrm_func_drv_if_change_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_func_host_pf_ids_query_input (size:192b/24B) */ +struct hwrm_func_host_pf_ids_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 host; + #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_SOC 0x1UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_0 0x2UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_1 0x4UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_2 0x8UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_HOST_EP_3 0x10UL + u8 filter; + #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ALL 0x0UL + #define 
FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_L2 0x1UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ROCE 0x2UL + #define FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_LAST FUNC_HOST_PF_IDS_QUERY_REQ_FILTER_ROCE + u8 unused_1[6]; +}; + +/* hwrm_func_host_pf_ids_query_output (size:128b/16B) */ +struct hwrm_func_host_pf_ids_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 first_pf_id; + __le16 pf_ordinal_mask; + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_0 0x1UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_1 0x2UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_2 0x4UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_3 0x8UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_4 0x10UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_5 0x20UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_6 0x40UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_7 0x80UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_8 0x100UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_9 0x200UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_10 0x400UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_11 0x800UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_12 0x1000UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_13 0x2000UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_14 0x4000UL + #define FUNC_HOST_PF_IDS_QUERY_RESP_PF_ORDINAL_MASK_FUNC_15 0x8000UL + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_func_spd_cfg_input (size:384b/48B) */ +struct hwrm_func_spd_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define FUNC_SPD_CFG_REQ_FLAGS_FWD_ENABLE 0x1UL + #define FUNC_SPD_CFG_REQ_FLAGS_FWD_DISABLE 0x2UL + #define FUNC_SPD_CFG_REQ_FLAGS_CSUM_ENABLE 0x4UL + #define FUNC_SPD_CFG_REQ_FLAGS_CSUM_DISABLE 0x8UL + #define 
FUNC_SPD_CFG_REQ_FLAGS_DBG_ENABLE 0x10UL + #define FUNC_SPD_CFG_REQ_FLAGS_DBG_DISABLE 0x20UL + __le32 enables; + #define FUNC_SPD_CFG_REQ_ENABLES_ETHERTYPE 0x1UL + #define FUNC_SPD_CFG_REQ_ENABLES_HASH_MODE_FLAGS 0x2UL + #define FUNC_SPD_CFG_REQ_ENABLES_HASH_TYPE 0x4UL + #define FUNC_SPD_CFG_REQ_ENABLES_RING_TBL_ADDR 0x8UL + #define FUNC_SPD_CFG_REQ_ENABLES_HASH_KEY_TBL_ADDR 0x10UL + __le16 ethertype; + u8 hash_mode_flags; + #define FUNC_SPD_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define FUNC_SPD_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define FUNC_SPD_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define FUNC_SPD_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define FUNC_SPD_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 unused_1; + __le32 hash_type; + #define FUNC_SPD_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define FUNC_SPD_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define FUNC_SPD_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define FUNC_SPD_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define FUNC_SPD_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define FUNC_SPD_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; +}; + +/* hwrm_func_spd_cfg_output (size:128b/16B) */ +struct hwrm_func_spd_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_func_spd_qcfg_input (size:128b/16B) */ +struct hwrm_func_spd_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_func_spd_qcfg_output (size:512b/64B) */ +struct hwrm_func_spd_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define FUNC_SPD_QCFG_RESP_FLAGS_FWD_ENABLED 0x1UL + #define FUNC_SPD_QCFG_RESP_FLAGS_CSUM_ENABLED 0x2UL + #define FUNC_SPD_QCFG_RESP_FLAGS_DBG_ENABLED 0x4UL + __le32 hash_type; + #define FUNC_SPD_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define FUNC_SPD_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define 
FUNC_SPD_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define FUNC_SPD_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define FUNC_SPD_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define FUNC_SPD_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + __le32 hash_key[10]; + u8 hash_mode_flags; + #define FUNC_SPD_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define FUNC_SPD_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define FUNC_SPD_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define FUNC_SPD_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define FUNC_SPD_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 unused_1; + __le16 ethertype; + u8 unused_2[3]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_input (size:512b/64B) */ +struct hwrm_port_phy_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL + 
#define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL + __le32 enables; + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL + __le16 port_id; + __le16 force_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL + #define 
PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK + u8 auto_duplex; + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH + u8 auto_pause; + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + u8 mgmt_flag; + #define PORT_PHY_CFG_REQ_MGMT_FLAG_LINK_RELEASE 0x1UL + #define PORT_PHY_CFG_REQ_MGMT_FLAG_MGMT_VALID 0x80UL + __le16 auto_link_speed; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 
0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + u8 wirespeed; + #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL + #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL + #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_PHY_CFG_REQ_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL + u8 unused_1; + __le32 preemphasis; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 force_pam4_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB + __le32 tx_lpi_timer; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 + __le16 auto_link_pam4_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL + #define 
PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL + __le16 force_link_speeds2; + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 + __le16 auto_link_speeds2_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL + #define 
PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL + u8 unused_2[6]; +}; + +/* hwrm_port_phy_cfg_output (size:128b/16B) */ +struct hwrm_port_phy_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + +/* hwrm_port_phy_qcfg_input (size:192b/24B) */ +struct hwrm_port_phy_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcfg_output (size:832b/104B) */ +struct hwrm_port_phy_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK + u8 active_fec_signal_mode; + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0 + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL + #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4 + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4) + #define 
PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4) + #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE + __le16 link_speed; + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL + u8 pause; + #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL + __le16 support_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL + 
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL + __le16 force_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB + u8 auto_mode; + #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK + u8 auto_pause; + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL + __le16 auto_link_speed; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB + __le16 auto_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL + u8 wirespeed; + #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL + #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON + u8 lpbk; + #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_PHY_QCFG_RESP_LPBK_EXTERNAL 0x3UL + #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_EXTERNAL + u8 force_pause; + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL + #define 
PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT 0x5UL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define 
PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4 0x1cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 
0x3cUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 + u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL + u8 eee_config_phy_addr; + #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL + #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + __le16 link_partner_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define 
PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL + u8 link_partner_adv_auto_mode; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK + u8 link_partner_adv_pause; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define 
PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + 
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL + u8 option_flags; + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL + #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le16 support_pam4_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL + __le16 force_pam4_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB + __le16 auto_pam4_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL + u8 link_partner_pam4_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL + u8 link_down_reason; + #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL + __le16 support_speeds2; + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 
0x80UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL + __le16 force_link_speeds2; + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 + __le16 auto_link_speeds2; + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL + #define 
PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL + u8 active_lanes; + u8 valid; +}; + +/* hwrm_port_mac_cfg_input (size:448b/56B) */ +struct hwrm_port_mac_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL + #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL + #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL + #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL + #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL + __le32 enables; + #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL + #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL + #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL + #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL + #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + 
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL + #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE 0x400UL + #define PORT_MAC_CFG_REQ_ENABLES_PTP_LOAD_CONTROL 0x800UL + __le16 port_id; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 reserved1; + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define 
PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_0[3]; + __s32 ptp_freq_adj_ppb; + u8 unused_1[3]; + u8 ptp_load_control; + #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_NONE 0x0UL + #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL + #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL + #define PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_LAST PORT_MAC_CFG_REQ_PTP_LOAD_CONTROL_PPS_EVENT + __s64 ptp_adj_phase; +}; + +/* hwrm_port_mac_cfg_output (size:128b/16B) */ +struct hwrm_port_mac_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE + u8 unused_0; + u8 valid; +}; + +/* hwrm_port_mac_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_qcfg_output (size:256b/32B) */ +struct hwrm_port_mac_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + __le16 mtu; + u8 ipg; + u8 lpbk; + #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL + #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL + #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL + #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE + u8 vlan_pri2cos_map_pri; + u8 flags; + #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL + #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL + #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL + #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL + u8 tunnel_pri2cos_map_pri; + u8 dscp2pri_map_pri; + __le16 
rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + u8 cos_field_cfg; + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3 + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3) + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL + #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5 + u8 unused_1; + __le16 port_svif_info; + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_MASK 0x7fffUL + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_SFT 0 + #define PORT_MAC_QCFG_RESP_PORT_SVIF_INFO_PORT_SVIF_VALID 0x8000UL + u8 ptp_load_control; + #define PORT_MAC_QCFG_RESP_PTP_LOAD_CONTROL_NONE 0x0UL + #define PORT_MAC_QCFG_RESP_PTP_LOAD_CONTROL_IMMEDIATE 0x1UL + #define PORT_MAC_QCFG_RESP_PTP_LOAD_CONTROL_PPS_EVENT 0x2UL + #define PORT_MAC_QCFG_RESP_PTP_LOAD_CONTROL_LAST PORT_MAC_QCFG_RESP_PTP_LOAD_CONTROL_PPS_EVENT + u8 unused_2[4]; + u8 valid; +}; + +/* 
hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_ptp_qcfg_output (size:704b/88B) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME 0x40UL + u8 unused_0[3]; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + __le32 ts_ref_clock_reg_lower; + __le32 ts_ref_clock_reg_upper; + u8 unused_1[7]; + u8 valid; +}; + +/* tx_port_stats (size:3264b/408B) */ +struct tx_port_stats { + __le64 tx_64b_frames; + __le64 tx_65b_127b_frames; + __le64 tx_128b_255b_frames; + __le64 tx_256b_511b_frames; + __le64 tx_512b_1023b_frames; + __le64 tx_1024b_1518b_frames; + __le64 tx_good_vlan_frames; + __le64 tx_1519b_2047b_frames; + __le64 tx_2048b_4095b_frames; + __le64 tx_4096b_9216b_frames; + __le64 tx_9217b_16383b_frames; + __le64 tx_good_frames; + __le64 tx_total_frames; + __le64 tx_ucast_frames; + __le64 tx_mcast_frames; + __le64 tx_bcast_frames; + __le64 tx_pause_frames; + __le64 tx_pfc_frames; + __le64 tx_jabber_frames; + __le64 
tx_fcs_err_frames; + __le64 tx_control_frames; + __le64 tx_oversz_frames; + __le64 tx_single_dfrl_frames; + __le64 tx_multi_dfrl_frames; + __le64 tx_single_coll_frames; + __le64 tx_multi_coll_frames; + __le64 tx_late_coll_frames; + __le64 tx_excessive_coll_frames; + __le64 tx_frag_frames; + __le64 tx_err; + __le64 tx_tagged_frames; + __le64 tx_dbl_tagged_frames; + __le64 tx_runt_frames; + __le64 tx_fifo_underruns; + __le64 tx_pfc_ena_frames_pri0; + __le64 tx_pfc_ena_frames_pri1; + __le64 tx_pfc_ena_frames_pri2; + __le64 tx_pfc_ena_frames_pri3; + __le64 tx_pfc_ena_frames_pri4; + __le64 tx_pfc_ena_frames_pri5; + __le64 tx_pfc_ena_frames_pri6; + __le64 tx_pfc_ena_frames_pri7; + __le64 tx_eee_lpi_events; + __le64 tx_eee_lpi_duration; + __le64 tx_llfc_logical_msgs; + __le64 tx_hcfc_msgs; + __le64 tx_total_collisions; + __le64 tx_bytes; + __le64 tx_xthol_frames; + __le64 tx_stat_discard; + __le64 tx_stat_error; +}; + +/* rx_port_stats (size:4224b/528B) */ +struct rx_port_stats { + __le64 rx_64b_frames; + __le64 rx_65b_127b_frames; + __le64 rx_128b_255b_frames; + __le64 rx_256b_511b_frames; + __le64 rx_512b_1023b_frames; + __le64 rx_1024b_1518b_frames; + __le64 rx_good_vlan_frames; + __le64 rx_1519b_2047b_frames; + __le64 rx_2048b_4095b_frames; + __le64 rx_4096b_9216b_frames; + __le64 rx_9217b_16383b_frames; + __le64 rx_total_frames; + __le64 rx_ucast_frames; + __le64 rx_mcast_frames; + __le64 rx_bcast_frames; + __le64 rx_fcs_err_frames; + __le64 rx_ctrl_frames; + __le64 rx_pause_frames; + __le64 rx_pfc_frames; + __le64 rx_unsupported_opcode_frames; + __le64 rx_unsupported_da_pausepfc_frames; + __le64 rx_wrong_sa_frames; + __le64 rx_align_err_frames; + __le64 rx_oor_len_frames; + __le64 rx_code_err_frames; + __le64 rx_false_carrier_frames; + __le64 rx_ovrsz_frames; + __le64 rx_jbr_frames; + __le64 rx_mtu_err_frames; + __le64 rx_match_crc_frames; + __le64 rx_promiscuous_frames; + __le64 rx_tagged_frames; + __le64 rx_double_tagged_frames; + __le64 rx_trunc_frames; + __le64 
rx_good_frames; + __le64 rx_pfc_xon2xoff_frames_pri0; + __le64 rx_pfc_xon2xoff_frames_pri1; + __le64 rx_pfc_xon2xoff_frames_pri2; + __le64 rx_pfc_xon2xoff_frames_pri3; + __le64 rx_pfc_xon2xoff_frames_pri4; + __le64 rx_pfc_xon2xoff_frames_pri5; + __le64 rx_pfc_xon2xoff_frames_pri6; + __le64 rx_pfc_xon2xoff_frames_pri7; + __le64 rx_pfc_ena_frames_pri0; + __le64 rx_pfc_ena_frames_pri1; + __le64 rx_pfc_ena_frames_pri2; + __le64 rx_pfc_ena_frames_pri3; + __le64 rx_pfc_ena_frames_pri4; + __le64 rx_pfc_ena_frames_pri5; + __le64 rx_pfc_ena_frames_pri6; + __le64 rx_pfc_ena_frames_pri7; + __le64 rx_sch_crc_err_frames; + __le64 rx_undrsz_frames; + __le64 rx_frag_frames; + __le64 rx_eee_lpi_events; + __le64 rx_eee_lpi_duration; + __le64 rx_llfc_physical_msgs; + __le64 rx_llfc_logical_msgs; + __le64 rx_llfc_msgs_with_crc_err; + __le64 rx_hcfc_msgs; + __le64 rx_hcfc_msgs_with_crc_err; + __le64 rx_bytes; + __le64 rx_runt_bytes; + __le64 rx_runt_frames; + __le64 rx_stat_discard; + __le64 rx_stat_err; +}; + +/* hwrm_port_qstats_input (size:320b/40B) */ +struct hwrm_port_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 flags; + #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[5]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_output (size:128b/16B) */ +struct hwrm_port_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + +/* tx_port_stats_ext (size:2048b/256B) */ +struct tx_port_stats_ext { + __le64 tx_bytes_cos0; + __le64 tx_bytes_cos1; + __le64 tx_bytes_cos2; + __le64 tx_bytes_cos3; + __le64 tx_bytes_cos4; + __le64 tx_bytes_cos5; + __le64 tx_bytes_cos6; + __le64 tx_bytes_cos7; + __le64 tx_packets_cos0; + __le64 tx_packets_cos1; + __le64 tx_packets_cos2; + __le64 tx_packets_cos3; + __le64 tx_packets_cos4; + __le64 tx_packets_cos5; 
+ __le64 tx_packets_cos6; + __le64 tx_packets_cos7; + __le64 pfc_pri0_tx_duration_us; + __le64 pfc_pri0_tx_transitions; + __le64 pfc_pri1_tx_duration_us; + __le64 pfc_pri1_tx_transitions; + __le64 pfc_pri2_tx_duration_us; + __le64 pfc_pri2_tx_transitions; + __le64 pfc_pri3_tx_duration_us; + __le64 pfc_pri3_tx_transitions; + __le64 pfc_pri4_tx_duration_us; + __le64 pfc_pri4_tx_transitions; + __le64 pfc_pri5_tx_duration_us; + __le64 pfc_pri5_tx_transitions; + __le64 pfc_pri6_tx_duration_us; + __le64 pfc_pri6_tx_transitions; + __le64 pfc_pri7_tx_duration_us; + __le64 pfc_pri7_tx_transitions; +}; + +/* rx_port_stats_ext (size:3904b/488B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; + __le64 rx_bytes_cos0; + __le64 rx_bytes_cos1; + __le64 rx_bytes_cos2; + __le64 rx_bytes_cos3; + __le64 rx_bytes_cos4; + __le64 rx_bytes_cos5; + __le64 rx_bytes_cos6; + __le64 rx_bytes_cos7; + __le64 rx_packets_cos0; + __le64 rx_packets_cos1; + __le64 rx_packets_cos2; + __le64 rx_packets_cos3; + __le64 rx_packets_cos4; + __le64 rx_packets_cos5; + __le64 rx_packets_cos6; + __le64 rx_packets_cos7; + __le64 pfc_pri0_rx_duration_us; + __le64 pfc_pri0_rx_transitions; + __le64 pfc_pri1_rx_duration_us; + __le64 pfc_pri1_rx_transitions; + __le64 pfc_pri2_rx_duration_us; + __le64 pfc_pri2_rx_transitions; + __le64 pfc_pri3_rx_duration_us; + __le64 pfc_pri3_rx_transitions; + __le64 pfc_pri4_rx_duration_us; + __le64 pfc_pri4_rx_transitions; + __le64 pfc_pri5_rx_duration_us; + __le64 pfc_pri5_rx_transitions; + __le64 pfc_pri6_rx_duration_us; + __le64 pfc_pri6_rx_transitions; + __le64 pfc_pri7_rx_duration_us; + __le64 pfc_pri7_rx_transitions; + __le64 rx_bits; + __le64 rx_buffer_passed_threshold; + __le64 rx_pcs_symbol_err; + __le64 rx_corrected_bits; + __le64 rx_discard_bytes_cos0; + __le64 rx_discard_bytes_cos1; + __le64 rx_discard_bytes_cos2; + __le64 
rx_discard_bytes_cos3; + __le64 rx_discard_bytes_cos4; + __le64 rx_discard_bytes_cos5; + __le64 rx_discard_bytes_cos6; + __le64 rx_discard_bytes_cos7; + __le64 rx_discard_packets_cos0; + __le64 rx_discard_packets_cos1; + __le64 rx_discard_packets_cos2; + __le64 rx_discard_packets_cos3; + __le64 rx_discard_packets_cos4; + __le64 rx_discard_packets_cos5; + __le64 rx_discard_packets_cos6; + __le64 rx_discard_packets_cos7; + __le64 rx_fec_corrected_blocks; + __le64 rx_fec_uncorrectable_blocks; + __le64 rx_filter_miss; + __le64 rx_fec_symbol_err; +}; + +/* rx_port_stats_ext_pfc_wd (size:5120b/640B) */ +struct rx_port_stats_ext_pfc_wd { + __le64 rx_pfc_watchdog_storms_detected_pri0; + __le64 rx_pfc_watchdog_storms_detected_pri1; + __le64 rx_pfc_watchdog_storms_detected_pri2; + __le64 rx_pfc_watchdog_storms_detected_pri3; + __le64 rx_pfc_watchdog_storms_detected_pri4; + __le64 rx_pfc_watchdog_storms_detected_pri5; + __le64 rx_pfc_watchdog_storms_detected_pri6; + __le64 rx_pfc_watchdog_storms_detected_pri7; + __le64 rx_pfc_watchdog_storms_reverted_pri0; + __le64 rx_pfc_watchdog_storms_reverted_pri1; + __le64 rx_pfc_watchdog_storms_reverted_pri2; + __le64 rx_pfc_watchdog_storms_reverted_pri3; + __le64 rx_pfc_watchdog_storms_reverted_pri4; + __le64 rx_pfc_watchdog_storms_reverted_pri5; + __le64 rx_pfc_watchdog_storms_reverted_pri6; + __le64 rx_pfc_watchdog_storms_reverted_pri7; + __le64 rx_pfc_watchdog_storms_rx_packets_pri0; + __le64 rx_pfc_watchdog_storms_rx_packets_pri1; + __le64 rx_pfc_watchdog_storms_rx_packets_pri2; + __le64 rx_pfc_watchdog_storms_rx_packets_pri3; + __le64 rx_pfc_watchdog_storms_rx_packets_pri4; + __le64 rx_pfc_watchdog_storms_rx_packets_pri5; + __le64 rx_pfc_watchdog_storms_rx_packets_pri6; + __le64 rx_pfc_watchdog_storms_rx_packets_pri7; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri0; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri1; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri2; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri3; + __le64 
rx_pfc_watchdog_storms_rx_bytes_pri4; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri5; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri6; + __le64 rx_pfc_watchdog_storms_rx_bytes_pri7; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri0; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri1; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri2; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri3; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri4; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri5; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri6; + __le64 rx_pfc_watchdog_storms_rx_packets_dropped_pri7; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri0; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri1; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri2; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri3; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri4; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri5; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri6; + __le64 rx_pfc_watchdog_storms_rx_bytes_dropped_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_packets_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri0; + __le64 
rx_pfc_watchdog_last_storm_rx_packets_dropped_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_packets_dropped_pri7; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri0; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri1; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri2; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri3; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri4; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri5; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri6; + __le64 rx_pfc_watchdog_last_storm_rx_bytes_dropped_pri7; +}; + +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 flags; + #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + __le16 total_active_cos_queues; + u8 flags; + #define PORT_QSTATS_EXT_RESP_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED 0x1UL + u8 valid; +}; + +/* hwrm_port_qstats_ext_pfc_wd_input (size:256b/32B) */ +struct hwrm_port_qstats_ext_pfc_wd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 pfc_wd_stat_size; + u8 unused_0[4]; + __le64 pfc_wd_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_pfc_wd_output (size:128b/16B) */ +struct 
hwrm_port_qstats_ext_pfc_wd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pfc_wd_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_port_lpbk_qstats_input (size:256b/32B) */ +struct hwrm_port_lpbk_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 lpbk_stat_size; + u8 flags; + #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[5]; + __le64 lpbk_stat_host_addr; +}; + +/* hwrm_port_lpbk_qstats_output (size:128b/16B) */ +struct hwrm_port_lpbk_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 lpbk_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* port_lpbk_stats (size:640b/80B) */ +struct port_lpbk_stats { + __le64 lpbk_ucast_frames; + __le64 lpbk_mcast_frames; + __le64 lpbk_bcast_frames; + __le64 lpbk_ucast_bytes; + __le64 lpbk_mcast_bytes; + __le64 lpbk_bcast_bytes; + __le64 lpbk_tx_discards; + __le64 lpbk_tx_errors; + __le64 lpbk_rx_discards; + __le64 lpbk_rx_errors; +}; + +/* hwrm_port_ecn_qstats_input (size:256b/32B) */ +struct hwrm_port_ecn_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 ecn_stat_buf_size; + u8 flags; + #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; + __le64 ecn_stat_host_addr; +}; + +/* hwrm_port_ecn_qstats_output (size:128b/16B) */ +struct hwrm_port_ecn_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ecn_stat_buf_size; + u8 mark_en; + u8 unused_0[4]; + u8 valid; +}; + +/* port_stats_ecn (size:512b/64B) */ +struct port_stats_ecn { + __le64 mark_cnt_cos0; + __le64 mark_cnt_cos1; + __le64 mark_cnt_cos2; + __le64 mark_cnt_cos3; + __le64 mark_cnt_cos4; + __le64 mark_cnt_cos5; + __le64 mark_cnt_cos6; + __le64 mark_cnt_cos7; +}; + +/* hwrm_port_clr_stats_input (size:192b/24B) */ +struct 
hwrm_port_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 flags; + #define PORT_CLR_STATS_REQ_FLAGS_ROCE_COUNTERS 0x1UL + u8 unused_0[5]; +}; + +/* hwrm_port_clr_stats_output (size:128b/16B) */ +struct hwrm_port_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */ +struct hwrm_port_lpbk_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */ +struct hwrm_port_lpbk_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_ts_query_input (size:320b/40B) */ +struct hwrm_port_ts_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL + #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX + #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL + __le16 port_id; + u8 unused_0[2]; + __le16 enables; + #define PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT 0x1UL + #define PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID 0x2UL + #define PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET 0x4UL + __le16 ts_req_timeout; + __le32 ptp_seq_id; + __le16 ptp_hdr_offset; + u8 unused_1[6]; +}; + +/* hwrm_port_ts_query_output (size:192b/24B) */ +struct hwrm_port_ts_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ptp_msg_ts; + __le16 ptp_msg_seqid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_port_phy_qcaps_input (size:192b/24B) */ +struct hwrm_port_phy_qcaps_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_phy_qcaps_output (size:320b/40B) */ +struct hwrm_port_phy_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL + #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL + #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL + #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL + u8 port_cnt; + #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL + #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12 + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 
0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + 
__le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24 + __le16 supported_pam4_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL + __le16 supported_pam4_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL + __le16 flags2; + #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL + u8 internal_port_cnt; + u8 unused_0; + __le16 supported_speeds2_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 
0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL + __le16 supported_speeds2_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_phy_i2c_write_input (size:832b/104B) */ +struct hwrm_port_phy_i2c_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL + __le16 port_id; + u8 i2c_slave_addr; + u8 bank_number; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; + __le32 data[16]; +}; + +/* hwrm_port_phy_i2c_write_output (size:128b/16B) */ +struct hwrm_port_phy_i2c_write_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_i2c_read_input (size:320b/40B) */ +struct hwrm_port_phy_i2c_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL + __le16 port_id; + u8 i2c_slave_addr; + u8 bank_number; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_i2c_read_output (size:640b/80B) */ +struct hwrm_port_phy_i2c_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_write_input (size:320b/40B) */ +struct hwrm_port_phy_mdio_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; + __le16 port_id; + u8 phy_addr; + u8 dev_addr; + __le16 reg_addr; + __le16 reg_data; + u8 cl45_mdio; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_mdio_write_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_read_input (size:256b/32B) */ +struct hwrm_port_phy_mdio_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; + __le16 port_id; + u8 phy_addr; + u8 dev_addr; + __le16 reg_addr; + u8 cl45_mdio; + u8 unused_1; +}; + +/* hwrm_port_phy_mdio_read_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reg_data; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_port_led_cfg_input (size:512b/64B) */ +struct hwrm_port_led_cfg_input { + __le16 req_type; + 
__le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL + #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL + #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL + #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL + #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL + __le16 port_id; + u8 num_leds; + u8 rsvd; + u8 led0_id; + u8 led0_state; + #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 
0x2UL + #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 rsvd0; + u8 led1_id; + u8 led1_state; + #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 rsvd1; + u8 led2_id; + u8 led2_state; + #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL + #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 rsvd2; + u8 led3_id; + u8 led3_state; + #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL + #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL + #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL + 
#define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 rsvd3; +}; + +/* hwrm_port_led_cfg_output (size:128b/16B) */ +struct hwrm_port_led_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_led_qcfg_input (size:192b/24B) */ +struct hwrm_port_led_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcfg_output (size:448b/56B) */ +struct hwrm_port_led_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID + u8 led0_state; + #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT + u8 led0_color; + #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL + 
#define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER + u8 unused_0; + __le16 led0_blink_on; + __le16 led0_blink_off; + u8 led0_group_id; + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID + u8 led1_state; + #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT + u8 led1_color; + #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER + u8 unused_1; + __le16 led1_blink_on; + __le16 led1_blink_off; + u8 led1_group_id; + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID + u8 led2_state; + #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT + u8 led2_color; + #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL + #define 
PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER + u8 unused_2; + __le16 led2_blink_on; + __le16 led2_blink_off; + u8 led2_group_id; + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID + u8 led3_state; + #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL + #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL + #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL + #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT + u8 led3_color; + #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL + #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER + u8 unused_3; + __le16 led3_blink_on; + __le16 led3_blink_off; + u8 led3_group_id; + u8 unused_4[6]; + u8 valid; +}; + +/* hwrm_port_led_qcaps_input (size:192b/24B) */ +struct hwrm_port_led_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_led_qcaps_output (size:384b/48B) */ +struct hwrm_port_led_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_leds; + u8 unused[3]; + u8 led0_id; + u8 led0_type; + #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST 
PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID + u8 led0_group_id; + u8 unused_0; + __le16 led0_state_caps; + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led0_color_caps; + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led1_id; + u8 led1_type; + #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID + u8 led1_group_id; + u8 unused_1; + __le16 led1_state_caps; + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led1_color_caps; + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led2_id; + u8 led2_type; + #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID + u8 led2_group_id; + u8 unused_2; + __le16 led2_state_caps; + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL 
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led2_color_caps; + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 led3_id; + u8 led3_type; + #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL + #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID + u8 led3_group_id; + u8 unused_3; + __le16 led3_state_caps; + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL + #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL + __le16 led3_color_caps; + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL + #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL + u8 unused_4[3]; + u8 valid; +}; + +/* hwrm_port_prbs_test_input (size:384b/48B) */ +struct hwrm_port_prbs_test_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le16 data_len; + __le16 flags; + #define PORT_PRBS_TEST_REQ_FLAGS_INTERNAL 0x1UL + __le32 unused_1; + __le16 port_id; + __le16 poly; + #define PORT_PRBS_TEST_REQ_POLY_PRBS7 0x0UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS9 0x1UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS11 0x2UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS15 0x3UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS23 0x4UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS31 0x5UL + #define 
PORT_PRBS_TEST_REQ_POLY_PRBS58 0x6UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS49 0x7UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS10 0x8UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS20 0x9UL + #define PORT_PRBS_TEST_REQ_POLY_PRBS13 0xaUL + #define PORT_PRBS_TEST_REQ_POLY_INVALID 0xffUL + #define PORT_PRBS_TEST_REQ_POLY_LAST PORT_PRBS_TEST_REQ_POLY_INVALID + __le16 prbs_config; + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_START_STOP 0x1UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_TX_LANE_MAP_VALID 0x2UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_RX_LANE_MAP_VALID 0x4UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_FEC_STAT_T0_T7 0x8UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_FEC_STAT_T8_T15 0x10UL + #define PORT_PRBS_TEST_REQ_PRBS_CONFIG_T_CODE 0x20UL + __le16 timeout; + __le32 tx_lane_map; + __le32 rx_lane_map; +}; + +/* hwrm_port_prbs_test_output (size:128b/16B) */ +struct hwrm_port_prbs_test_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + u8 ber_format; + #define PORT_PRBS_TEST_RESP_BER_FORMAT_PRBS 0x0UL + #define PORT_PRBS_TEST_RESP_BER_FORMAT_FEC 0x1UL + #define PORT_PRBS_TEST_RESP_BER_FORMAT_LAST PORT_PRBS_TEST_RESP_BER_FORMAT_FEC + u8 unused_0; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_dsc_dump_input (size:320b/40B) */ +struct hwrm_port_dsc_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le16 data_len; + __le16 unused_0; + __le32 data_offset; + __le16 port_id; + __le16 diag_level; + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_LANE 0x0UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_CORE 0x1UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EVENT 0x2UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EYE 0x3UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_REG_CORE 0x4UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_REG_LANE 0x5UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_UC_CORE 0x6UL + #define 
PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_UC_LANE 0x7UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_LANE_DEBUG 0x8UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_BER_VERT 0x9UL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_BER_HORZ 0xaUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_EVENT_SAFE 0xbUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP 0xcUL + #define PORT_DSC_DUMP_REQ_DIAG_LEVEL_LAST PORT_DSC_DUMP_REQ_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP + __le16 lane_number; + __le16 dsc_dump_config; + #define PORT_DSC_DUMP_REQ_DSC_DUMP_CONFIG_START_RETRIEVE 0x1UL + #define PORT_DSC_DUMP_REQ_DSC_DUMP_CONFIG_BIG_BUFFER 0x2UL + #define PORT_DSC_DUMP_REQ_DSC_DUMP_CONFIG_DEFER_CLOSE 0x4UL +}; + +/* hwrm_port_dsc_dump_output (size:128b/16B) */ +struct hwrm_port_dsc_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 total_data_len_high; + u8 unused_1[2]; + u8 flags; + #define PORT_DSC_DUMP_RESP_FLAGS_BIG_BUFFER 0x1UL + u8 valid; +}; + +/* hwrm_port_sfp_sideband_cfg_input (size:256b/32B) */ +struct hwrm_port_sfp_sideband_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le32 enables; + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RS0 0x1UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RS1 0x2UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_TX_DIS 0x4UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_MOD_SEL 0x8UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_RESET_L 0x10UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_LP_MODE 0x20UL + #define PORT_SFP_SIDEBAND_CFG_REQ_ENABLES_PWR_DIS 0x40UL + __le32 flags; + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RS0 0x1UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RS1 0x2UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_TX_DIS 0x4UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_MOD_SEL 0x8UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_RESET_L 0x10UL + #define 
PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_LP_MODE 0x20UL + #define PORT_SFP_SIDEBAND_CFG_REQ_FLAGS_PWR_DIS 0x40UL +}; + +/* hwrm_port_sfp_sideband_cfg_output (size:128b/16B) */ +struct hwrm_port_sfp_sideband_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused[7]; + u8 valid; +}; + +/* hwrm_port_sfp_sideband_qcfg_input (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_sfp_sideband_qcfg_output (size:192b/24B) */ +struct hwrm_port_sfp_sideband_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 supported_mask; + __le32 sideband_signals; + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_MOD_ABS 0x1UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RX_LOS 0x2UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RS0 0x4UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RS1 0x8UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_TX_DIS 0x10UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_TX_FAULT 0x20UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_MOD_SEL 0x40UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_RESET_L 0x80UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_LP_MODE 0x100UL + #define PORT_SFP_SIDEBAND_QCFG_RESP_SIDEBAND_SIGNALS_PWR_DIS 0x200UL + u8 unused[7]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_bus_acquire_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_acquire_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 client_id; + __le16 mdio_bus_timeout; + u8 unused_0[2]; +}; + +/* hwrm_port_phy_mdio_bus_acquire_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_acquire_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 
unused_0; + __le16 client_id; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_bus_release_input (size:192b/24B) */ +struct hwrm_port_phy_mdio_bus_release_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 client_id; + u8 unused_0[4]; +}; + +/* hwrm_port_phy_mdio_bus_release_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_bus_release_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 unused_0; + __le16 clients_id; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_port_tx_fir_cfg_input (size:320b/40B) */ +struct hwrm_port_tx_fir_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mod_type; + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_NRZ 0x0UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_PAM4 0x1UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_C2M_NRZ 0x2UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_C2M_PAM4 0x3UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_PAM4_112 0x4UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_C2M_PAM4_112G 0x5UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_LPO_PAM4_112G 0x6UL + #define PORT_TX_FIR_CFG_REQ_MOD_TYPE_LAST PORT_TX_FIR_CFG_REQ_MOD_TYPE_LPO_PAM4_112G + u8 lane_mask; + u8 unused_0[2]; + __le32 txfir_val_1; + __le32 txfir_val_2; + __le32 txfir_val_3; + __le32 txfir_val_4; + u8 unused_1[4]; +}; + +/* hwrm_port_tx_fir_cfg_output (size:128b/16B) */ +struct hwrm_port_tx_fir_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused[7]; + u8 valid; +}; + +/* hwrm_port_tx_fir_qcfg_input (size:192b/24B) */ +struct hwrm_port_tx_fir_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mod_type; + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_NRZ 0x0UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_PAM4 0x1UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_C2M_NRZ 0x2UL + #define 
PORT_TX_FIR_QCFG_REQ_MOD_TYPE_C2M_PAM4 0x3UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_PAM4_112 0x4UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_C2M_PAM4_112 0x5UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_LPO_PAM4_112 0x6UL + #define PORT_TX_FIR_QCFG_REQ_MOD_TYPE_LAST PORT_TX_FIR_QCFG_REQ_MOD_TYPE_LPO_PAM4_112 + u8 lane_id; + u8 unused[6]; +}; + +/* hwrm_port_tx_fir_qcfg_output (size:256b/32B) */ +struct hwrm_port_tx_fir_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 txfir_val_1; + __le32 txfir_val_2; + __le32 txfir_val_3; + __le32 txfir_val_4; + u8 unused[7]; + u8 valid; +}; + +/* hwrm_port_ep_tx_cfg_input (size:256b/32B) */ +struct hwrm_port_ep_tx_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define PORT_EP_TX_CFG_REQ_ENABLES_EP0_MIN_BW 0x1UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP0_MAX_BW 0x2UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP1_MIN_BW 0x4UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP1_MAX_BW 0x8UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP2_MIN_BW 0x10UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP2_MAX_BW 0x20UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP3_MIN_BW 0x40UL + #define PORT_EP_TX_CFG_REQ_ENABLES_EP3_MAX_BW 0x80UL + u8 port_id; + u8 unused; + u8 ep0_min_bw; + u8 ep0_max_bw; + u8 ep1_min_bw; + u8 ep1_max_bw; + u8 ep2_min_bw; + u8 ep2_max_bw; + u8 ep3_min_bw; + u8 ep3_max_bw; + u8 unused_1[4]; +}; + +/* hwrm_port_ep_tx_cfg_output (size:128b/16B) */ +struct hwrm_port_ep_tx_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_ep_tx_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_ep_tx_cfg_cmd_err { + u8 code; + #define PORT_EP_TX_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_PORT_ID_INVALID 0x1UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_EP_INACTIVE 0x2UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_MIN_BW_RANGE 0x3UL + #define 
PORT_EP_TX_CFG_CMD_ERR_CODE_MIN_MORE_THAN_MAX 0x4UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_MIN_BW_SUM 0x5UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_MIN_BW_UNSUPPORTED 0x6UL + #define PORT_EP_TX_CFG_CMD_ERR_CODE_LAST PORT_EP_TX_CFG_CMD_ERR_CODE_MIN_BW_UNSUPPORTED + u8 unused_0[7]; +}; + +/* hwrm_port_ep_tx_qcfg_input (size:192b/24B) */ +struct hwrm_port_ep_tx_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused[7]; +}; + +/* hwrm_port_ep_tx_qcfg_output (size:192b/24B) */ +struct hwrm_port_ep_tx_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 ep0_min_bw; + u8 ep0_max_bw; + u8 ep1_min_bw; + u8 ep1_max_bw; + u8 ep2_min_bw; + u8 ep2_max_bw; + u8 ep3_min_bw; + u8 ep3_max_bw; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_cfg_input (size:256b/32B) */ +struct hwrm_port_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_CFG_REQ_ENABLES_TX_RATE_LIMIT 0x1UL + __le16 port_id; + __le16 unused_0; + __le32 tx_rate_limit; +}; + +/* hwrm_port_cfg_output (size:128b/16B) */ +struct hwrm_port_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_qcfg_input (size:192b/24B) */ +struct hwrm_port_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_qcfg_output (size:192b/24B) */ +struct hwrm_port_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 supported; + #define PORT_QCFG_RESP_SUPPORTED_TX_RATE_LIMIT 0x1UL + __le32 enabled; + #define PORT_QCFG_RESP_ENABLED_TX_RATE_LIMIT 0x1UL + __le32 tx_rate_limit; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_port_mac_qcaps_input (size:192b/24B) */ +struct 
hwrm_port_mac_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_port_mac_qcaps_output (size:128b/16B) */ +struct hwrm_port_mac_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x1UL + #define PORT_MAC_QCAPS_RESP_FLAGS_REMOTE_LPBK_SUPPORTED 0x2UL + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_qportcfg_input (size:192b/24B) */ +struct hwrm_queue_qportcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX + __le16 port_id; + u8 drv_qmap_cap; + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED + u8 unused_0; +}; + +/* hwrm_queue_qportcfg_output (size:1344b/168B) */ +struct hwrm_queue_qportcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 max_configurable_queues; + u8 max_configurable_lossless_queues; + u8 queue_cfg_allowed; + u8 queue_cfg_info; + #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_USE_PROFILE_TYPE 0x2UL + u8 queue_pfcenable_cfg_allowed; + u8 queue_pri2cos_cfg_allowed; + u8 queue_cos2bw_cfg_allowed; + u8 queue_id0; + u8 queue_id0_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + u8 queue_id1; + u8 queue_id1_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + u8 queue_id2; + u8 queue_id2_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + u8 queue_id3; + u8 queue_id3_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + u8 queue_id4; + u8 queue_id4_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + u8 queue_id5; + u8 queue_id5_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + u8 queue_id6; + u8 queue_id6_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + u8 queue_id7; + u8 
queue_id7_service_profile; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + u8 queue_id0_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL + char qid0_name[16]; + char qid1_name[16]; + char qid2_name[16]; + char qid3_name[16]; + char qid4_name[16]; + char qid5_name[16]; + char qid6_name[16]; + char qid7_name[16]; + u8 queue_id1_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id2_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id3_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id4_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id5_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id6_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 queue_id7_service_profile_type; + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL + u8 valid; +}; + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX + __le32 queue_id; +}; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 queue_len; + u8 service_profile; + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN + u8 queue_cfg_info; + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0; + u8 valid; +}; + +/* hwrm_queue_cfg_input (size:320b/40B) */ +struct hwrm_queue_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + 
#define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR + __le32 enables; + #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL + #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL + __le32 queue_id; + __le32 dflt_len; + u8 service_profile; + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_queue_cfg_output (size:128b/16B) */ +struct hwrm_queue_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcenable_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + #define 
QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcenable_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_ENABLED 0x8UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_ENABLED 0x10UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_WATCHDOG_ENABLED 0x100UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_WATCHDOG_ENABLED 0x200UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_WATCHDOG_ENABLED 0x400UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI3_PFC_WATCHDOG_ENABLED 0x800UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI4_PFC_WATCHDOG_ENABLED 0x1000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_WATCHDOG_ENABLED 0x2000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_WATCHDOG_ENABLED 0x4000UL + #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_WATCHDOG_ENABLED 0x8000UL + __le16 port_id; + u8 unused_0[2]; +}; + +/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */ +struct 
hwrm_queue_pfcenable_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX + #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL + u8 port_id; + u8 unused_0[3]; +}; + +/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */ +struct hwrm_queue_pri2cos_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 queue_cfg_info; + #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */ +struct hwrm_queue_pri2cos_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0 + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL + __le32 enables; + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL + #define 
QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL + #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL + u8 port_id; + u8 pri0_cos_queue_id; + u8 pri1_cos_queue_id; + u8 pri2_cos_queue_id; + u8 pri3_cos_queue_id; + u8 pri4_cos_queue_id; + u8 pri5_cos_queue_id; + u8 pri6_cos_queue_id; + u8 pri7_cos_queue_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */ +struct hwrm_queue_pri2cos_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */ +struct hwrm_queue_cos2bw_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; +}; + +/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */ +struct hwrm_queue_cos2bw_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_id0; + u8 unused_0; + __le16 unused_1; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id3_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_2[4]; + u8 valid; +}; + +/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */ +struct hwrm_queue_cos2bw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL + #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL + __le16 port_id; + 
u8 queue_id0; + u8 unused_0; + __le32 queue_id0_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id0_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL 
<< 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id0_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id0_pri_lvl; + u8 queue_id0_bw_weight; + u8 queue_id1; + __le32 queue_id1_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id1_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id1_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id1_pri_lvl; + u8 queue_id1_bw_weight; + u8 queue_id2; + __le32 queue_id2_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id2_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id2_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id2_pri_lvl; + u8 queue_id2_bw_weight; + u8 queue_id3; + __le32 queue_id3_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID + 
__le32 queue_id3_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id3_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id3_pri_lvl; + u8 queue_id3_bw_weight; + u8 queue_id4; + __le32 queue_id4_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id4_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID 
(0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id4_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id4_pri_lvl; + u8 queue_id4_bw_weight; + u8 queue_id5; + __le32 queue_id5_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id5_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL + 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id5_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id5_pri_lvl; + u8 queue_id5_bw_weight; + u8 queue_id6; + __le32 queue_id6_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id6_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id6_tsa_assign; + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id6_pri_lvl; + u8 queue_id6_bw_weight; + u8 queue_id7; + __le32 queue_id7_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id7_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id7_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id7_pri_lvl; + u8 queue_id7_bw_weight; + u8 unused_1[5]; +}; + +/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */ +struct hwrm_queue_cos2bw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */ +struct 
hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + u8 unused_1[4]; +}; + +/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + u8 unused_0[4]; +}; + +/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_mpls_qcaps_input (size:192b/24B) */ +struct hwrm_queue_mpls_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_mpls_qcaps_output (size:128b/16B) */ +struct hwrm_queue_mpls_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_mplstc2pri_cfg_allowed; + u8 hw_default_pri; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_queue_mplstc2pri_qcfg_input (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_mplstc2pri_qcfg_output (size:192b/24B) */ +struct hwrm_queue_mplstc2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + 
__le16 seq_id; + __le16 resp_len; + u8 tc0_pri_queue_id; + u8 tc1_pri_queue_id; + u8 tc2_pri_queue_id; + u8 tc3_pri_queue_id; + u8 tc4_pri_queue_id; + u8 tc5_pri_queue_id; + u8 tc6_pri_queue_id; + u8 tc7_pri_queue_id; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_mplstc2pri_cfg_input (size:256b/32B) */ +struct hwrm_queue_mplstc2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC0_PRI_QUEUE_ID 0x1UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC1_PRI_QUEUE_ID 0x2UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC2_PRI_QUEUE_ID 0x4UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC3_PRI_QUEUE_ID 0x8UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC4_PRI_QUEUE_ID 0x10UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC5_PRI_QUEUE_ID 0x20UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC6_PRI_QUEUE_ID 0x40UL + #define QUEUE_MPLSTC2PRI_CFG_REQ_ENABLES_TC7_PRI_QUEUE_ID 0x80UL + u8 port_id; + u8 unused_0[3]; + u8 tc0_pri_queue_id; + u8 tc1_pri_queue_id; + u8 tc2_pri_queue_id; + u8 tc3_pri_queue_id; + u8 tc4_pri_queue_id; + u8 tc5_pri_queue_id; + u8 tc6_pri_queue_id; + u8 tc7_pri_queue_id; +}; + +/* hwrm_queue_mplstc2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_mplstc2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_vlanpri_qcaps_input (size:192b/24B) */ +struct hwrm_queue_vlanpri_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_vlanpri_qcaps_output (size:128b/16B) */ +struct hwrm_queue_vlanpri_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hw_default_pri; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_vlanpri2pri_qcfg_input (size:192b/24B) */ +struct hwrm_queue_vlanpri2pri_qcfg_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_queue_vlanpri2pri_qcfg_output (size:192b/24B) */ +struct hwrm_queue_vlanpri2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 vlanpri0_user_pri_id; + u8 vlanpri1_user_pri_id; + u8 vlanpri2_user_pri_id; + u8 vlanpri3_user_pri_id; + u8 vlanpri4_user_pri_id; + u8 vlanpri5_user_pri_id; + u8 vlanpri6_user_pri_id; + u8 vlanpri7_user_pri_id; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_vlanpri2pri_cfg_input (size:256b/32B) */ +struct hwrm_queue_vlanpri2pri_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI0_USER_PRI_ID 0x1UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI1_USER_PRI_ID 0x2UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI2_USER_PRI_ID 0x4UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI3_USER_PRI_ID 0x8UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI4_USER_PRI_ID 0x10UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI5_USER_PRI_ID 0x20UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI6_USER_PRI_ID 0x40UL + #define QUEUE_VLANPRI2PRI_CFG_REQ_ENABLES_VLANPRI7_USER_PRI_ID 0x80UL + u8 port_id; + u8 unused_0[3]; + u8 vlanpri0_user_pri_id; + u8 vlanpri1_user_pri_id; + u8 vlanpri2_user_pri_id; + u8 vlanpri3_user_pri_id; + u8 vlanpri4_user_pri_id; + u8 vlanpri5_user_pri_id; + u8 vlanpri6_user_pri_id; + u8 vlanpri7_user_pri_id; +}; + +/* hwrm_queue_vlanpri2pri_cfg_output (size:128b/16B) */ +struct hwrm_queue_vlanpri2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_global_cfg_input (size:192b/24B) */ +struct hwrm_queue_global_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 
mode; + #define QUEUE_GLOBAL_CFG_REQ_MODE_SHARED 0x0UL + #define QUEUE_GLOBAL_CFG_REQ_MODE_INDEPENDENT 0x1UL + #define QUEUE_GLOBAL_CFG_REQ_MODE_LAST QUEUE_GLOBAL_CFG_REQ_MODE_INDEPENDENT + u8 unused_0; + __le16 enables; + #define QUEUE_GLOBAL_CFG_REQ_ENABLES_MODE 0x1UL + #define QUEUE_GLOBAL_CFG_REQ_ENABLES_G0_MAX_BW 0x2UL + #define QUEUE_GLOBAL_CFG_REQ_ENABLES_G1_MAX_BW 0x4UL + #define QUEUE_GLOBAL_CFG_REQ_ENABLES_G2_MAX_BW 0x8UL + #define QUEUE_GLOBAL_CFG_REQ_ENABLES_G3_MAX_BW 0x10UL + u8 g0_max_bw; + u8 g1_max_bw; + u8 g2_max_bw; + u8 g3_max_bw; +}; + +/* hwrm_queue_global_cfg_output (size:128b/16B) */ +struct hwrm_queue_global_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_global_qcfg_input (size:128b/16B) */ +struct hwrm_queue_global_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_global_qcfg_output (size:320b/40B) */ +struct hwrm_queue_global_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 buffer_pool_id0_map; + u8 buffer_pool_id1_map; + u8 buffer_pool_id2_map; + u8 buffer_pool_id3_map; + __le32 buffer_pool_id0_size; + __le32 buffer_pool_id1_size; + __le32 buffer_pool_id2_size; + __le32 buffer_pool_id3_size; + __le16 flags; + #define QUEUE_GLOBAL_QCFG_RESP_FLAGS_MAPPING 0x1UL + #define QUEUE_GLOBAL_QCFG_RESP_FLAGS_MAPPING_MAPPING_PER_PORT 0x0UL + #define QUEUE_GLOBAL_QCFG_RESP_FLAGS_MAPPING_MAPPING_PER_ENDPOINT 0x1UL + #define QUEUE_GLOBAL_QCFG_RESP_FLAGS_MAPPING_LAST QUEUE_GLOBAL_QCFG_RESP_FLAGS_MAPPING_MAPPING_PER_ENDPOINT + u8 mode; + #define QUEUE_GLOBAL_QCFG_RESP_MODE_SHARED 0x0UL + #define QUEUE_GLOBAL_QCFG_RESP_MODE_INDEPENDENT 0x1UL + #define QUEUE_GLOBAL_QCFG_RESP_MODE_LAST QUEUE_GLOBAL_QCFG_RESP_MODE_INDEPENDENT + u8 unused_0; + u8 g0_max_bw; + u8 g1_max_bw; + u8 g2_max_bw; + u8 g3_max_bw; + u8 unused_1[3]; + u8 valid; +}; + +/* 
hwrm_queue_adptv_qos_rx_feature_qcfg_input (size:128b/16B) */ +struct hwrm_queue_adptv_qos_rx_feature_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_adptv_qos_rx_feature_qcfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_rx_feature_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_enable; + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_DISABLED 0x0UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_ENABLED 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE 0x2UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_DISABLED (0x0UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_ENABLED (0x1UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE 0x4UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_DISABLED (0x0UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_ENABLED (0x1UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE 0x8UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_DISABLED (0x0UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_ENABLED (0x1UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_LAST 
QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE 0x10UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_DISABLED (0x0UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_ENABLED (0x1UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE 0x20UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_DISABLED (0x0UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_ENABLED (0x1UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE 0x40UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_DISABLED (0x0UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_ENABLED (0x1UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE 0x80UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_DISABLED (0x0UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_ENABLED (0x1UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_ENABLED + u8 queue_mode; + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID0_MODE 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID0_MODE_LOSSY 0x0UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID0_MODE_LOSSLESS 0x1UL + #define 
QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID0_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID0_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID1_MODE 0x2UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID1_MODE_LOSSY (0x0UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID1_MODE_LOSSLESS (0x1UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID1_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID1_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID2_MODE 0x4UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID2_MODE_LOSSY (0x0UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID2_MODE_LOSSLESS (0x1UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID2_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID2_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID3_MODE 0x8UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID3_MODE_LOSSY (0x0UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID3_MODE_LOSSLESS (0x1UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID3_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID3_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID4_MODE 0x10UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID4_MODE_LOSSY (0x0UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID4_MODE_LOSSLESS (0x1UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID4_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID4_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID5_MODE 0x20UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID5_MODE_LOSSY (0x0UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID5_MODE_LOSSLESS (0x1UL << 5) + #define 
QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID5_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID5_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID6_MODE 0x40UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID6_MODE_LOSSY (0x0UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID6_MODE_LOSSLESS (0x1UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID6_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID6_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID7_MODE 0x80UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID7_MODE_LOSSY (0x0UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID7_MODE_LOSSLESS (0x1UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID7_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_RESP_QUEUE_MODE_QID7_MODE_LOSSLESS + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_rx_feature_cfg_input (size:192b/24B) */ +struct hwrm_queue_adptv_qos_rx_feature_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_ENABLES_QUEUE_ENABLE 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_ENABLES_QUEUE_MODE 0x2UL + u8 queue_enable; + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_DISABLED 0x0UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_ENABLED 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE 0x2UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_DISABLED (0x0UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_ENABLED (0x1UL << 1) + #define 
QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE 0x4UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_DISABLED (0x0UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_ENABLED (0x1UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE 0x8UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_DISABLED (0x0UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_ENABLED (0x1UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE 0x10UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_DISABLED (0x0UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_ENABLED (0x1UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE 0x20UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_DISABLED (0x0UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_ENABLED (0x1UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE 0x40UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_DISABLED (0x0UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_ENABLED (0x1UL << 
6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE 0x80UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_DISABLED (0x0UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_ENABLED (0x1UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_ENABLED + u8 queue_mode; + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID0_MODE 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID0_MODE_LOSSY 0x0UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID0_MODE_LOSSLESS 0x1UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID0_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID0_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID1_MODE 0x2UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID1_MODE_LOSSY (0x0UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID1_MODE_LOSSLESS (0x1UL << 1) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID1_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID1_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID2_MODE 0x4UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID2_MODE_LOSSY (0x0UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID2_MODE_LOSSLESS (0x1UL << 2) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID2_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID2_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID3_MODE 0x8UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID3_MODE_LOSSY (0x0UL << 3) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID3_MODE_LOSSLESS (0x1UL << 3) + #define 
QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID3_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID3_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID4_MODE 0x10UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID4_MODE_LOSSY (0x0UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID4_MODE_LOSSLESS (0x1UL << 4) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID4_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID4_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID5_MODE 0x20UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID5_MODE_LOSSY (0x0UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID5_MODE_LOSSLESS (0x1UL << 5) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID5_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID5_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID6_MODE 0x40UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID6_MODE_LOSSY (0x0UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID6_MODE_LOSSLESS (0x1UL << 6) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID6_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID6_MODE_LOSSLESS + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID7_MODE 0x80UL + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID7_MODE_LOSSY (0x0UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID7_MODE_LOSSLESS (0x1UL << 7) + #define QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID7_MODE_LAST QUEUE_ADPTV_QOS_RX_FEATURE_CFG_REQ_QUEUE_MODE_QID7_MODE_LOSSLESS + u8 unused_0[2]; +}; + +/* hwrm_queue_adptv_qos_rx_feature_cfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_rx_feature_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_tx_feature_qcfg_input (size:128b/16B) */ +struct 
hwrm_queue_adptv_qos_tx_feature_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_adptv_qos_tx_feature_qcfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_tx_feature_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 queue_enable; + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE 0x1UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_DISABLED 0x0UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_ENABLED 0x1UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID0_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE 0x2UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_DISABLED (0x0UL << 1) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_ENABLED (0x1UL << 1) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID1_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE 0x4UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_DISABLED (0x0UL << 2) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_ENABLED (0x1UL << 2) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID2_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE 0x8UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_DISABLED (0x0UL << 3) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_ENABLED (0x1UL << 3) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID3_ENABLE_ENABLED + 
#define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE 0x10UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_DISABLED (0x0UL << 4) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_ENABLED (0x1UL << 4) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID4_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE 0x20UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_DISABLED (0x0UL << 5) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_ENABLED (0x1UL << 5) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID5_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE 0x40UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_DISABLED (0x0UL << 6) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_ENABLED (0x1UL << 6) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID6_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE 0x80UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_DISABLED (0x0UL << 7) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_ENABLED (0x1UL << 7) + #define QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_RESP_QUEUE_ENABLE_QID7_ENABLE_ENABLED + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_tx_feature_cfg_input (size:192b/24B) */ +struct hwrm_queue_adptv_qos_tx_feature_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_ENABLES_QUEUE_ENABLE 0x1UL + u8 
queue_enable; + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE 0x1UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_DISABLED 0x0UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_ENABLED 0x1UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID0_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE 0x2UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_DISABLED (0x0UL << 1) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_ENABLED (0x1UL << 1) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID1_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE 0x4UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_DISABLED (0x0UL << 2) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_ENABLED (0x1UL << 2) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID2_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE 0x8UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_DISABLED (0x0UL << 3) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_ENABLED (0x1UL << 3) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID3_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE 0x10UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_DISABLED (0x0UL << 4) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_ENABLED (0x1UL << 4) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_LAST 
QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID4_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE 0x20UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_DISABLED (0x0UL << 5) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_ENABLED (0x1UL << 5) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID5_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE 0x40UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_DISABLED (0x0UL << 6) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_ENABLED (0x1UL << 6) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID6_ENABLE_ENABLED + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE 0x80UL + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_DISABLED (0x0UL << 7) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_ENABLED (0x1UL << 7) + #define QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_LAST QUEUE_ADPTV_QOS_TX_FEATURE_CFG_REQ_QUEUE_ENABLE_QID7_ENABLE_ENABLED + u8 unused_0[3]; +}; + +/* hwrm_queue_adptv_qos_tx_feature_cfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_tx_feature_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_qcaps_input (size:128b/16B) */ +struct hwrm_queue_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_qcaps_output (size:256b/32B) */ +struct hwrm_queue_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 rx_feature_params; + #define QUEUE_QCAPS_RESP_RX_FEATURE_PARAMS_QUEUE_ENABLE_CAP 0x1UL + #define 
QUEUE_QCAPS_RESP_RX_FEATURE_PARAMS_QUEUE_MODE_CAP 0x2UL + __le32 tx_feature_params; + #define QUEUE_QCAPS_RESP_TX_FEATURE_PARAMS_QUEUE_ENABLE_CAP 0x1UL + u8 max_configurable_queues; + u8 unused_0[3]; + __le32 rx_tuning_params; + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_WFQ_COST_CAP 0x1UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP 0x2UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP 0x4UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_PCIE_BW_EFF_CAP 0x8UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_XOFF_HEADROOM_FACTOR_CAP 0x10UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_L2_MIN_LATENCY_CAP 0x20UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_L2_MAX_LATENCY_CAP 0x40UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP 0x80UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP 0x100UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_L2_PIPE_COS_LATENCY_CAP 0x200UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_ROCE_PIPE_COS_LATENCY_CAP 0x400UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_COS_SHARED_MIN_RATIO_CAP 0x800UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP 0x1000UL + #define QUEUE_QCAPS_RESP_RX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP 0x2000UL + __le32 tx_tuning_params; + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_WFQ_COST_CAP 0x1UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP 0x2UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP 0x4UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP 0x8UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_L2_MIN_LATENCY_CAP 0x10UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_L2_MAX_LATENCY_CAP 0x20UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP 0x40UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP 0x80UL + #define QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_MAX_TBM_CELLS_PRERESERVED_CAP 0x100UL + #define 
QUEUE_QCAPS_RESP_TX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP 0x200UL + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_rx_tuning_qcfg_input (size:128b/16B) */ +struct hwrm_queue_adptv_qos_rx_tuning_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_adptv_qos_rx_tuning_qcfg_output (size:576b/72B) */ +struct hwrm_queue_adptv_qos_rx_tuning_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 wfq_cost; + __le32 wfq_upper_factor; + __le32 hyst_window_size_factor; + __le32 pcie_bw_eff; + __le32 xoff_headroom_factor; + __le32 l2_min_latency; + __le32 l2_max_latency; + __le32 roce_min_latency; + __le32 roce_max_latency; + __le32 l2_pipe_cos_latency; + __le32 roce_pipe_cos_latency; + __le32 cos_shared_min_ratio; + __le32 rsvd_cells_limit_ratio; + __le32 shaper_refill_timer; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_rx_tuning_cfg_input (size:640b/80B) */ +struct hwrm_queue_adptv_qos_rx_tuning_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_WFQ_COST 0x1UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_WFQ_UPPER_FACTOR 0x2UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_HYST_WINDOW_SIZE_FACTOR 0x4UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_PCIE_BW_EFF 0x8UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_XOFF_HEADROOM_FACTOR 0x10UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_L2_MIN_LATENCY 0x20UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_L2_MAX_LATENCY 0x40UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_ROCE_MIN_LATENCY 0x80UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_ROCE_MAX_LATENCY 0x100UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_L2_PIPE_COS_LATENCY 0x200UL + #define 
QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_ROCE_PIPE_COS_LATENCY 0x400UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_COS_SHARED_MIN_RATIO 0x800UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_RSVD_CELLS_LIMIT_RATIO 0x1000UL + #define QUEUE_ADPTV_QOS_RX_TUNING_CFG_REQ_ENABLES_SHAPER_REFILL_TIMER 0x2000UL + __le32 wfq_cost; + __le32 wfq_upper_factor; + __le32 hyst_window_size_factor; + __le32 pcie_bw_eff; + __le32 xoff_headroom_factor; + __le32 l2_min_latency; + __le32 l2_max_latency; + __le32 roce_min_latency; + __le32 roce_max_latency; + __le32 l2_pipe_cos_latency; + __le32 roce_pipe_cos_latency; + __le32 cos_shared_min_ratio; + __le32 rsvd_cells_limit_ratio; + __le32 shaper_refill_timer; + u8 unused_0[4]; +}; + +/* hwrm_queue_adptv_qos_rx_tuning_cfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_rx_tuning_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_tx_tuning_qcfg_input (size:128b/16B) */ +struct hwrm_queue_adptv_qos_tx_tuning_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_adptv_qos_tx_tuning_qcfg_output (size:448b/56B) */ +struct hwrm_queue_adptv_qos_tx_tuning_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 wfq_cost; + __le32 wfq_upper_factor; + __le32 hyst_window_size_factor; + __le32 rsvd_cells_limit_ratio; + __le32 l2_min_latency; + __le32 l2_max_latency; + __le32 roce_min_latency; + __le32 roce_max_latency; + __le32 max_tbm_cells_prereserved; + __le32 shaper_refill_timer; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_adptv_qos_tx_tuning_cfg_input (size:512b/64B) */ +struct hwrm_queue_adptv_qos_tx_tuning_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_WFQ_COST 0x1UL + 
#define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_WFQ_UPPER_FACTOR 0x2UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_HYST_WINDOW_SIZE_FACTOR 0x4UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_RSVD_CELLS_LIMIT_RATIO 0x8UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_L2_MIN_LATENCY 0x10UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_L2_MAX_LATENCY 0x20UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_ROCE_MIN_LATENCY 0x40UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_ROCE_MAX_LATENCY 0x80UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_MAX_TBM_CELLS_PRERESERVED 0x100UL + #define QUEUE_ADPTV_QOS_TX_TUNING_CFG_REQ_ENABLES_SHAPER_REFILL_TIMER 0x200UL + __le32 wfq_cost; + __le32 wfq_upper_factor; + __le32 hyst_window_size_factor; + __le32 rsvd_cells_limit_ratio; + __le32 l2_min_latency; + __le32 l2_max_latency; + __le32 roce_min_latency; + __le32 roce_max_latency; + __le32 max_tbm_cells_prereserved; + __le32 shaper_refill_timer; + u8 unused_0[4]; +}; + +/* hwrm_queue_adptv_qos_tx_tuning_cfg_output (size:128b/16B) */ +struct hwrm_queue_adptv_qos_tx_tuning_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */ +struct hwrm_queue_pfcwd_timeout_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */ +struct hwrm_queue_pfcwd_timeout_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 max_pfcwd_timeout; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */ +struct hwrm_queue_pfcwd_timeout_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 pfcwd_timeout_value; + u8 unused_0[4]; +}; + +/* hwrm_queue_pfcwd_timeout_cfg_output 
(size:128b/16B) */ +struct hwrm_queue_pfcwd_timeout_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */ +struct hwrm_queue_pfcwd_timeout_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */ +struct hwrm_queue_pfcwd_timeout_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 pfcwd_timeout_value; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_alloc_input (size:192b/24B) */ +struct hwrm_vnic_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL + __le16 virtio_net_fid; + u8 unused_0[2]; +}; + +/* hwrm_vnic_alloc_output (size:128b/16B) */ +struct hwrm_vnic_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 vnic_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_update_input (size:256b/32B) */ +struct hwrm_vnic_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 enables; + #define VNIC_UPDATE_REQ_ENABLES_VNIC_STATE_VALID 0x1UL + #define VNIC_UPDATE_REQ_ENABLES_MRU_VALID 0x2UL + #define VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID 0x4UL + u8 vnic_state; + #define VNIC_UPDATE_REQ_VNIC_STATE_NORMAL 0x0UL + #define VNIC_UPDATE_REQ_VNIC_STATE_DROP 0x1UL + #define VNIC_UPDATE_REQ_VNIC_STATE_LAST VNIC_UPDATE_REQ_VNIC_STATE_DROP + u8 metadata_format_type; + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_0 0x0UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_1 0x1UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_2 0x2UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3 
0x3UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 0x4UL + #define VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_LAST VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_4 + __le16 mru; + u8 unused_1[4]; +}; + +/* hwrm_vnic_update_output (size:128b/16B) */ +struct hwrm_vnic_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_free_input (size:192b/24B) */ +struct hwrm_vnic_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_free_output (size:128b/16B) */ +struct hwrm_vnic_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_cfg_input (size:384b/48B) */ +struct hwrm_vnic_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL + #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL + __le32 enables; + #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL + #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL + #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL + #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL + #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL + #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL + #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL + #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL + #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL + __le16 vnic_id; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; 
+ __le16 lb_rule; + __le16 mru; + __le16 default_rx_ring_id; + __le16 default_cmpl_ring_id; + __le16 queue_id; + u8 rx_csum_v2_mode; + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX + u8 l2_cqe_mode; + #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL + #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL + #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED + u8 unused0[4]; +}; + +/* hwrm_vnic_cfg_output (size:128b/16B) */ +struct hwrm_vnic_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_qcfg_input (size:256b/32B) */ +struct hwrm_vnic_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define VNIC_QCFG_REQ_ENABLES_VF_ID_VALID 0x1UL + __le32 vnic_id; + __le16 vf_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dflt_ring_grp; + __le16 rss_rule; + __le16 cos_rule; + __le16 lb_rule; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCFG_RESP_FLAGS_DEFAULT 0x1UL + #define VNIC_QCFG_RESP_FLAGS_VLAN_STRIP_MODE 0x2UL + #define VNIC_QCFG_RESP_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL + #define VNIC_QCFG_RESP_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_QCFG_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL + #define VNIC_QCFG_RESP_FLAGS_OPERATION_STATE 0x80UL + #define VNIC_QCFG_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x100UL + __le16 queue_id; + u8 rx_csum_v2_mode; + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define 
VNIC_QCFG_RESP_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_QCFG_RESP_RX_CSUM_V2_MODE_LAST VNIC_QCFG_RESP_RX_CSUM_V2_MODE_MAX + u8 l2_cqe_mode; + #define VNIC_QCFG_RESP_L2_CQE_MODE_DEFAULT 0x0UL + #define VNIC_QCFG_RESP_L2_CQE_MODE_COMPRESSED 0x1UL + #define VNIC_QCFG_RESP_L2_CQE_MODE_MIXED 0x2UL + #define VNIC_QCFG_RESP_L2_CQE_MODE_LAST VNIC_QCFG_RESP_L2_CQE_MODE_MIXED + u8 metadata_format_type; + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_0 0x0UL + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_1 0x1UL + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_2 0x2UL + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_3 0x3UL + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_4 0x4UL + #define VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_LAST VNIC_QCFG_RESP_METADATA_FORMAT_TYPE_4 + u8 vnic_state; + #define VNIC_QCFG_RESP_VNIC_STATE_NORMAL 0x0UL + #define VNIC_QCFG_RESP_VNIC_STATE_DROP 0x1UL + #define VNIC_QCFG_RESP_VNIC_STATE_LAST VNIC_QCFG_RESP_VNIC_STATE_DROP + u8 unused_1; + u8 valid; +}; + +/* hwrm_vnic_qcaps_input (size:192b/24B) */ +struct hwrm_vnic_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + u8 unused_0[4]; +}; + +/* hwrm_vnic_qcaps_output (size:192b/24B) */ +struct hwrm_vnic_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 mru; + u8 unused_0[2]; + __le32 flags; + #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL + #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL + #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL + #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 
0x200UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL + #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL + #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL + #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL + #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL + #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL + #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL + #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL + #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL + __le16 max_aggs_supported; + u8 unused_1[5]; + u8 valid; +}; + +/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */ +struct hwrm_vnic_tpa_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL + #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL + #define 
VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL + #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL + __le32 enables; + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL + #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL + #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL + #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL + __le16 vnic_id; + __le16 max_agg_segs; + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX + u8 unused_0[2]; + __le32 max_agg_timer; + __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define 
VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_1[4]; +}; + +/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */ +struct hwrm_vnic_tpa_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_tpa_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vnic_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */ +struct hwrm_vnic_tpa_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL + #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL + #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL + #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL + #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL + __le16 max_agg_segs; + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL + #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX + __le16 max_aggs; + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL + #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST 
VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX + __le32 max_agg_timer; + __le32 min_agg_len; + __le32 tnl_tpa_en_bitmap; + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL + #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_input (size:384b/48B) */ +struct hwrm_vnic_rss_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 hash_type; + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL + __le16 vnic_id; + u8 
ring_table_pair_index; + u8 hash_mode_flags; + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + __le64 ring_grp_tbl_addr; + __le64 hash_key_tbl_addr; + __le16 rss_ctx_idx; + u8 flags; + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL + #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL + #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL + u8 ring_select_mode; + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM + u8 unused_1[4]; +}; + +/* hwrm_vnic_rss_cfg_output (size:128b/16B) */ +struct hwrm_vnic_rss_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_rss_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_rss_cfg_cmd_err { + u8 code; + #define VNIC_RSS_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY 0x1UL + #define VNIC_RSS_CFG_CMD_ERR_CODE_LAST VNIC_RSS_CFG_CMD_ERR_CODE_INTERFACE_NOT_READY + u8 unused_0[7]; +}; + +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + __le16 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define 
VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 ring_select_mode; + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM + u8 unused_1[5]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ +struct hwrm_vnic_plcmodes_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL + __le32 enables; + #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL + #define 
VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL + __le32 vnic_id; + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 max_bds; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ +struct hwrm_vnic_plcmodes_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_vnic_plcmodes_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_plcmodes_qcfg_output (size:192b/24B) */ +struct hwrm_vnic_plcmodes_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_REGULAR_PLACEMENT 0x1UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_JUMBO_PLACEMENT 0x2UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV4 0x4UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_IPV6 0x8UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_FCOE 0x10UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_DFLT_VNIC 0x40UL + #define VNIC_PLCMODES_QCFG_RESP_FLAGS_VIRTIO_PLACEMENT 0x80UL + __le16 jumbo_thresh; + __le16 hds_offset; + __le16 hds_threshold; + __le16 max_bds; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */ +struct 
hwrm_vnic_rss_cos_lb_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_cos_lb_ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_alloc_input (size:704b/88B) */ +struct hwrm_ring_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL + #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL + u8 ring_type; + #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL + #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL + #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL + #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ + u8 cmpl_coal_cnt; + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL + #define 
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL + #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX + __le16 flags; + #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL + #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL + #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL + #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL + __le64 page_tbl_addr; + __le32 fbo; + u8 page_size; + u8 page_tbl_depth; + __le16 schq_id; + __le32 length; + __le16 logical_id; + __le16 cmpl_ring_id; + __le16 queue_id; + __le16 rx_buf_size; + __le16 rx_ring_id; + __le16 nq_ring_id; + __le16 ring_arb_cfg; + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL + #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4 + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL + #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8 + __le16 steering_tag; + __le32 reserved3; + __le32 stat_ctx_id; + __le32 reserved4; + __le32 max_bw; + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0 + #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL + #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES + #define 
RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID + u8 int_mode; + #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL + #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL + #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL + #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL + #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL + u8 mpc_chnls_type; + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL + #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE + u8 unused_4[2]; + __le64 cq_handle; +}; + +/* hwrm_ring_alloc_output (size:128b/16B) */ +struct hwrm_ring_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ring_id; + __le16 logical_ring_id; + u8 push_buffer_index; + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL + #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER + u8 unused_0[2]; + u8 valid; +}; + +/* hwrm_ring_free_input (size:256b/32B) */ +struct hwrm_ring_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL + #define 
RING_FREE_REQ_RING_TYPE_TX 0x1UL + #define RING_FREE_REQ_RING_TYPE_RX 0x2UL + #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL + #define RING_FREE_REQ_RING_TYPE_NQ 0x5UL + #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ + u8 flags; + #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL + #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID + __le16 ring_id; + __le32 prod_idx; + __le32 opaque; + __le32 unused_1; +}; + +/* hwrm_ring_free_output (size:128b/16B) */ +struct hwrm_ring_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_reset_input (size:192b/24B) */ +struct hwrm_ring_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP + u8 unused_0; + __le16 ring_id; + u8 unused_1[4]; +}; + +/* hwrm_ring_reset_output (size:128b/16B) */ +struct hwrm_ring_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 push_buffer_index; + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL + #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER + u8 unused_0[3]; + u8 consumer_idx[3]; + u8 valid; +}; + +/* hwrm_ring_cfg_input (size:320b/40B) */ +struct hwrm_ring_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_CFG_REQ_RING_TYPE_TX 0x1UL + #define RING_CFG_REQ_RING_TYPE_RX 0x2UL + #define 
RING_CFG_REQ_RING_TYPE_LAST RING_CFG_REQ_RING_TYPE_RX + u8 unused_0; + __le16 ring_id; + __le16 enables; + #define RING_CFG_REQ_ENABLES_RX_SOP_PAD_ENABLE 0x1UL + #define RING_CFG_REQ_ENABLES_PROXY_MODE_ENABLE 0x2UL + #define RING_CFG_REQ_ENABLES_TX_PROXY_SRC_INTF_OVERRIDE 0x4UL + #define RING_CFG_REQ_ENABLES_SCHQ_ID 0x8UL + #define RING_CFG_REQ_ENABLES_CMPL_RING_ID_UPDATE 0x10UL + #define RING_CFG_REQ_ENABLES_TX_METADATA 0x20UL + __le16 proxy_fid; + __le16 schq_id; + __le16 cmpl_ring_id; + u8 rx_sop_pad_bytes; + u8 unused_1[3]; + __le32 tx_metadata; + u8 unused_2[4]; +}; + +/* hwrm_ring_cfg_output (size:128b/16B) */ +struct hwrm_ring_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_qcfg_input (size:192b/24B) */ +struct hwrm_ring_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + #define RING_QCFG_REQ_RING_TYPE_TX 0x1UL + #define RING_QCFG_REQ_RING_TYPE_RX 0x2UL + #define RING_QCFG_REQ_RING_TYPE_LAST RING_QCFG_REQ_RING_TYPE_RX + u8 unused_0[5]; + __le16 ring_id; +}; + +/* hwrm_ring_qcfg_output (size:256b/32B) */ +struct hwrm_ring_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 enables; + #define RING_QCFG_RESP_ENABLES_RX_SOP_PAD_ENABLE 0x1UL + #define RING_QCFG_RESP_ENABLES_PROXY_MODE_ENABLE 0x2UL + #define RING_QCFG_RESP_ENABLES_TX_PROXY_SRC_INTF_OVERRIDE 0x4UL + __le16 proxy_fid; + __le16 schq_id; + __le16 cmpl_ring_id; + u8 rx_sop_pad_bytes; + u8 unused_0[3]; + __le32 tx_metadata; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_ring_aggint_qcaps_input (size:128b/16B) */ +struct hwrm_ring_aggint_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_ring_aggint_qcaps_output (size:384b/48B) */ +struct hwrm_ring_aggint_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + __le32 cmpl_params; + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN 0x1UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX 0x2UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET 0x4UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE 0x8UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR 0x10UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT 0x20UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR 0x40UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT 0x80UL + #define RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT 0x100UL + __le32 nq_params; + #define RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN 0x1UL + __le16 num_cmpl_dma_aggr_min; + __le16 num_cmpl_dma_aggr_max; + __le16 num_cmpl_dma_aggr_during_int_min; + __le16 num_cmpl_dma_aggr_during_int_max; + __le16 cmpl_aggr_dma_tmr_min; + __le16 cmpl_aggr_dma_tmr_max; + __le16 cmpl_aggr_dma_tmr_during_int_min; + __le16 cmpl_aggr_dma_tmr_during_int_max; + __le16 int_lat_tmr_min_min; + __le16 int_lat_tmr_min_max; + __le16 int_lat_tmr_max_min; + __le16 int_lat_tmr_max_max; + __le16 num_cmpl_aggr_int_min; + __le16 num_cmpl_aggr_int_max; + __le16 timer_units; + u8 unused_0[1]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_MASK 0x3UL + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_UNUSED_0_SFT 0 + #define RING_CMPL_RING_QAGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */ +struct hwrm_ring_cmpl_ring_qaggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define 
RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ring_id; + __le16 flags; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ 0x4UL + __le16 num_cmpl_dma_aggr; + __le16 num_cmpl_dma_aggr_during_int; + __le16 cmpl_aggr_dma_tmr; + __le16 cmpl_aggr_dma_tmr_during_int; + __le16 int_lat_tmr_min; + __le16 int_lat_tmr_max; + __le16 num_cmpl_aggr_int; + __le16 enables; + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR 0x1UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT 0x2UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR 0x4UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN 0x8UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX 0x10UL + #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT 0x20UL + u8 unused_0[4]; +}; + +/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */ +struct hwrm_ring_cmpl_ring_cfg_aggint_params_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_grp_alloc_input (size:192b/24B) */ +struct hwrm_ring_grp_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 cr; 
+ __le16 rr; + __le16 ar; + __le16 sc; +}; + +/* hwrm_ring_grp_alloc_output (size:128b/16B) */ +struct hwrm_ring_grp_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 ring_group_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_ring_grp_free_input (size:192b/24B) */ +struct hwrm_ring_grp_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 ring_group_id; + u8 unused_0[4]; +}; + +/* hwrm_ring_grp_free_output (size:128b/16B) */ +struct hwrm_ring_grp_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_schq_alloc_input (size:1088b/136B) */ +struct hwrm_ring_schq_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING0 0x1UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING1 0x2UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING2 0x4UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING3 0x8UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING4 0x10UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING5 0x20UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING6 0x40UL + #define RING_SCHQ_ALLOC_REQ_ENABLES_TQM_RING7 0x80UL + __le32 reserved; + u8 tqm_ring0_pg_size_tqm_ring0_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING0_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_8K (0x1UL << 4) + 
#define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING0_PG_SIZE_PG_1G + u8 tqm_ring1_pg_size_tqm_ring1_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING1_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING1_PG_SIZE_PG_1G + u8 tqm_ring2_pg_size_tqm_ring2_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING2_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_4K (0x0UL << 4) + #define 
RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING2_PG_SIZE_PG_1G + u8 tqm_ring3_pg_size_tqm_ring3_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING3_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING3_PG_SIZE_PG_1G + u8 tqm_ring4_pg_size_tqm_ring4_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING4_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_SFT 4 + #define 
RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING4_PG_SIZE_PG_1G + u8 tqm_ring5_pg_size_tqm_ring5_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING5_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING5_PG_SIZE_PG_1G + u8 tqm_ring6_pg_size_tqm_ring6_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING6_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_MASK 0xf0UL + #define 
RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING6_PG_SIZE_PG_1G + u8 tqm_ring7_pg_size_tqm_ring7_lvl; + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_MASK 0xfUL + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_SFT 0 + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_LVL_0 0x0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_LVL_1 0x1UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_LVL_2 0x2UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_LAST RING_SCHQ_ALLOC_REQ_TQM_RING7_LVL_LVL_2 + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_MASK 0xf0UL + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_SFT 4 + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_4K (0x0UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_8K (0x1UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_64K (0x2UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_2M (0x3UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_8M (0x4UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_1G (0x5UL << 4) + #define RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_LAST RING_SCHQ_ALLOC_REQ_TQM_RING7_PG_SIZE_PG_1G + __le64 tqm_ring0_page_dir; + __le64 tqm_ring1_page_dir; + __le64 tqm_ring2_page_dir; + __le64 tqm_ring3_page_dir; + __le64 tqm_ring4_page_dir; + __le64 tqm_ring5_page_dir; + __le64 tqm_ring6_page_dir; + __le64 tqm_ring7_page_dir; + __le32 tqm_ring0_num_entries; + __le32 tqm_ring1_num_entries; + __le32 tqm_ring2_num_entries; + __le32 tqm_ring3_num_entries; + __le32 tqm_ring4_num_entries; + __le32 tqm_ring5_num_entries; + __le32 
tqm_ring6_num_entries; + __le32 tqm_ring7_num_entries; + __le16 tqm_entry_size; + u8 unused_0[6]; +}; + +/* hwrm_ring_schq_alloc_output (size:128b/16B) */ +struct hwrm_ring_schq_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 schq_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_ring_schq_cfg_input (size:768b/96B) */ +struct hwrm_ring_schq_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 schq_id; + u8 tc_enabled; + u8 unused_0; + __le32 flags; + #define RING_SCHQ_CFG_REQ_FLAGS_TC_MAX_BW_ENABLED 0x1UL + #define RING_SCHQ_CFG_REQ_FLAGS_TC_RESERVATION_ENABLED 0x2UL + __le32 max_bw_tc0; + __le32 max_bw_tc1; + __le32 max_bw_tc2; + __le32 max_bw_tc3; + __le32 max_bw_tc4; + __le32 max_bw_tc5; + __le32 max_bw_tc6; + __le32 max_bw_tc7; + __le32 tc_bw_reservation0; + __le32 tc_bw_reservation1; + __le32 tc_bw_reservation2; + __le32 tc_bw_reservation3; + __le32 tc_bw_reservation4; + __le32 tc_bw_reservation5; + __le32 tc_bw_reservation6; + __le32 tc_bw_reservation7; + __le32 max_bw; + u8 unused_1[4]; +}; + +/* hwrm_ring_schq_cfg_output (size:128b/16B) */ +struct hwrm_ring_schq_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_ring_schq_free_input (size:192b/24B) */ +struct hwrm_ring_schq_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 schq_id; + u8 unused_0[6]; +}; + +/* hwrm_ring_schq_free_output (size:128b/16B) */ +struct hwrm_ring_schq_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; +#define DEFAULT_FLOW_ID 0xFFFFFFFFUL +#define ROCEV1_FLOW_ID 0xFFFFFFFEUL +#define ROCEV2_FLOW_ID 0xFFFFFFFDUL +#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL + +/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_l2_filter_alloc_input { + 
__le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_MASK 0x30UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_SFT 4 + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL + __le32 enables; + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL + #define 
CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL + #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL + u8 l2_addr[6]; + u8 num_vlans; + u8 t_num_vlans; + u8 l2_addr_mask[6]; + __le16 l2_ovlan; + __le16 l2_ovlan_mask; + __le16 l2_ivlan; + __le16 l2_ivlan_mask; + u8 unused_1[2]; + u8 t_l2_addr[6]; + u8 unused_2[2]; + u8 t_l2_addr_mask[6]; + __le16 t_l2_ovlan; + __le16 t_l2_ovlan_mask; + __le16 t_l2_ivlan; + __le16 t_l2_ivlan_mask; + u8 src_type; + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG + u8 unused_3; + __le32 src_id; + u8 tunnel_type; + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_4; + __le16 dst_id; + __le16 mirror_vnic_id; + u8 pri_hint; + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN + u8 unused_5; + __le32 unused_6; + __le64 l2_filter_id_hint; +}; + +/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_l2_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 l2_filter_id; + __le32 flow_id; + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_l2_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 l2_filter_id; +}; + +/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 
unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_l2_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4 + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP + __le32 enables; + #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL + #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL + __le64 l2_filter_id; + __le32 dst_id; + __le32 new_mirror_vnic_id; + __le32 prof_func; + __le32 l2_context_id; +}; + +/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_l2_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */ +struct 
hwrm_cfa_l2_set_rx_mask_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 vnic_id; + __le32 mask; + #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL + __le64 mc_tbl_addr; + __le32 num_mc_entries; + u8 unused_0[4]; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + u8 unused_1[4]; +}; + +/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */ +struct hwrm_cfa_l2_set_rx_mask_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 num_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_cfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_input (size:256b/32B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 
target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[2]; + __le32 max_vlan_entries; + __le64 vlan_tag_mask_tbl_addr; +}; + +/* hwrm_cfa_vlan_antispoof_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_vlan_antispoof_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 num_vlan_entries; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */ +struct hwrm_cfa_tunnel_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + __le32 enables; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + __le64 l2_filter_id; + u8 l2_addr[6]; + __le16 l2_ivlan; + __le32 l3_addr[4]; + __le32 t_l3_addr[4]; + u8 l3_addr_type; + u8 t_l3_addr_type; + u8 tunnel_type; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define 
CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 tunnel_flags; + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL + __le32 vni; + __le32 dst_vnic_id; + __le32 mirror_vnic_id; +}; + +/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tunnel_filter_id; + __le32 flow_id; + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tunnel_filter_free_input 
(size:192b/24B) */ +struct hwrm_cfa_tunnel_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tunnel_filter_id; +}; + +/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tunnel_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 flags; + #define CFA_REDIRECT_TUNNEL_TYPE_ALLOC_REQ_FLAGS_MODIFY_DST 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_cfa_redirect_tunnel_type_alloc_output 
(size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dest_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_FREE_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_free_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_input (size:192b/24B) */ +struct hwrm_cfa_redirect_tunnel_type_info_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 tunnel_type; + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_LAST CFA_REDIRECT_TUNNEL_TYPE_INFO_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[5]; +}; + +/* hwrm_cfa_redirect_tunnel_type_info_output (size:128b/16B) */ +struct hwrm_cfa_redirect_tunnel_type_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 dest_fid; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* hwrm_vxlan_ipv6_hdr 
(size:320b/40B) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* hwrm_cfa_encap_data_vxlan (size:640b/80B) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 dst_mac_addr[6]; + u8 num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 + __be16 src_port; + __be16 dst_port; + __be32 vni; + u8 hdr_rsvd0[3]; + u8 hdr_rsvd1; + u8 hdr_flags; + u8 unused[3]; +}; + +/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */ +struct hwrm_cfa_encap_record_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_EXTERNAL 0x2UL + u8 encap_type; + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL + #define 
CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL + #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE + u8 unused_0[3]; + __le32 encap_data[20]; +}; + +/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 encap_record_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_encap_record_free_input (size:192b/24B) */ +struct hwrm_cfa_encap_record_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_record_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_encap_record_free_output (size:128b/16B) */ +struct hwrm_cfa_encap_record_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL + __le32 enables; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 
0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL + __le64 l2_filter_id; + u8 src_macaddr[6]; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL + #define 
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD + __le16 dst_id; + __le16 rfs_ring_tbl_idx; + u8 tunnel_type; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 pri_hint; + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST + __be32 src_ipaddr[4]; + __be32 src_ipaddr_mask[4]; + __be32 dst_ipaddr[4]; + __be32 dst_ipaddr_mask[4]; + __be16 src_port; + __be16 src_port_mask; + __be16 dst_port; + __be16 dst_port_mask; + __le64 ntuple_filter_id_hint; +}; + +/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */ +struct 
hwrm_cfa_ntuple_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 ntuple_filter_id; + __le32 flow_id; + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR + u8 unused_0[7]; +}; + +/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_ntuple_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 ntuple_filter_id; +}; + +/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define 
CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + __le32 flags; + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL + #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL + __le64 ntuple_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_alloc_input (size:896b/112B) */ +struct hwrm_cfa_em_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_LAST CFA_EM_FLOW_ALLOC_REQ_FLAGS_PATH_RX + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_BYTE_CTR 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_PKT_CTR 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DECAP 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_ENCAP 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_DROP 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_FLAGS_METER 0x40UL + __le32 enables; + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_TUNNEL_ID 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_MACADDR 0x10UL + #define 
CFA_EM_FLOW_ALLOC_REQ_ENABLES_OVLAN_VID 0x20UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IVLAN_VID 0x40UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ETHERTYPE 0x80UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_IPADDR 0x100UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_IPADDR 0x200UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x400UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x800UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_SRC_PORT 0x1000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_PORT 0x2000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_DST_ID 0x4000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x8000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_ENCAP_RECORD_ID 0x10000UL + #define CFA_EM_FLOW_ALLOC_REQ_ENABLES_METER_INSTANCE_ID 0x20000UL + __le64 l2_filter_id; + u8 tunnel_type; + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0[3]; + __le32 tunnel_id; + u8 src_macaddr[6]; + __le16 meter_instance_id; + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_LAST 
CFA_EM_FLOW_ALLOC_REQ_METER_INSTANCE_ID_INVALID + u8 dst_macaddr[6]; + __le16 ovlan_vid; + __le16 ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_EM_FLOW_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_LAST CFA_EM_FLOW_ALLOC_REQ_IP_PROTOCOL_UDP + u8 unused_1[2]; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 mirror_vnic_id; + __le32 encap_record_id; + u8 unused_2[4]; +}; + +/* hwrm_cfa_em_flow_alloc_output (size:192b/24B) */ +struct hwrm_cfa_em_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 em_filter_id; + __le32 flow_id; + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_EM_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_free_input (size:192b/24B) */ +struct hwrm_cfa_em_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 em_filter_id; 
+}; + +/* hwrm_cfa_em_flow_free_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_em_flow_cfg_input (size:384b/48B) */ +struct hwrm_cfa_em_flow_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL + #define CFA_EM_FLOW_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL + u8 unused_0[4]; + __le64 em_filter_id; + __le32 new_dst_id; + __le32 new_mirror_vnic_id; + __le16 new_meter_instance_id; + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_EM_FLOW_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID + u8 unused_1[6]; +}; + +/* hwrm_cfa_em_flow_cfg_output (size:128b/16B) */ +struct hwrm_cfa_em_flow_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_meter_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_meter_qcaps_output (size:320b/40B) */ +struct hwrm_cfa_meter_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_MASK 0xfUL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_SFT 0 + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_375MHZ 0x0UL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_625MHZ 0x1UL + #define CFA_METER_QCAPS_RESP_FLAGS_CLOCK_LAST CFA_METER_QCAPS_RESP_FLAGS_CLOCK_625MHZ + u8 unused_0[4]; + __le16 min_tx_profile; + __le16 max_tx_profile; + __le16 min_rx_profile; + __le16 max_rx_profile; + __le16 min_tx_instance; + __le16 max_tx_instance; + __le16 min_rx_instance; + __le16 
max_rx_instance; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_alloc_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_ALLOC_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_LAST CFA_METER_PROFILE_ALLOC_REQ_METER_TYPE_RFC4115 + __le16 reserved1; + __le32 reserved2; + __le32 commit_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define 
CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW + __le32 commit_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_LAST 
CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define 
CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_ALLOC_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_LAST CFA_METER_PROFILE_ALLOC_RESP_METER_PROFILE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_profile_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_FREE_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_profile_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_profile_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_profile_cfg_input (size:320b/40B) */ +struct hwrm_cfa_meter_profile_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH 0x1UL + #define 
CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_PROFILE_CFG_REQ_FLAGS_PATH_RX + u8 meter_type; + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2697 0x0UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC2698 0x1UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 0x2UL + #define CFA_METER_PROFILE_CFG_REQ_METER_TYPE_LAST CFA_METER_PROFILE_CFG_REQ_METER_TYPE_RFC4115 + __le16 meter_profile_id; + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_LAST CFA_METER_PROFILE_CFG_REQ_METER_PROFILE_ID_INVALID + __le32 reserved; + __le32 commit_rate; + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_RATE_BW_VALUE_UNIT_RAW + __le32 commit_burst; + #define 
CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_COMMIT_BURST_BW_VALUE_UNIT_INVALID + __le32 excess_peak_rate; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_MEGA (0x0UL 
<< 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_RATE_BW_VALUE_UNIT_RAW + __le32 excess_peak_burst; + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_MASK 0xfffffffUL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_SFT 0 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE 0x10000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BITS (0x0UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES (0x1UL << 28) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_SCALE_BYTES + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MASK 0xe0000000UL + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_SFT 29 + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_LAST CFA_METER_PROFILE_CFG_REQ_EXCESS_PEAK_BURST_BW_VALUE_UNIT_INVALID +}; + +/* hwrm_cfa_meter_profile_cfg_output (size:128b/16B) 
*/ +struct hwrm_cfa_meter_profile_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_alloc_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_ALLOC_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_LAST CFA_METER_INSTANCE_ALLOC_REQ_METER_PROFILE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_alloc_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_ALLOC_RESP_METER_INSTANCE_ID_INVALID + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_cfg_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_CFG_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_profile_id; + #define CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_LAST 
CFA_METER_INSTANCE_CFG_REQ_METER_PROFILE_ID_INVALID + __le16 meter_instance_id; + u8 unused_1[2]; +}; + +/* hwrm_cfa_meter_instance_cfg_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_meter_instance_free_input (size:192b/24B) */ +struct hwrm_cfa_meter_instance_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_LAST CFA_METER_INSTANCE_FREE_REQ_FLAGS_PATH_RX + u8 unused_0; + __le16 meter_instance_id; + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID 0xffffUL + #define CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_LAST CFA_METER_INSTANCE_FREE_REQ_METER_INSTANCE_ID_INVALID + u8 unused_1[4]; +}; + +/* hwrm_cfa_meter_instance_free_output (size:128b/16B) */ +struct hwrm_cfa_meter_instance_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */ +struct hwrm_cfa_decap_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL 
+ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2[2]; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define 
CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 unused_3; + __le32 unused_4; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 decap_filter_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_decap_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + u8 unused_0[4]; +}; + +/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_decap_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */ +struct hwrm_cfa_flow_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1 + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1) + #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1) + #define 
CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3 + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3) + #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL + __le16 src_fid; + __le32 tunnel_handle; + __le16 action_flags; + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL + __le16 dst_fid; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + __le16 act_meter_id; + __le16 ref_flow_handle; + __be16 ethertype; + __be16 outer_vlan_tci; + __be16 dmac[3]; + __be16 inner_vlan_tci; + __be16 smac[3]; + u8 ip_dst_mask_len; + u8 ip_src_mask_len; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be16 l4_src_port; + __be16 l4_src_port_mask; + __be16 l4_dst_port; + __be16 
l4_dst_port_mask; + __be32 nat_ip_address[4]; + __be16 l2_rewrite_dmac[3]; + __be16 nat_port; + __be16 l2_rewrite_smac[3]; + u8 ip_proto; + u8 tunnel_type; + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL +}; + +/* hwrm_cfa_flow_alloc_output (size:256b/32B) */ +struct hwrm_cfa_flow_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flow_handle; + u8 unused_0[2]; + __le32 flow_id; + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX + __le64 ext_flow_handle; + __le32 flow_counter_id; + u8 unused_1[3]; + u8 
valid; +}; + +/* hwrm_cfa_flow_alloc_cmd_err (size:64b/8B) */ +struct hwrm_cfa_flow_alloc_cmd_err { + u8 code; + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_L2_CONTEXT_TCAM 0x1UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_ACTION_RECORD 0x2UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_COUNTER 0x3UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_WILD_CARD_TCAM 0x4UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_HASH_COLLISION 0x5UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_KEY_EXISTS 0x6UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB 0x7UL + #define CFA_FLOW_ALLOC_CMD_ERR_CODE_LAST CFA_FLOW_ALLOC_CMD_ERR_CODE_FLOW_CTXT_DB + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_free_input (size:256b/32B) */ +struct hwrm_cfa_flow_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + __le16 unused_0; + __le32 flow_counter_id; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_free_output (size:256b/32B) */ +struct hwrm_cfa_flow_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet; + __le64 byte; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_action_data (size:960b/120B) */ +struct hwrm_cfa_flow_action_data { + __le16 action_flags; + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TUNNEL_IP 0x20UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_TTL_DECREMENT 0x40UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_FLOW_AGING_ENABLED 0x80UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_ENCAP 0x100UL + #define CFA_FLOW_ACTION_DATA_ACTION_FLAGS_DECAP 0x200UL + __le16 act_meter_id; + __le16 vnic_id; + __le16 vport_id; + __be16 nat_port; + __le16 unused_0[3]; + __be32 
nat_ip_address[4]; + u8 encap_type; + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPIP 0x4UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_MPLS 0x6UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VLAN 0x7UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE 0x10UL + #define CFA_FLOW_ACTION_DATA_ENCAP_TYPE_LAST CFA_FLOW_ACTION_DATA_ENCAP_TYPE_VXLAN_GPE + u8 unused[7]; + __le32 encap_data[20]; +}; + +/* hwrm_cfa_flow_tunnel_hdr_data (size:64b/8B) */ +struct hwrm_cfa_flow_tunnel_hdr_data { + u8 tunnel_type; + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_LAST 
CFA_FLOW_TUNNEL_HDR_DATA_TUNNEL_TYPE_ANYTUNNEL + u8 unused[3]; + __le32 tunnel_id; +}; + +/* hwrm_cfa_flow_l4_key_data (size:64b/8B) */ +struct hwrm_cfa_flow_l4_key_data { + __be16 l4_src_port; + __be16 l4_dst_port; + __le32 unused; +}; + +/* hwrm_cfa_flow_l3_key_data (size:512b/64B) */ +struct hwrm_cfa_flow_l3_key_data { + u8 ip_protocol; + u8 unused_0[7]; + __be32 ip_dst[4]; + __be32 ip_src[4]; + __be32 nat_ip_address[4]; + __le32 unused[2]; +}; + +/* hwrm_cfa_flow_l2_key_data (size:448b/56B) */ +struct hwrm_cfa_flow_l2_key_data { + __be16 dmac[3]; + __le16 unused_0; + __be16 smac[3]; + __le16 unused_1; + __be16 l2_rewrite_dmac[3]; + __le16 unused_2; + __be16 l2_rewrite_smac[3]; + __le16 ethertype; + __le16 num_vlan_tags; + __be16 l2_rewrite_vlan_tpid; + __be16 l2_rewrite_vlan_tci; + u8 unused_3[2]; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + u8 unused[8]; +}; + +/* hwrm_cfa_flow_key_data (size:4160b/520B) */ +struct hwrm_cfa_flow_key_data { + __le32 t_l2_key_data[14]; + __le32 t_l2_key_mask[14]; + __le32 t_l3_key_data[16]; + __le32 t_l3_key_mask[16]; + __le32 t_l4_key_data[2]; + __le32 t_l4_key_mask[2]; + __le32 tunnel_hdr[2]; + __le32 l2_key_data[14]; + __le32 l2_key_mask[14]; + __le32 l3_key_data[16]; + __le32 l3_key_mask[16]; + __le32 l4_key_data[2]; + __le32 l4_key_mask[2]; +}; + +/* hwrm_cfa_flow_info_input (size:256b/32B) */ +struct hwrm_cfa_flow_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL + #define 
CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_info_output (size:5632b/704B) */ +struct hwrm_cfa_flow_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_FLOW_INFO_RESP_FLAGS_PATH_RX 0x2UL + u8 profile; + __le16 src_fid; + __le16 dst_fid; + __le16 l2_ctxt_id; + __le64 em_info; + __le64 tcam_info; + __le64 vfp_tcam_info; + __le16 ar_id; + __le16 flow_handle; + __le32 tunnel_handle; + __le16 flow_timer; + u8 unused_0[6]; + __le32 flow_key_data[130]; + __le32 flow_action_info[30]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_flush_input (size:256b/32B) */ +struct hwrm_cfa_flow_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_TABLE_VALID 0x1UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_RESET_ALL 0x2UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_RESET_PORT 0x4UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_INCL_FC 0x8000000UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_MASK 0xc0000000UL + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_SFT 30 + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_16BIT (0x0UL << 30) + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT (0x1UL << 30) + #define CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_LAST CFA_FLOW_FLUSH_REQ_FLAGS_FLOW_HANDLE_ENTRY_SIZE_FLOW_HND_64BIT + u8 page_size; + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_4K 0x0UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_8K 0x1UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_64K 0x4UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_256K 
0x6UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1M 0x8UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_2M 0x9UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_4M 0xaUL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1G 0x12UL + #define CFA_FLOW_FLUSH_REQ_PAGE_SIZE_LAST CFA_FLOW_FLUSH_REQ_PAGE_SIZE_1G + u8 page_level; + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LAST CFA_FLOW_FLUSH_REQ_PAGE_LEVEL_LVL_2 + __le16 num_flows; + __le64 page_dir; +}; + +/* hwrm_cfa_flow_flush_output (size:128b/16B) */ +struct hwrm_cfa_flow_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_stats_input (size:640b/80B) */ +struct hwrm_cfa_flow_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 num_flows; + __le16 flow_handle_0; + __le16 flow_handle_1; + __le16 flow_handle_2; + __le16 flow_handle_3; + __le16 flow_handle_4; + __le16 flow_handle_5; + __le16 flow_handle_6; + __le16 flow_handle_7; + __le16 flow_handle_8; + __le16 flow_handle_9; + u8 unused_0[2]; + __le32 flow_id_0; + __le32 flow_id_1; + __le32 flow_id_2; + __le32 flow_id_3; + __le32 flow_id_4; + __le32 flow_id_5; + __le32 flow_id_6; + __le32 flow_id_7; + __le32 flow_id_8; + __le32 flow_id_9; +}; + +/* hwrm_cfa_flow_stats_output (size:1408b/176B) */ +struct hwrm_cfa_flow_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 packet_0; + __le64 packet_1; + __le64 packet_2; + __le64 packet_3; + __le64 packet_4; + __le64 packet_5; + __le64 packet_6; + __le64 packet_7; + __le64 packet_8; + __le64 packet_9; + __le64 byte_0; + __le64 byte_1; + __le64 byte_2; + __le64 byte_3; + __le64 byte_4; + __le64 byte_5; + __le64 byte_6; + __le64 byte_7; + __le64 byte_8; + __le64 byte_9; + __le16 flow_hits; + u8 
unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_timer_reset_input (size:256b/32B) */ +struct hwrm_cfa_flow_aging_timer_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + u8 unused_0[2]; + __le32 flow_timer; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_aging_timer_reset_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_timer_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_cfg_input (size:384b/48B) */ +struct hwrm_cfa_flow_aging_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 enables; + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FLOW_TIMER 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_TCP_FIN_TIMER 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_UDP_FLOW_TIMER 0x4UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_DMA_INTERVAL 0x8UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_NOTICE_INTERVAL 0x10UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_MAX_ENTRIES 0x20UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_ID 0x40UL + #define CFA_FLOW_AGING_CFG_REQ_ENABLES_EEM_CTX_MEM_TYPE 0x80UL + u8 flags; + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_PATH_RX + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM 0x2UL + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_DISABLE (0x0UL << 1) + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_ENABLE (0x1UL << 1) + #define CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_LAST CFA_FLOW_AGING_CFG_REQ_FLAGS_EEM_ENABLE + u8 unused_0; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + __le16 eem_dma_interval; + __le16 eem_notice_interval; + __le32 eem_ctx_max_entries; + __le16 eem_ctx_id; + __le16 
eem_ctx_mem_type; + #define CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_EJECTION_DATA 0x0UL + #define CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_LAST CFA_FLOW_AGING_CFG_REQ_EEM_CTX_MEM_TYPE_EJECTION_DATA + u8 unused_1[4]; +}; + +/* hwrm_cfa_flow_aging_cfg_output (size:128b/16B) */ +struct hwrm_cfa_flow_aging_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCFG_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcfg_output (size:320b/40B) */ +struct hwrm_cfa_flow_aging_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tcp_flow_timer; + __le32 tcp_fin_timer; + __le32 udp_flow_timer; + __le16 eem_dma_interval; + __le16 eem_notice_interval; + __le32 eem_ctx_max_entries; + __le16 eem_ctx_id; + __le16 eem_ctx_mem_type; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_flow_aging_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_flow_aging_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_LAST CFA_FLOW_AGING_QCAPS_REQ_FLAGS_PATH_RX + u8 unused_0[7]; +}; + +/* hwrm_cfa_flow_aging_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_flow_aging_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 
max_tcp_flow_timer; + __le32 max_tcp_fin_timer; + __le32 max_udp_flow_timer; + __le32 max_aging_flows; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_tcp_flag_process_qcfg_input (size:128b/16B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_tcp_flag_process_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_tcp_flag_process_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_ar_id_port0; + __le16 rx_ar_id_port1; + __le16 tx_ar_id_port0; + __le16 tx_ar_id_port1; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_a_id; + __le16 vf_b_id; + u8 unused_0[4]; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_free_input (size:384b/48B) */ +struct hwrm_cfa_vf_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_vf_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vf_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_vf_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_VF_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + __le16 vf_pair_index; + u8 unused_0[2]; + char vf_pair_name[32]; +}; + +/* hwrm_cfa_vf_pair_info_output (size:512b/64B) */ +struct hwrm_cfa_vf_pair_info_output { + __le16 error_code; 
+ __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_vf_pair_index; + __le16 vf_a_fid; + __le16 vf_a_index; + __le16 vf_b_fid; + __le16 vf_b_index; + u8 pair_state; + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_VF_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_VF_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + u8 unused_0[5]; + char pair_name[32]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_alloc_input (size:576b/72B) */ +struct hwrm_cfa_pair_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pair_mode; + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MOD 0x5UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_MODALL 0x6UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_TRUFLOW 0x7UL + #define CFA_PAIR_ALLOC_REQ_PAIR_MODE_LAST CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_TRUFLOW + __le16 vf_a_id; + u8 host_b_id; + u8 pf_b_id; + __le16 vf_b_id; + u8 port_id; + u8 pri; + __le16 new_pf_fid; + __le32 enables; + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_AB_VALID 0x1UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_Q_BA_VALID 0x2UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_AB_VALID 0x4UL + #define CFA_PAIR_ALLOC_REQ_ENABLES_FC_BA_VALID 0x8UL + char pair_name[32]; + u8 q_ab; + u8 q_ba; + u8 fc_ab; + u8 fc_ba; + u8 unused_1[4]; +}; + +/* hwrm_cfa_pair_alloc_output (size:192b/24B) */ +struct hwrm_cfa_pair_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_free_input (size:448b/56B) */ +struct 
hwrm_cfa_pair_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char pair_name[32]; + u8 pf_b_id; + u8 unused_0[3]; + __le16 vf_id; + __le16 pair_mode; + #define CFA_PAIR_FREE_REQ_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_PROXY 0x3UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN_MOD 0x5UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN_MODALL 0x6UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN_TRUFLOW 0x7UL + #define CFA_PAIR_FREE_REQ_PAIR_MODE_LAST CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN_TRUFLOW +}; + +/* hwrm_cfa_pair_free_output (size:128b/16B) */ +struct hwrm_cfa_pair_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_pair_info_input (size:448b/56B) */ +struct hwrm_cfa_pair_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE 0x1UL + #define CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_REPRE 0x2UL + __le16 pair_index; + u8 pair_pfid; + u8 pair_vfid; + char pair_name[32]; +}; + +/* hwrm_cfa_pair_info_output (size:576b/72B) */ +struct hwrm_cfa_pair_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_pair_index; + __le16 a_fid; + u8 host_a_index; + u8 pf_a_index; + __le16 vf_a_index; + __le16 rx_cfa_code_a; + __le16 tx_cfa_action_a; + __le16 b_fid; + u8 host_b_index; + u8 pf_b_index; + __le16 vf_b_index; + __le16 rx_cfa_code_b; + __le16 tx_cfa_action_b; + u8 pair_mode; + #define CFA_PAIR_INFO_RESP_PAIR_MODE_VF2FN 0x0UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2FN 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_REP2REP 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_PROXY 0x3UL + #define 
CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR 0x4UL + #define CFA_PAIR_INFO_RESP_PAIR_MODE_LAST CFA_PAIR_INFO_RESP_PAIR_MODE_PFPAIR + u8 pair_state; + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ALLOCATED 0x1UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE 0x2UL + #define CFA_PAIR_INFO_RESP_PAIR_STATE_LAST CFA_PAIR_INFO_RESP_PAIR_STATE_ACTIVE + char pair_name[32]; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + u8 unused_0[4]; + char vfr_name[32]; +}; + +/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_vfr_free_input (size:448b/56B) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; + __le16 vf_id; + __le16 reserved; + u8 unused_0[4]; +}; + +/* hwrm_cfa_vfr_free_output (size:128b/16B) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_input (size:192b/24B) */ +struct hwrm_cfa_redirect_query_tunnel_type_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 src_fid; + u8 unused_0[6]; +}; + +/* hwrm_cfa_redirect_query_tunnel_type_output (size:128b/16B) */ +struct hwrm_cfa_redirect_query_tunnel_type_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tunnel_mask; + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NONTUNNEL 0x1UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN 0x2UL + #define 
CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_NVGRE 0x4UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2GRE 0x8UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPIP 0x10UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_GENEVE 0x20UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_MPLS 0x40UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_STT 0x80UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE 0x100UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_V4 0x200UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_IPGRE_V1 0x400UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_ANYTUNNEL 0x800UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_L2_ETYPE 0x1000UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_GPE_V6 0x2000UL + #define CFA_REDIRECT_QUERY_TUNNEL_TYPE_RESP_TUNNEL_MASK_VXLAN_GPE 0x4000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_rgtr_input (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + u8 page_level; + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LAST CFA_CTX_MEM_RGTR_REQ_PAGE_LEVEL_LVL_2 + u8 page_size; + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4K 0x0UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_8K 0x1UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_64K 0x4UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_256K 0x6UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1M 0x8UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_2M 0x9UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_4M 0xaUL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1G 0x12UL + #define CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_LAST CFA_CTX_MEM_RGTR_REQ_PAGE_SIZE_1G + __le32 unused_0; + __le64 page_dir; +}; + +/* 
hwrm_cfa_ctx_mem_rgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ctx_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_unrgtr_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_cfa_ctx_mem_unrgtr_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_qctx_input (size:192b/24B) */ +struct hwrm_cfa_ctx_mem_qctx_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 ctx_id; + u8 unused_0[6]; +}; + +/* hwrm_cfa_ctx_mem_qctx_output (size:256b/32B) */ +struct hwrm_cfa_ctx_mem_qctx_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + u8 page_level; + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_0 0x0UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_1 0x1UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_2 0x2UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LAST CFA_CTX_MEM_QCTX_RESP_PAGE_LEVEL_LVL_2 + u8 page_size; + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_4K 0x0UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_8K 0x1UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_64K 0x4UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_256K 0x6UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1M 0x8UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_2M 0x9UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_4M 0xaUL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1G 0x12UL + #define CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_LAST CFA_CTX_MEM_QCTX_RESP_PAGE_SIZE_1G + u8 unused_0[4]; + __le64 page_dir; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_ctx_mem_qcaps_input (size:128b/16B) */ +struct 
hwrm_cfa_ctx_mem_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_ctx_mem_qcaps_output (size:128b/16B) */ +struct hwrm_cfa_ctx_mem_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 max_entries; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_counter_qcaps_input (size:128b/16B) */ +struct hwrm_cfa_counter_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_cfa_counter_qcaps_output (size:576b/72B) */ +struct hwrm_cfa_counter_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT 0x1UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_NONE 0x0UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_64_BIT 0x1UL + #define CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_LAST CFA_COUNTER_QCAPS_RESP_FLAGS_COUNTER_FORMAT_64_BIT + __le32 unused_0; + __le32 min_rx_fc; + __le32 max_rx_fc; + __le32 min_tx_fc; + __le32 max_tx_fc; + __le32 min_rx_efc; + __le32 max_rx_efc; + __le32 min_tx_efc; + __le32 max_tx_efc; + __le32 min_rx_mdc; + __le32 max_rx_mdc; + __le32 min_tx_mdc; + __le32 max_tx_mdc; + __le32 max_flow_alloc_fc; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_cfa_counter_cfg_input (size:256b/32B) */ +struct hwrm_cfa_counter_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE 0x1UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_DISABLE 0x0UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_ENABLE 0x1UL + #define CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_LAST CFA_COUNTER_CFG_REQ_FLAGS_CFG_MODE_ENABLE + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH 0x2UL + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 1) + #define CFA_COUNTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 1) + 
#define CFA_COUNTER_CFG_REQ_FLAGS_PATH_LAST CFA_COUNTER_CFG_REQ_FLAGS_PATH_RX + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_MASK 0xcUL + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_SFT 2 + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PUSH (0x0UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL (0x1UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC (0x2UL << 2) + #define CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_LAST CFA_COUNTER_CFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC + __le16 counter_type; + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_FC 0x0UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_EFC 0x1UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_MDC 0x2UL + #define CFA_COUNTER_CFG_REQ_COUNTER_TYPE_LAST CFA_COUNTER_CFG_REQ_COUNTER_TYPE_MDC + __le16 ctx_id; + __le16 update_tmr_ms; + __le32 num_entries; + __le32 unused_0; +}; + +/* hwrm_cfa_counter_cfg_output (size:128b/16B) */ +struct hwrm_cfa_counter_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_counter_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_counter_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH 0x1UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_PATH_LAST CFA_COUNTER_QCFG_REQ_FLAGS_PATH_RX + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_MASK 0x6UL + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_SFT 1 + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PUSH (0x0UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL (0x1UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC (0x2UL << 1) + #define CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_LAST 
CFA_COUNTER_QCFG_REQ_FLAGS_DATA_TRANSFER_MODE_PULL_ASYNC + __le16 counter_type; + __le32 unused_0; +}; + +/* hwrm_cfa_counter_qcfg_output (size:192b/24B) */ +struct hwrm_cfa_counter_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ctx_id; + __le16 update_tmr_ms; + __le32 num_entries; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_counter_qstats_input (size:320b/40B) */ +struct hwrm_cfa_counter_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH 0x1UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_TX 0x0UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_RX 0x1UL + #define CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_LAST CFA_COUNTER_QSTATS_REQ_FLAGS_PATH_RX + __le16 counter_type; + __le16 input_flow_ctx_id; + __le16 num_entries; + __le16 delta_time_ms; + __le16 meter_instance_id; + __le16 mdc_ctx_id; + u8 unused_0[2]; + __le64 expected_count; +}; + +/* hwrm_cfa_counter_qstats_output (size:128b/16B) */ +struct hwrm_cfa_counter_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL + __le32 unused_0; +}; + +/* hwrm_cfa_eem_qcaps_output (size:320b/40B) */ +struct hwrm_cfa_eem_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL + #define 
CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL + __le32 unused_0; + __le32 supported; + #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_FID_TABLE 0x10UL + __le32 max_entries_supported; + __le16 key_entry_size; + __le16 record_entry_size; + __le16 efc_entry_size; + __le16 fid_entry_size; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_cfa_eem_cfg_input (size:384b/48B) */ +struct hwrm_cfa_eem_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL + #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL + __le16 group_id; + __le16 unused_0; + __le32 num_entries; + __le32 unused_1; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + __le16 unused_2; + __le32 unused_3; +}; + +/* hwrm_cfa_eem_cfg_output (size:128b/16B) */ +struct hwrm_cfa_eem_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL + __le32 unused_0; +}; + +/* hwrm_cfa_eem_qcfg_output (size:256b/32B) */ +struct hwrm_cfa_eem_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL + 
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL + __le32 num_entries; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; + __le16 fid_ctx_id; + u8 unused_2[5]; + u8 valid; +}; + +/* hwrm_cfa_eem_op_input (size:192b/24B) */ +struct hwrm_cfa_eem_op_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL + __le16 unused_0; + __le16 op; + #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL + #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL + #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL + #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL + #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP +}; + +/* hwrm_cfa_eem_op_output (size:128b/16B) */ +struct hwrm_cfa_eem_op_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */ +struct hwrm_cfa_adv_flow_mgnt_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[4]; +}; + +/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */ +struct hwrm_cfa_adv_flow_mgnt_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL + #define 
CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED 0x200000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tflib_input (size:1024b/128B) */ +struct hwrm_cfa_tflib_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 tf_type; + __le16 tf_subtype; + u8 unused0[4]; + __le32 tf_req[26]; +}; + +/* hwrm_cfa_tflib_output (size:5632b/704B) */ +struct hwrm_cfa_tflib_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tf_type; + __le16 tf_subtype; + __le32 tf_resp_code; + __le32 tf_resp[170]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_cfa_lag_group_member_rgtr_input (size:192b/24B) */ +struct hwrm_cfa_lag_group_member_rgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 
mode; + #define CFA_LAG_GROUP_MEMBER_RGTR_REQ_MODE_ACTIVE_BACKUP 0x1UL + #define CFA_LAG_GROUP_MEMBER_RGTR_REQ_MODE_BALANCE_XOR 0x2UL + #define CFA_LAG_GROUP_MEMBER_RGTR_REQ_MODE_BROADCAST 0x3UL + #define CFA_LAG_GROUP_MEMBER_RGTR_REQ_MODE_LAST CFA_LAG_GROUP_MEMBER_RGTR_REQ_MODE_BROADCAST + u8 port_bitmap; + u8 active_port; + u8 unused_0[5]; +}; + +/* hwrm_cfa_lag_group_member_rgtr_output (size:128b/16B) */ +struct hwrm_cfa_lag_group_member_rgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 lag_id; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_cfa_lag_group_member_unrgtr_input (size:192b/24B) */ +struct hwrm_cfa_lag_group_member_unrgtr_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 lag_id; + u8 unused_0[6]; +}; + +/* hwrm_cfa_lag_group_member_unrgtr_output (size:128b/16B) */ +struct hwrm_cfa_lag_group_member_unrgtr_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_tls_filter_alloc_input (size:768b/96B) */ +struct hwrm_cfa_tls_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0; + __le32 enables; + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x4UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x8UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x10UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x20UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x40UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x80UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_KID 0x100UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x200UL + #define CFA_TLS_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL + #define 
CFA_TLS_FILTER_ALLOC_REQ_ENABLES_QUIC_DST_CONNECT_ID 0x800UL + __le64 l2_filter_id; + u8 unused_1[6]; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 + u8 ip_protocol; + #define CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + #define CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + __le16 dst_id; + __le16 mirror_vnic_id; + u8 unused_2[2]; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le32 kid; + __le64 quic_dst_connect_id; +}; + +/* hwrm_cfa_tls_filter_alloc_output (size:192b/24B) */ +struct hwrm_cfa_tls_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tls_filter_id; + __le32 flow_id; + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0 + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30) + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30) + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31) + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31) + #define CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TLS_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_cfa_tls_filter_free_input (size:192b/24B) */ +struct hwrm_cfa_tls_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 
seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 tls_filter_id; +}; + +/* hwrm_cfa_tls_filter_free_output (size:128b/16B) */ +struct hwrm_cfa_tls_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_release_afm_func_input (size:256b/32B) */ +struct hwrm_cfa_release_afm_func_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 rfid; + u8 type; + #define CFA_RELEASE_AFM_FUNC_REQ_TYPE_EFID 0x1UL + #define CFA_RELEASE_AFM_FUNC_REQ_TYPE_RFID 0x2UL + #define CFA_RELEASE_AFM_FUNC_REQ_TYPE_DFID 0x3UL + #define CFA_RELEASE_AFM_FUNC_REQ_TYPE_LAST CFA_RELEASE_AFM_FUNC_REQ_TYPE_DFID + u8 unused_0[3]; + __le32 flags; + #define CFA_RELEASE_AFM_FUNC_REQ_FLAGS_BC_REM 0x1UL + #define CFA_RELEASE_AFM_FUNC_REQ_FLAGS_MC_REM 0x2UL + #define CFA_RELEASE_AFM_FUNC_REQ_FLAGS_PROMISC_REM 0x4UL + __le32 unused_1; +}; + +/* hwrm_cfa_release_afm_func_output (size:128b/16B) */ +struct hwrm_cfa_release_afm_func_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_tf_input (size:1024b/128B) */ +struct hwrm_tf_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + __le16 subtype; + u8 unused0[4]; + __le32 req[26]; +}; + +/* hwrm_tf_output (size:5632b/704B) */ +struct hwrm_tf_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 type; + __le16 subtype; + __le32 resp_code; + __le32 resp[170]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_version_get_input (size:128b/16B) */ +struct hwrm_tf_version_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_tf_version_get_output (size:256b/32B) */ +struct hwrm_tf_version_get_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + u8 major; + u8 minor; + u8 update; + u8 unused0[5]; + __le64 dev_caps_cfg; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_session_open_input (size:640b/80B) */ +struct hwrm_tf_session_open_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 session_name[64]; +}; + +/* hwrm_tf_session_open_output (size:192b/24B) */ +struct hwrm_tf_session_open_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_session_id; + __le32 fw_session_client_id; + __le32 flags; + #define TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION 0x1UL + #define TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_NOT_CREATOR 0x0UL + #define TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_CREATOR 0x1UL + #define TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_LAST TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_CREATOR + u8 unused1[3]; + u8 valid; +}; + +/* hwrm_tf_session_register_input (size:704b/88B) */ +struct hwrm_tf_session_register_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 unused0; + u8 session_client_name[64]; +}; + +/* hwrm_tf_session_register_output (size:128b/16B) */ +struct hwrm_tf_session_register_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_session_client_id; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_session_unregister_input (size:192b/24B) */ +struct hwrm_tf_session_unregister_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 fw_session_client_id; +}; + +/* hwrm_tf_session_unregister_output (size:128b/16B) */ +struct hwrm_tf_session_unregister_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_close_input (size:192b/24B) */ +struct hwrm_tf_session_close_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + u8 unused0[4]; +}; + +/* hwrm_tf_session_close_output (size:128b/16B) */ +struct hwrm_tf_session_close_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_qcfg_input (size:192b/24B) */ +struct hwrm_tf_session_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + u8 unused0[4]; +}; + +/* hwrm_tf_session_qcfg_output (size:128b/16B) */ +struct hwrm_tf_session_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 rx_act_flags; + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_ABCR_GFID_EN 0x1UL + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_ABCR_VTAG_DLT_BOTH 0x2UL + #define TF_SESSION_QCFG_RESP_RX_ACT_FLAGS_TECT_SMAC_OVR_RUTNSL2 0x4UL + u8 tx_act_flags; + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_ABCR_VEB_EN 0x1UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_GRE_SET_K 0x2UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_IPV6_TC_IH 0x4UL + #define TF_SESSION_QCFG_RESP_TX_ACT_FLAGS_TECT_IPV4_TOS_IH 0x8UL + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_session_resc_qcaps_input (size:256b/32B) */ +struct hwrm_tf_session_resc_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_QCAPS_REQ_FLAGS_DIR_TX + __le16 qcaps_size; + __le64 qcaps_addr; +}; + +/* hwrm_tf_session_resc_qcaps_output (size:192b/24B) */ +struct hwrm_tf_session_resc_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + 
#define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_MASK 0x3UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_SFT 0 + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_STATIC 0x0UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_1 0x1UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_2 0x2UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_3 0x3UL + #define TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_LAST TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_3 + __le16 size; + u8 sram_profile; + u8 unused0; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_session_resc_alloc_input (size:320b/40B) */ +struct hwrm_tf_session_resc_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_ALLOC_REQ_FLAGS_DIR_TX + __le16 req_size; + __le64 req_addr; + __le64 resc_addr; +}; + +/* hwrm_tf_session_resc_alloc_output (size:128b/16B) */ +struct hwrm_tf_session_resc_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 size; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_session_resc_flush_input (size:256b/32B) */ +struct hwrm_tf_session_resc_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_FLUSH_REQ_FLAGS_DIR_TX + __le16 flush_size; + __le64 flush_addr; +}; + +/* hwrm_tf_session_resc_flush_output (size:128b/16B) */ 
+struct hwrm_tf_session_resc_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_resc_info_input (size:320b/40B) */ +struct hwrm_tf_session_resc_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_RESC_INFO_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_RESC_INFO_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_RESC_INFO_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_RESC_INFO_REQ_FLAGS_DIR_LAST TF_SESSION_RESC_INFO_REQ_FLAGS_DIR_TX + __le16 req_size; + __le64 req_addr; + __le64 resc_addr; +}; + +/* hwrm_tf_session_resc_info_output (size:128b/16B) */ +struct hwrm_tf_session_resc_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 size; + u8 unused0[5]; + u8 valid; +}; + +/* tf_rm_resc_req_entry (size:64b/8B) */ +struct tf_rm_resc_req_entry { + __le32 type; + __le16 min; + __le16 max; +}; + +/* tf_rm_resc_entry (size:64b/8B) */ +struct tf_rm_resc_entry { + __le32 type; + __le16 start; + __le16 stride; +}; + +/* hwrm_tf_tbl_type_alloc_input (size:192b/24B) */ +struct hwrm_tf_tbl_type_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_ALLOC_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_ALLOC_REQ_FLAGS_DIR_TX + u8 blktype; + #define TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_LAST TF_TBL_TYPE_ALLOC_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 type; +}; + +/* 
hwrm_tf_tbl_type_alloc_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 idx_tbl_id; + u8 unused0; + u8 valid; +}; + +/* hwrm_tf_tbl_type_get_input (size:256b/32B) */ +struct hwrm_tf_tbl_type_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_GET_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_GET_REQ_FLAGS_DIR_TX + #define TF_TBL_TYPE_GET_REQ_FLAGS_CLEAR_ON_READ 0x2UL + u8 blktype; + #define TF_TBL_TYPE_GET_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TF_TBL_TYPE_GET_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TF_TBL_TYPE_GET_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TF_TBL_TYPE_GET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TF_TBL_TYPE_GET_REQ_BLKTYPE_LAST TF_TBL_TYPE_GET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0; + __le32 type; + __le32 index; +}; + +/* hwrm_tf_tbl_type_get_output (size:2240b/280B) */ +struct hwrm_tf_tbl_type_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 size; + __le16 unused0; + u8 data[256]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */ +struct hwrm_tf_tbl_type_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_SET_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_SET_REQ_FLAGS_DIR_TX + #define TF_TBL_TYPE_SET_REQ_FLAGS_DMA 0x2UL + u8 blktype; + #define TF_TBL_TYPE_SET_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define 
TF_TBL_TYPE_SET_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TF_TBL_TYPE_SET_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TF_TBL_TYPE_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TF_TBL_TYPE_SET_REQ_BLKTYPE_LAST TF_TBL_TYPE_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0; + __le32 type; + __le32 index; + __le16 size; + u8 unused1[6]; + u8 data[88]; +}; + +/* hwrm_tf_tbl_type_set_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tbl_type_free_input (size:256b/32B) */ +struct hwrm_tf_tbl_type_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_FREE_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_FREE_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_FREE_REQ_FLAGS_DIR_TX + u8 blktype; + #define TF_TBL_TYPE_FREE_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TF_TBL_TYPE_FREE_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TF_TBL_TYPE_FREE_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TF_TBL_TYPE_FREE_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TF_TBL_TYPE_FREE_REQ_BLKTYPE_LAST TF_TBL_TYPE_FREE_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0; + __le16 idx_tbl_id; + u8 unused1[6]; +}; + +/* hwrm_tf_tbl_type_free_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_em_insert_input (size:832b/104B) */ +struct hwrm_tf_em_insert_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_INSERT_REQ_FLAGS_DIR 0x1UL + #define TF_EM_INSERT_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_INSERT_REQ_FLAGS_DIR_TX 0x1UL + #define 
TF_EM_INSERT_REQ_FLAGS_DIR_LAST TF_EM_INSERT_REQ_FLAGS_DIR_TX + __le16 strength; + __le32 action_ptr; + __le32 em_record_idx; + __le64 em_key[8]; + __le16 em_key_bitlen; + __le16 unused0[3]; +}; + +/* hwrm_tf_em_insert_output (size:128b/16B) */ +struct hwrm_tf_em_insert_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rptr_index; + u8 rptr_entry; + u8 num_of_entries; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_em_hash_insert_input (size:1024b/128B) */ +struct hwrm_tf_em_hash_insert_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_HASH_INSERT_REQ_FLAGS_DIR 0x1UL + #define TF_EM_HASH_INSERT_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_HASH_INSERT_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EM_HASH_INSERT_REQ_FLAGS_DIR_LAST TF_EM_HASH_INSERT_REQ_FLAGS_DIR_TX + #define TF_EM_HASH_INSERT_REQ_FLAGS_DMA 0x2UL + __le16 em_record_size_bits; + __le32 key0_hash; + __le32 key1_hash; + __le32 em_record_idx; + __le32 unused0; + __le64 em_record[11]; +}; + +/* hwrm_tf_em_hash_insert_output (size:128b/16B) */ +struct hwrm_tf_em_hash_insert_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rptr_index; + u8 rptr_entry; + u8 num_of_entries; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_em_delete_input (size:832b/104B) */ +struct hwrm_tf_em_delete_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_DELETE_REQ_FLAGS_DIR 0x1UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EM_DELETE_REQ_FLAGS_DIR_LAST TF_EM_DELETE_REQ_FLAGS_DIR_TX + __le16 unused0; + __le64 flow_handle; + __le64 em_key[8]; + __le16 em_key_bitlen; + __le16 unused1[3]; +}; + +/* hwrm_tf_em_delete_output (size:128b/16B) */ +struct hwrm_tf_em_delete_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 em_index; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_em_move_input (size:320b/40B) */ +struct hwrm_tf_em_move_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_EM_MOVE_REQ_FLAGS_DIR 0x1UL + #define TF_EM_MOVE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_EM_MOVE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_EM_MOVE_REQ_FLAGS_DIR_LAST TF_EM_MOVE_REQ_FLAGS_DIR_TX + __le16 num_blocks; + __le32 new_index; + __le32 unused0; + __le64 flow_handle; +}; + +/* hwrm_tf_em_move_output (size:128b/16B) */ +struct hwrm_tf_em_move_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 em_index; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tf_tcam_set_input (size:1024b/128B) */ +struct hwrm_tf_tcam_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_SET_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_SET_REQ_FLAGS_DIR_LAST TF_TCAM_SET_REQ_FLAGS_DIR_TX + #define TF_TCAM_SET_REQ_FLAGS_DMA 0x2UL + __le32 type; + __le16 idx; + u8 key_size; + u8 result_size; + u8 mask_offset; + u8 result_offset; + u8 unused0[6]; + u8 dev_data[88]; +}; + +/* hwrm_tf_tcam_set_output (size:128b/16B) */ +struct hwrm_tf_tcam_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_get_input (size:256b/32B) */ +struct hwrm_tf_tcam_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_GET_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_GET_REQ_FLAGS_DIR_TX 0x1UL + #define 
TF_TCAM_GET_REQ_FLAGS_DIR_LAST TF_TCAM_GET_REQ_FLAGS_DIR_TX + __le32 type; + __le16 idx; + __le16 unused0; +}; + +/* hwrm_tf_tcam_get_output (size:2368b/296B) */ +struct hwrm_tf_tcam_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 key_size; + u8 result_size; + u8 mask_offset; + u8 result_offset; + u8 unused0[4]; + u8 dev_data[272]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_move_input (size:1024b/128B) */ +struct hwrm_tf_tcam_move_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_MOVE_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_MOVE_REQ_FLAGS_DIR_LAST TF_TCAM_MOVE_REQ_FLAGS_DIR_TX + __le32 type; + __le16 count; + __le16 unused0; + __le16 idx_pairs[48]; +}; + +/* hwrm_tf_tcam_move_output (size:128b/16B) */ +struct hwrm_tf_tcam_move_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tcam_free_input (size:1024b/128B) */ +struct hwrm_tf_tcam_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_TCAM_FREE_REQ_FLAGS_DIR 0x1UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TCAM_FREE_REQ_FLAGS_DIR_LAST TF_TCAM_FREE_REQ_FLAGS_DIR_TX + __le32 type; + __le16 count; + __le16 unused0; + __le16 idx_list[48]; +}; + +/* hwrm_tf_tcam_free_output (size:128b/16B) */ +struct hwrm_tf_tcam_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_global_cfg_set_input (size:448b/56B) */ +struct hwrm_tf_global_cfg_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; 
+ __le32 fw_session_id; + __le32 flags; + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR 0x1UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_LAST TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_TX + #define TF_GLOBAL_CFG_SET_REQ_FLAGS_DMA 0x2UL + __le32 type; + __le32 offset; + __le16 size; + u8 unused0[6]; + u8 data[8]; + u8 mask[8]; +}; + +/* hwrm_tf_global_cfg_set_output (size:128b/16B) */ +struct hwrm_tf_global_cfg_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_global_cfg_get_input (size:320b/40B) */ +struct hwrm_tf_global_cfg_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le32 flags; + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR 0x1UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_LAST TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_TX + __le32 type; + __le32 offset; + __le16 size; + u8 unused0[6]; +}; + +/* hwrm_tf_global_cfg_get_output (size:2240b/280B) */ +struct hwrm_tf_global_cfg_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 size; + u8 unused0[6]; + u8 data[256]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_if_tbl_get_input (size:256b/32B) */ +struct hwrm_tf_if_tbl_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_IF_TBL_GET_REQ_FLAGS_DIR 0x1UL + #define TF_IF_TBL_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_IF_TBL_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_IF_TBL_GET_REQ_FLAGS_DIR_LAST TF_IF_TBL_GET_REQ_FLAGS_DIR_TX + __le16 size; + __le32 type; + __le32 index; +}; + +/* hwrm_tf_if_tbl_get_output (size:1216b/152B) */ +struct hwrm_tf_if_tbl_get_output { + __le16 error_code; + 
__le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 size; + __le16 unused0; + u8 data[128]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tf_if_tbl_set_input (size:1024b/128B) */ +struct hwrm_tf_if_tbl_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_IF_TBL_SET_REQ_FLAGS_DIR 0x1UL + #define TF_IF_TBL_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_IF_TBL_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_IF_TBL_SET_REQ_FLAGS_DIR_LAST TF_IF_TBL_SET_REQ_FLAGS_DIR_TX + u8 unused0[2]; + __le32 type; + __le32 index; + __le16 size; + u8 unused1[6]; + u8 data[88]; +}; + +/* hwrm_tf_if_tbl_set_output (size:128b/16B) */ +struct hwrm_tf_if_tbl_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_tbl_type_bulk_get_input (size:384b/48B) */ +struct hwrm_tf_tbl_type_bulk_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_TBL_TYPE_BULK_GET_REQ_FLAGS_DIR 0x1UL + #define TF_TBL_TYPE_BULK_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_TBL_TYPE_BULK_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_TBL_TYPE_BULK_GET_REQ_FLAGS_DIR_LAST TF_TBL_TYPE_BULK_GET_REQ_FLAGS_DIR_TX + #define TF_TBL_TYPE_BULK_GET_REQ_FLAGS_CLEAR_ON_READ 0x2UL + u8 unused0[2]; + __le32 type; + __le32 start_index; + __le32 num_entries; + __le32 unused1; + __le64 host_addr; +}; + +/* hwrm_tf_tbl_type_bulk_get_output (size:128b/16B) */ +struct hwrm_tf_tbl_type_bulk_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 size; + u8 unused0; + u8 valid; +}; + +/* hwrm_tf_session_hotup_state_set_input (size:192b/24B) */ +struct hwrm_tf_session_hotup_state_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + 
__le32 fw_session_id; + __le16 state; + __le16 flags; + #define TF_SESSION_HOTUP_STATE_SET_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_HOTUP_STATE_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_HOTUP_STATE_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_HOTUP_STATE_SET_REQ_FLAGS_DIR_LAST TF_SESSION_HOTUP_STATE_SET_REQ_FLAGS_DIR_TX +}; + +/* hwrm_tf_session_hotup_state_set_output (size:128b/16B) */ +struct hwrm_tf_session_hotup_state_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_session_hotup_state_get_input (size:192b/24B) */ +struct hwrm_tf_session_hotup_state_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_SESSION_HOTUP_STATE_GET_REQ_FLAGS_DIR 0x1UL + #define TF_SESSION_HOTUP_STATE_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_SESSION_HOTUP_STATE_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_SESSION_HOTUP_STATE_GET_REQ_FLAGS_DIR_LAST TF_SESSION_HOTUP_STATE_GET_REQ_FLAGS_DIR_TX + u8 unused0[2]; +}; + +/* hwrm_tf_session_hotup_state_get_output (size:128b/16B) */ +struct hwrm_tf_session_hotup_state_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 state; + __le16 ref_cnt; + u8 unused0[3]; + u8 valid; +}; + +/* hwrm_tf_resc_usage_set_input (size:1024b/128B) */ +struct hwrm_tf_resc_usage_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_RESC_USAGE_SET_REQ_FLAGS_DIR 0x1UL + #define TF_RESC_USAGE_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TF_RESC_USAGE_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TF_RESC_USAGE_SET_REQ_FLAGS_DIR_LAST TF_RESC_USAGE_SET_REQ_FLAGS_DIR_TX + #define TF_RESC_USAGE_SET_REQ_FLAGS_DMA 0x2UL + __le16 types; + #define TF_RESC_USAGE_SET_REQ_TYPES_WC_TCAM 0x1UL + #define TF_RESC_USAGE_SET_REQ_TYPES_EM 0x2UL + #define 
TF_RESC_USAGE_SET_REQ_TYPES_METER 0x4UL + #define TF_RESC_USAGE_SET_REQ_TYPES_COUNTER 0x8UL + #define TF_RESC_USAGE_SET_REQ_TYPES_ACTION 0x10UL + #define TF_RESC_USAGE_SET_REQ_TYPES_ACT_MOD_ENCAP 0x20UL + #define TF_RESC_USAGE_SET_REQ_TYPES_SP_SMAC 0x40UL + #define TF_RESC_USAGE_SET_REQ_TYPES_ALL 0x80UL + __le16 size; + u8 unused1[6]; + u8 data[96]; +}; + +/* hwrm_tf_resc_usage_set_output (size:128b/16B) */ +struct hwrm_tf_resc_usage_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tf_resc_usage_query_input (size:256b/32B) */ +struct hwrm_tf_resc_usage_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 fw_session_id; + __le16 flags; + #define TF_RESC_USAGE_QUERY_REQ_FLAGS_DIR 0x1UL + #define TF_RESC_USAGE_QUERY_REQ_FLAGS_DIR_RX 0x0UL + #define TF_RESC_USAGE_QUERY_REQ_FLAGS_DIR_TX 0x1UL + #define TF_RESC_USAGE_QUERY_REQ_FLAGS_DIR_LAST TF_RESC_USAGE_QUERY_REQ_FLAGS_DIR_TX + u8 unused0[2]; + __le16 types; + #define TF_RESC_USAGE_QUERY_REQ_TYPES_WC_TCAM 0x1UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_EM 0x2UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_METER 0x4UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_COUNTER 0x8UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_ACTION 0x10UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_ACT_MOD_ENCAP 0x20UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_SP_SMAC 0x40UL + #define TF_RESC_USAGE_QUERY_REQ_TYPES_ALL 0x80UL + u8 unused1[6]; +}; + +/* hwrm_tf_resc_usage_query_output (size:960b/120B) */ +struct hwrm_tf_resc_usage_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 size; + __le16 unused0; + u8 data[96]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_qcaps_input (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* 
hwrm_tfc_tbl_scope_qcaps_output (size:192b/24B) */ +struct hwrm_tfc_tbl_scope_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 max_lkup_rec_cnt; + __le32 max_act_rec_cnt; + u8 tbl_scope_capable; + u8 max_lkup_static_buckets_exp; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_id_alloc_input (size:256b/32B) */ +struct hwrm_tfc_tbl_scope_id_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 max_pools; + u8 shared; + u8 lkup_pool_sz_exp[2]; + u8 act_pool_sz_exp[2]; + u8 app_type; + u8 unused0[6]; +}; + +/* hwrm_tfc_tbl_scope_id_alloc_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_id_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 tsid; + u8 first; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_config_input (size:704b/88B) */ +struct hwrm_tfc_tbl_scope_config_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 lkup_base_addr[2]; + __le64 act_base_addr[2]; + __le32 lkup_rec_cnt[2]; + __le32 act_rec_cnt[2]; + __le32 lkup_static_bucket_cnt[2]; + __le32 pbl_page_sz; + u8 lkup_pbl_level[2]; + u8 act_pbl_level[2]; + u8 tsid; + u8 unused0[7]; +}; + +/* hwrm_tfc_tbl_scope_config_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_config_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_deconfig_input (size:192b/24B) */ +struct hwrm_tfc_tbl_scope_deconfig_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tsid; + u8 unused0[7]; +}; + +/* hwrm_tfc_tbl_scope_deconfig_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_deconfig_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* 
hwrm_tfc_tbl_scope_fid_add_input (size:192b/24B) */ +struct hwrm_tfc_tbl_scope_fid_add_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 tsid; + u8 unused0[5]; +}; + +/* hwrm_tfc_tbl_scope_fid_add_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_fid_add_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 fid_cnt; + u8 unused0[6]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_fid_rem_input (size:192b/24B) */ +struct hwrm_tfc_tbl_scope_fid_rem_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 tsid; + u8 unused0[5]; +}; + +/* hwrm_tfc_tbl_scope_fid_rem_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_fid_rem_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid_cnt; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_session_id_alloc_input (size:192b/24B) */ +struct hwrm_tfc_session_id_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused0[6]; +}; + +/* hwrm_tfc_session_id_alloc_output (size:128b/16B) */ +struct hwrm_tfc_session_id_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 sid; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_session_fid_add_input (size:192b/24B) */ +struct hwrm_tfc_session_fid_add_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 unused0[4]; +}; + +/* hwrm_tfc_session_fid_add_output (size:128b/16B) */ +struct hwrm_tfc_session_fid_add_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid_cnt; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_session_fid_rem_input (size:192b/24B) */ +struct hwrm_tfc_session_fid_rem_input { + __le16 req_type; + __le16 cmpl_ring; + 
__le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 unused0[4]; +}; + +/* hwrm_tfc_session_fid_rem_output (size:128b/16B) */ +struct hwrm_tfc_session_fid_rem_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid_cnt; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_ident_alloc_input (size:192b/24B) */ +struct hwrm_tfc_ident_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 flags; + #define TFC_IDENT_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TFC_IDENT_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDENT_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDENT_ALLOC_REQ_FLAGS_DIR_LAST TFC_IDENT_ALLOC_REQ_FLAGS_DIR_TX + u8 subtype; + u8 track_type; + #define TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_IDENT_ALLOC_REQ_TRACK_TYPE_LAST TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID + u8 unused0; +}; + +/* hwrm_tfc_ident_alloc_output (size:128b/16B) */ +struct hwrm_tfc_ident_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 ident_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_ident_free_input (size:192b/24B) */ +struct hwrm_tfc_ident_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 subtype; + u8 flags; + #define TFC_IDENT_FREE_REQ_FLAGS_DIR 0x1UL + #define TFC_IDENT_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDENT_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDENT_FREE_REQ_FLAGS_DIR_LAST TFC_IDENT_FREE_REQ_FLAGS_DIR_TX + __le16 ident_id; +}; + +/* hwrm_tfc_ident_free_output (size:128b/16B) */ +struct hwrm_tfc_ident_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; 
+}; + +/* hwrm_tfc_idx_tbl_alloc_input (size:192b/24B) */ +struct hwrm_tfc_idx_tbl_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 flags; + #define TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_LAST TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_TX + u8 subtype; + u8 track_type; + #define TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_LAST TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID + u8 blktype; + #define TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_LAST TFC_IDX_TBL_ALLOC_REQ_BLKTYPE_BLKTYPE_TE_GPARSE +}; + +/* hwrm_tfc_idx_tbl_alloc_output (size:128b/16B) */ +struct hwrm_tfc_idx_tbl_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 idx_tbl_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_idx_tbl_alloc_set_input (size:1088b/136B) */ +struct hwrm_tfc_idx_tbl_alloc_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + u8 flags; + #define TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR 0x1UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_LAST TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_TX + #define TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DMA 0x2UL + u8 subtype; + u8 track_type; + #define 
TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_LAST TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID + u8 blktype; + #define TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_LAST TFC_IDX_TBL_ALLOC_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + __le16 data_size; + u8 unused1[6]; + __le64 dma_addr; + u8 dev_data[96]; +}; + +/* hwrm_tfc_idx_tbl_alloc_set_output (size:128b/16B) */ +struct hwrm_tfc_idx_tbl_alloc_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 idx_tbl_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_idx_tbl_set_input (size:1088b/136B) */ +struct hwrm_tfc_idx_tbl_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_IDX_TBL_SET_REQ_FLAGS_DIR 0x1UL + #define TFC_IDX_TBL_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDX_TBL_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDX_TBL_SET_REQ_FLAGS_DIR_LAST TFC_IDX_TBL_SET_REQ_FLAGS_DIR_TX + #define TFC_IDX_TBL_SET_REQ_FLAGS_DMA 0x2UL + u8 subtype; + __le16 fid; + __le16 sid; + __le16 idx_tbl_id; + __le16 data_size; + u8 blktype; + #define TFC_IDX_TBL_SET_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TFC_IDX_TBL_SET_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TFC_IDX_TBL_SET_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TFC_IDX_TBL_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TFC_IDX_TBL_SET_REQ_BLKTYPE_LAST TFC_IDX_TBL_SET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0[5]; + __le64 dma_addr; + u8 dev_data[96]; +}; + +/* hwrm_tfc_idx_tbl_set_output (size:128b/16B) */ +struct 
hwrm_tfc_idx_tbl_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tfc_idx_tbl_get_input (size:320b/40B) */ +struct hwrm_tfc_idx_tbl_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_IDX_TBL_GET_REQ_FLAGS_DIR 0x1UL + #define TFC_IDX_TBL_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDX_TBL_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDX_TBL_GET_REQ_FLAGS_DIR_LAST TFC_IDX_TBL_GET_REQ_FLAGS_DIR_TX + #define TFC_IDX_TBL_GET_REQ_FLAGS_CLEAR_ON_READ 0x2UL + u8 subtype; + __le16 fid; + __le16 sid; + __le16 idx_tbl_id; + __le16 buffer_size; + u8 blktype; + #define TFC_IDX_TBL_GET_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TFC_IDX_TBL_GET_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TFC_IDX_TBL_GET_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define TFC_IDX_TBL_GET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TFC_IDX_TBL_GET_REQ_BLKTYPE_LAST TFC_IDX_TBL_GET_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0[5]; + __le64 dma_addr; +}; + +/* hwrm_tfc_idx_tbl_get_output (size:128b/16B) */ +struct hwrm_tfc_idx_tbl_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_size; + u8 unused1[5]; + u8 valid; +}; + +/* hwrm_tfc_idx_tbl_free_input (size:256b/32B) */ +struct hwrm_tfc_idx_tbl_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_IDX_TBL_FREE_REQ_FLAGS_DIR 0x1UL + #define TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_LAST TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_TX + u8 subtype; + __le16 fid; + __le16 sid; + __le16 idx_tbl_id; + u8 blktype; + #define TFC_IDX_TBL_FREE_REQ_BLKTYPE_BLKTYPE_CFA 0x0UL + #define TFC_IDX_TBL_FREE_REQ_BLKTYPE_BLKTYPE_RXP 0x1UL + #define TFC_IDX_TBL_FREE_REQ_BLKTYPE_BLKTYPE_RE_GPARSE 0x2UL + #define 
TFC_IDX_TBL_FREE_REQ_BLKTYPE_BLKTYPE_TE_GPARSE 0x3UL + #define TFC_IDX_TBL_FREE_REQ_BLKTYPE_LAST TFC_IDX_TBL_FREE_REQ_BLKTYPE_BLKTYPE_TE_GPARSE + u8 unused0[7]; +}; + +/* hwrm_tfc_idx_tbl_free_output (size:128b/16B) */ +struct hwrm_tfc_idx_tbl_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* tfc_global_id_hwrm_req (size:64b/8B) */ +struct tfc_global_id_hwrm_req { + __le16 rtype; + __le16 dir; + __le16 subtype; + __le16 cnt; +}; + +/* tfc_global_id_hwrm_rsp (size:64b/8B) */ +struct tfc_global_id_hwrm_rsp { + __le16 rtype; + __le16 dir; + __le16 subtype; + __le16 id; +}; + +/* hwrm_tfc_global_id_alloc_input (size:320b/40B) */ +struct hwrm_tfc_global_id_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + __le16 global_id; + __le16 req_cnt; + __le64 req_addr; + __le64 resc_addr; +}; + +/* hwrm_tfc_global_id_alloc_output (size:128b/16B) */ +struct hwrm_tfc_global_id_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rsp_cnt; + u8 first; + u8 unused0[4]; + u8 valid; +}; + +/* hwrm_tfc_tcam_set_input (size:1088b/136B) */ +struct hwrm_tfc_tcam_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 sid; + __le16 tcam_id; + __le16 key_size; + __le16 result_size; + u8 flags; + #define TFC_TCAM_SET_REQ_FLAGS_DIR 0x1UL + #define TFC_TCAM_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_TCAM_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_TCAM_SET_REQ_FLAGS_DIR_LAST TFC_TCAM_SET_REQ_FLAGS_DIR_TX + #define TFC_TCAM_SET_REQ_FLAGS_DMA 0x2UL + u8 subtype; + u8 unused0[4]; + __le64 dma_addr; + u8 dev_data[96]; +}; + +/* hwrm_tfc_tcam_set_output (size:128b/16B) */ +struct hwrm_tfc_tcam_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* 
hwrm_tfc_tcam_get_input (size:192b/24B) */ +struct hwrm_tfc_tcam_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_TCAM_GET_REQ_FLAGS_DIR 0x1UL + #define TFC_TCAM_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_TCAM_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_TCAM_GET_REQ_FLAGS_DIR_LAST TFC_TCAM_GET_REQ_FLAGS_DIR_TX + u8 subtype; + __le16 fid; + __le16 sid; + __le16 tcam_id; +}; + +/* hwrm_tfc_tcam_get_output (size:2368b/296B) */ +struct hwrm_tfc_tcam_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 key_size; + __le16 result_size; + u8 unused0[4]; + u8 dev_data[272]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tfc_tcam_alloc_input (size:256b/32B) */ +struct hwrm_tfc_tcam_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_TCAM_ALLOC_REQ_FLAGS_DIR 0x1UL + #define TFC_TCAM_ALLOC_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_TCAM_ALLOC_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_TCAM_ALLOC_REQ_FLAGS_DIR_LAST TFC_TCAM_ALLOC_REQ_FLAGS_DIR_TX + u8 subtype; + __le16 fid; + __le16 sid; + __le16 key_size; + __le16 priority; + u8 track_type; + #define TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_TCAM_ALLOC_REQ_TRACK_TYPE_LAST TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID + u8 unused0[5]; +}; + +/* hwrm_tfc_tcam_alloc_output (size:128b/16B) */ +struct hwrm_tfc_tcam_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 idx; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_tcam_alloc_set_input (size:1088b/136B) */ +struct hwrm_tfc_tcam_alloc_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_TCAM_ALLOC_SET_REQ_FLAGS_DIR 
0x1UL + #define TFC_TCAM_ALLOC_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_TCAM_ALLOC_SET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_TCAM_ALLOC_SET_REQ_FLAGS_DIR_LAST TFC_TCAM_ALLOC_SET_REQ_FLAGS_DIR_TX + #define TFC_TCAM_ALLOC_SET_REQ_FLAGS_DMA 0x2UL + u8 subtype; + __le16 fid; + __le16 sid; + __le16 key_size; + __le16 result_size; + __le16 priority; + u8 track_type; + #define TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_LAST TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID + u8 unused[3]; + __le64 dma_addr; + u8 dev_data[96]; +}; + +/* hwrm_tfc_tcam_alloc_set_output (size:128b/16B) */ +struct hwrm_tfc_tcam_alloc_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tcam_id; + u8 unused0[5]; + u8 valid; +}; + +/* hwrm_tfc_tcam_free_input (size:192b/24B) */ +struct hwrm_tfc_tcam_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define TFC_TCAM_FREE_REQ_FLAGS_DIR 0x1UL + #define TFC_TCAM_FREE_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_TCAM_FREE_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_TCAM_FREE_REQ_FLAGS_DIR_LAST TFC_TCAM_FREE_REQ_FLAGS_DIR_TX + u8 subtype; + __le16 fid; + __le16 sid; + __le16 tcam_id; +}; + +/* hwrm_tfc_tcam_free_output (size:128b/16B) */ +struct hwrm_tfc_tcam_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tfc_if_tbl_set_input (size:960b/120B) */ +struct hwrm_tfc_if_tbl_set_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 sid; + __le16 fid; + u8 subtype; + u8 flags; + #define TFC_IF_TBL_SET_REQ_FLAGS_DIR 0x1UL + #define TFC_IF_TBL_SET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IF_TBL_SET_REQ_FLAGS_DIR_TX 0x1UL + #define 
TFC_IF_TBL_SET_REQ_FLAGS_DIR_LAST TFC_IF_TBL_SET_REQ_FLAGS_DIR_TX + __le16 index; + u8 data_size; + u8 unused0[7]; + u8 data[88]; +}; + +/* hwrm_tfc_if_tbl_set_output (size:128b/16B) */ +struct hwrm_tfc_if_tbl_set_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused0[7]; + u8 valid; +}; + +/* hwrm_tfc_if_tbl_get_input (size:256b/32B) */ +struct hwrm_tfc_if_tbl_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 sid; + __le16 fid; + u8 subtype; + u8 flags; + #define TFC_IF_TBL_GET_REQ_FLAGS_DIR 0x1UL + #define TFC_IF_TBL_GET_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_IF_TBL_GET_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_IF_TBL_GET_REQ_FLAGS_DIR_LAST TFC_IF_TBL_GET_REQ_FLAGS_DIR_TX + __le16 index; + u8 data_size; + u8 unused0[7]; +}; + +/* hwrm_tfc_if_tbl_get_output (size:960b/120B) */ +struct hwrm_tfc_if_tbl_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 sid; + __le16 fid; + u8 subtype; + u8 flags; + #define TFC_IF_TBL_GET_RESP_FLAGS_DIR 0x1UL + #define TFC_IF_TBL_GET_RESP_FLAGS_DIR_RX 0x0UL + #define TFC_IF_TBL_GET_RESP_FLAGS_DIR_TX 0x1UL + #define TFC_IF_TBL_GET_RESP_FLAGS_DIR_LAST TFC_IF_TBL_GET_RESP_FLAGS_DIR_TX + __le16 index; + u8 data_size; + u8 unused0[7]; + u8 data[88]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tfc_tbl_scope_config_get_input (size:192b/24B) */ +struct hwrm_tfc_tbl_scope_config_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tsid; + u8 unused0[7]; +}; + +/* hwrm_tfc_tbl_scope_config_get_output (size:128b/16B) */ +struct hwrm_tfc_tbl_scope_config_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 configured; + u8 unused0[6]; + u8 valid; +}; + +/* hwrm_tfc_resc_usage_query_input (size:256b/32B) */ +struct hwrm_tfc_resc_usage_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 
seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 sid; + __le16 fid; + u8 flags; + #define TFC_RESC_USAGE_QUERY_REQ_FLAGS_DIR 0x1UL + #define TFC_RESC_USAGE_QUERY_REQ_FLAGS_DIR_RX 0x0UL + #define TFC_RESC_USAGE_QUERY_REQ_FLAGS_DIR_TX 0x1UL + #define TFC_RESC_USAGE_QUERY_REQ_FLAGS_DIR_LAST TFC_RESC_USAGE_QUERY_REQ_FLAGS_DIR_TX + u8 track_type; + #define TFC_RESC_USAGE_QUERY_REQ_TRACK_TYPE_TRACK_TYPE_INVALID 0x0UL + #define TFC_RESC_USAGE_QUERY_REQ_TRACK_TYPE_TRACK_TYPE_SID 0x1UL + #define TFC_RESC_USAGE_QUERY_REQ_TRACK_TYPE_TRACK_TYPE_FID 0x2UL + #define TFC_RESC_USAGE_QUERY_REQ_TRACK_TYPE_LAST TFC_RESC_USAGE_QUERY_REQ_TRACK_TYPE_TRACK_TYPE_FID + __le16 data_size; + u8 unused1[8]; +}; + +/* hwrm_tfc_resc_usage_query_output (size:960b/120B) */ +struct hwrm_tfc_resc_usage_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 resp_code; + __le16 data_size; + __le16 unused0; + u8 data[96]; + u8 unused1[7]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define 
TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 + u8 tunnel_next_proto; + u8 unused_0[6]; +}; + +/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + __be16 tunnel_dst_port_val; + u8 upar_in_use; + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL + u8 status; + #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_CHIP_LEVEL 0x1UL + #define TUNNEL_DST_PORT_QUERY_RESP_STATUS_FUNC_LEVEL 0x2UL + u8 unused_0; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define 
TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 + u8 tunnel_next_proto; + __be16 tunnel_dst_port_val; + u8 unused_0[4]; +}; + +/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 tunnel_dst_port_id; + u8 error_info; + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED 0x3UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ENABLED + u8 upar_in_use; + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL + 
#define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */ +struct hwrm_tunnel_dst_port_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tunnel_type; + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 + u8 tunnel_next_proto; 
+ __le16 tunnel_dst_port_id; + u8 unused_0[4]; +}; + +/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */ +struct hwrm_tunnel_dst_port_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 error_info; + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED + u8 unused_1[6]; + u8 valid; +}; + +/* ctx_hw_stats (size:1280b/160B) */ +struct ctx_hw_stats { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 tpa_pkts; + __le64 tpa_bytes; + __le64 tpa_events; + __le64 tpa_aborts; +}; + +/* ctx_hw_stats_ext (size:1408b/176B) */ +struct ctx_hw_stats_ext { + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + __le64 rx_tpa_events; +}; + +/* ctx_eng_stats (size:512b/64B) */ +struct ctx_eng_stats { + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 
cdd_engine_usage; +}; + +/* hwrm_stat_ctx_alloc_input (size:320b/40B) */ +struct hwrm_stat_ctx_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 stats_dma_addr; + __le32 update_period_ms; + u8 stat_ctx_flags; + #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL + u8 unused_0; + __le16 stats_dma_length; + __le16 flags; + #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL + __le16 steering_tag; + __le32 unused_1; +}; + +/* hwrm_stat_ctx_alloc_output (size:128b/16B) */ +struct hwrm_stat_ctx_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_free_input (size:192b/24B) */ +struct hwrm_stat_ctx_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_free_output (size:128b/16B) */ +struct hwrm_stat_ctx_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 stat_ctx_id; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_stat_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 flags; + #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ctx_query_output (size:1408b/176B) */ +struct hwrm_stat_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_discard_pkts; + __le64 tx_error_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 
rx_bcast_bytes; + __le64 rx_agg_pkts; + __le64 rx_agg_bytes; + __le64 rx_agg_events; + __le64 rx_agg_aborts; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ext_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 flags; + #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */ +struct hwrm_stat_ext_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + __le64 rx_tpa_events; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_eng_query_input (size:192b/24B) */ +struct hwrm_stat_ctx_eng_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_eng_query_output (size:640b/80B) */ +struct hwrm_stat_ctx_eng_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 eng_bytes_in; + __le64 eng_bytes_out; + __le64 aux_bytes_in; + __le64 aux_bytes_out; + __le64 commands; + __le64 error_commands; + __le64 cce_engine_usage; + __le64 cdd_engine_usage; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ +struct hwrm_stat_ctx_clr_stats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 
target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 unused_0[4]; +}; + +/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */ +struct hwrm_stat_ctx_clr_stats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + +/* hwrm_stat_generic_qstats_input (size:256b/32B) */ +struct hwrm_stat_generic_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 generic_stat_size; + u8 flags; + #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[5]; + __le64 generic_stat_host_addr; +}; + +/* hwrm_stat_generic_qstats_output (size:128b/16B) */ +struct hwrm_stat_generic_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 generic_stat_size; + u8 unused_0[5]; + u8 valid; +}; + +/* generic_sw_hw_stats (size:1472b/184B) */ +struct generic_sw_hw_stats { + __le64 pcie_statistics_tx_tlp; + __le64 pcie_statistics_rx_tlp; + __le64 pcie_credit_fc_hdr_posted; + __le64 pcie_credit_fc_hdr_nonposted; + 
__le64 pcie_credit_fc_hdr_cmpl; + __le64 pcie_credit_fc_data_posted; + __le64 pcie_credit_fc_data_nonposted; + __le64 pcie_credit_fc_data_cmpl; + __le64 pcie_credit_fc_tgt_nonposted; + __le64 pcie_credit_fc_tgt_data_posted; + __le64 pcie_credit_fc_tgt_hdr_posted; + __le64 pcie_credit_fc_cmpl_hdr_posted; + __le64 pcie_credit_fc_cmpl_data_posted; + __le64 pcie_cmpl_longest; + __le64 pcie_cmpl_shortest; + __le64 cache_miss_count_cfcq; + __le64 cache_miss_count_cfcs; + __le64 cache_miss_count_cfcc; + __le64 cache_miss_count_cfcm; + __le64 hw_db_recov_dbs_dropped; + __le64 hw_db_recov_drops_serviced; + __le64 hw_db_recov_dbs_recovered; + __le64 hw_db_recov_oo_drop_count; +}; + +/* hwrm_stat_db_error_qstats_input (size:128b/16B) */ +struct hwrm_stat_db_error_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_stat_db_error_qstats_output (size:320b/40B) */ +struct hwrm_stat_db_error_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 tx_db_drop_invalid_qp_state; + __le32 rx_db_drop_invalid_rq_state; + __le32 tx_db_drop_format_error; + __le32 express_db_dropped_misc_error; + __le32 express_db_dropped_sq_overflow; + __le32 express_db_dropped_rq_overflow; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_reset_input (size:192b/24B) */ +struct hwrm_fw_reset_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define 
FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION 0x8UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION + u8 selfrst_status; + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 host_idx; + u8 flags; + #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL + #define FW_RESET_REQ_FLAGS_FW_ACTIVATION 0x2UL + u8 unused_0[4]; +}; + +/* hwrm_fw_reset_output (size:128b/16B) */ +struct hwrm_fw_reset_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL + #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTIMMEDIATE + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_qstatus_input (size:192b/24B) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP + u8 unused_0[7]; +}; + +/* hwrm_fw_qstatus_output (size:128b/16B) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 
req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL + #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER + u8 nvm_option_action_status; + #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL + #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL + #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL + #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL + #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_fw_set_time_input (size:256b/32B) */ +struct hwrm_fw_set_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 year; + #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL + #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_SET_TIME_REQ_ZONE_UTC 0 + #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535 + #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN + u8 unused_1[4]; +}; + +/* hwrm_fw_set_time_output (size:128b/16B) */ +struct hwrm_fw_set_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_get_time_input (size:128b/16B) */ +struct hwrm_fw_get_time_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_get_time_output (size:192b/24B) */ +struct hwrm_fw_get_time_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + 
__le16 resp_len; + __le16 year; + #define FW_GET_TIME_RESP_YEAR_UNKNOWN 0x0UL + #define FW_GET_TIME_RESP_YEAR_LAST FW_GET_TIME_RESP_YEAR_UNKNOWN + u8 month; + u8 day; + u8 hour; + u8 minute; + u8 second; + u8 unused_0; + __le16 millisecond; + __le16 zone; + #define FW_GET_TIME_RESP_ZONE_UTC 0 + #define FW_GET_TIME_RESP_ZONE_UNKNOWN 65535 + #define FW_GET_TIME_RESP_ZONE_LAST FW_GET_TIME_RESP_ZONE_UNKNOWN + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_struct_hdr (size:128b/16B) */ +struct hwrm_struct_hdr { + __le16 struct_id; + #define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL + #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL + #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL + #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL + #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL + #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL + #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL + #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL + #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL + #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL + #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL + #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL + #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF + __le16 len; + u8 version; + u8 count; + __le16 subtype; + __le16 next_offset; + #define STRUCT_HDR_NEXT_OFFSET_LAST 0x0UL + u8 unused_0[6]; +}; + +/* hwrm_struct_data_dcbx_ets (size:256b/32B) */ +struct hwrm_struct_data_dcbx_ets { + u8 destination; + #define STRUCT_DATA_DCBX_ETS_DESTINATION_CONFIGURATION 0x1UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION 0x2UL + #define STRUCT_DATA_DCBX_ETS_DESTINATION_LAST STRUCT_DATA_DCBX_ETS_DESTINATION_RECOMMMENDATION + u8 max_tcs; + __le16 unused1; + u8 pri0_to_tc_map; + u8 pri1_to_tc_map; + u8 pri2_to_tc_map; + u8 pri3_to_tc_map; + u8 pri4_to_tc_map; + u8 pri5_to_tc_map; + u8 pri6_to_tc_map; + u8 pri7_to_tc_map; + u8 tc0_to_bw_map; + u8 tc1_to_bw_map; + u8 tc2_to_bw_map; + u8 
tc3_to_bw_map; + u8 tc4_to_bw_map; + u8 tc5_to_bw_map; + u8 tc6_to_bw_map; + u8 tc7_to_bw_map; + u8 tc0_to_tsa_map; + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_SP 0x0UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_CBS 0x1UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_ETS 0x2UL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC 0xffUL + #define STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_LAST STRUCT_DATA_DCBX_ETS_TC0_TO_TSA_MAP_TSA_TYPE_VENDOR_SPECIFIC + u8 tc1_to_tsa_map; + u8 tc2_to_tsa_map; + u8 tc3_to_tsa_map; + u8 tc4_to_tsa_map; + u8 tc5_to_tsa_map; + u8 tc6_to_tsa_map; + u8 tc7_to_tsa_map; + u8 unused_0[4]; +}; + +/* hwrm_struct_data_dcbx_pfc (size:64b/8B) */ +struct hwrm_struct_data_dcbx_pfc { + u8 pfc_priority_bitmap; + u8 max_pfc_tcs; + u8 mbc; + u8 unused_0[5]; +}; + +/* hwrm_struct_data_dcbx_app (size:64b/8B) */ +struct hwrm_struct_data_dcbx_app { + __be16 protocol_id; + u8 protocol_selector; + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL + #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_LAST STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT + u8 priority; + u8 valid; + u8 unused_0[3]; +}; + +/* hwrm_struct_data_dcbx_feature_state (size:64b/8B) */ +struct hwrm_struct_data_dcbx_feature_state { + u8 dcbx_mode; + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_DISABLED 0x0UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_IEEE 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_DCBX_MODE_DCBX_CEE + u8 ets_state; + u8 pfc_state; + u8 app_state; + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ENABLE_BIT_POS 0x7UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_WILLING_BIT_POS 
0x6UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS 0x5UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS + u8 unused[3]; + u8 resets; + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS 0x1UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_PFC 0x2UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_APP 0x4UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE 0x8UL + #define STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_LAST STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_STATE +}; + +/* hwrm_struct_data_lldp (size:64b/8B) */ +struct hwrm_struct_data_lldp { + u8 admin_state; + #define STRUCT_DATA_LLDP_ADMIN_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_TX 0x1UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_RX 0x2UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE 0x3UL + #define STRUCT_DATA_LLDP_ADMIN_STATE_LAST STRUCT_DATA_LLDP_ADMIN_STATE_ENABLE + u8 port_description_state; + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_LAST STRUCT_DATA_LLDP_PORT_DESCRIPTION_STATE_ENABLE + u8 system_name_state; + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_NAME_STATE_ENABLE + u8 system_desc_state; + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_DESC_STATE_ENABLE + u8 system_cap_state; + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_LAST STRUCT_DATA_LLDP_SYSTEM_CAP_STATE_ENABLE + u8 mgmt_addr_state; + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_DISABLE 0x0UL + 
#define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_MGMT_ADDR_STATE_LAST STRUCT_DATA_LLDP_MGMT_ADDR_STATE_ENABLE + u8 async_event_notification_state; + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_DISABLE 0x0UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE 0x1UL + #define STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_LAST STRUCT_DATA_LLDP_ASYNC_EVENT_NOTIFICATION_STATE_ENABLE + u8 unused_0; +}; + +/* hwrm_struct_data_lldp_generic (size:2112b/264B) */ +struct hwrm_struct_data_lldp_generic { + u8 tlv_type; + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_CHASSIS 0x1UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT 0x2UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_NAME 0x3UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_SYSTEM_DESCRIPTION 0x4UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_NAME 0x5UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION 0x6UL + #define STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_LAST STRUCT_DATA_LLDP_GENERIC_TLV_TYPE_PORT_DESCRIPTION + u8 subtype; + u8 length; + u8 unused1[5]; + __le32 tlv_value[64]; +}; + +/* hwrm_struct_data_lldp_device (size:1472b/184B) */ +struct hwrm_struct_data_lldp_device { + __le16 ttl; + u8 mgmt_addr_len; + u8 mgmt_addr_type; + u8 unused_3[4]; + __le32 mgmt_addr[8]; + __le32 system_caps; + u8 intf_num_type; + u8 mgmt_addr_oid_length; + u8 unused_4[2]; + __le32 intf_num; + u8 unused_5[4]; + __le32 mgmt_addr_oid[32]; +}; + +/* hwrm_struct_data_port_description (size:64b/8B) */ +struct hwrm_struct_data_port_description { + u8 port_id; + u8 unused_0[7]; +}; + +/* hwrm_struct_data_rss_v2 (size:128b/16B) */ +struct hwrm_struct_data_rss_v2 { + __le16 flags; + #define STRUCT_DATA_RSS_V2_FLAGS_HASH_VALID 0x1UL + __le16 rss_ctx_id; + __le16 num_ring_groups; + __le16 hash_type; + #define STRUCT_DATA_RSS_V2_HASH_TYPE_IPV4 0x1UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV4 0x2UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV4 0x4UL + #define 
STRUCT_DATA_RSS_V2_HASH_TYPE_IPV6 0x8UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_TCP_IPV6 0x10UL + #define STRUCT_DATA_RSS_V2_HASH_TYPE_UDP_IPV6 0x20UL + __le64 hash_key_ring_group_ids; +}; + +/* hwrm_struct_data_power_information (size:192b/24B) */ +struct hwrm_struct_data_power_information { + __le32 bkup_power_info_ver; + __le32 platform_bkup_power_count; + __le32 load_milli_watt; + __le32 bkup_time_milli_seconds; + __le32 bkup_power_status; + __le32 bkup_power_charge_time; +}; + +/* hwrm_struct_data_peer_mmap (size:1600b/200B) */ +struct hwrm_struct_data_peer_mmap { + __le16 fid; + __le16 count; + __le32 unused_0; + __le64 hpa_0; + __le64 gpa_0; + __le64 size_0; + __le64 hpa_1; + __le64 gpa_1; + __le64 size_1; + __le64 hpa_2; + __le64 gpa_2; + __le64 size_2; + __le64 hpa_3; + __le64 gpa_3; + __le64 size_3; + __le64 hpa_4; + __le64 gpa_4; + __le64 size_4; + __le64 hpa_5; + __le64 gpa_5; + __le64 size_5; + __le64 hpa_6; + __le64 gpa_6; + __le64 size_6; + __le64 hpa_7; + __le64 gpa_7; + __le64 size_7; +}; + +/* hwrm_struct_data_msix_per_vf (size:320b/40B) */ +struct hwrm_struct_data_msix_per_vf { + __le16 pf_id; + __le16 count; + __le32 unused_0; + __le16 start_vf_0; + __le16 msix_0; + __le16 start_vf_1; + __le16 msix_1; + __le16 start_vf_2; + __le16 msix_2; + __le16 start_vf_3; + __le16 msix_3; + __le16 start_vf_4; + __le16 msix_4; + __le16 start_vf_5; + __le16 msix_5; + __le16 start_vf_6; + __le16 msix_6; + __le16 start_vf_7; + __le16 msix_7; +}; + +/* hwrm_fw_set_structured_data_input (size:256b/32B) */ +struct hwrm_fw_set_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + u8 hdr_cnt; + u8 unused_0[5]; +}; + +/* hwrm_fw_set_structured_data_output (size:128b/16B) */ +struct hwrm_fw_set_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* 
hwrm_fw_set_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_get_structured_data_input (size:256b/32B) */ +struct hwrm_fw_get_structured_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 structure_id; + __le16 subtype; + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_UNUSED 0x0UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_ALL 0xffffUL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_ADMIN 0x100UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_PEER 0x101UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NEAR_BRIDGE_OPERATIONAL 0x102UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER 0x201UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL + #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_LAST FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL + u8 count; + u8 unused_0; +}; + +/* hwrm_fw_get_structured_data_output (size:128b/16B) */ +struct hwrm_fw_get_structured_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 hdr_cnt; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_fw_get_structured_data_cmd_err (size:64b/8B) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + #define 
FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_LAST FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_ipc_msg_input (size:320b/40B) */ +struct hwrm_fw_ipc_msg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define FW_IPC_MSG_REQ_ENABLES_COMMAND_ID 0x1UL + #define FW_IPC_MSG_REQ_ENABLES_SRC_PROCESSOR 0x2UL + #define FW_IPC_MSG_REQ_ENABLES_DATA_OFFSET 0x4UL + #define FW_IPC_MSG_REQ_ENABLES_LENGTH 0x8UL + __le16 command_id; + #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_LAG 0x1UL + #define FW_IPC_MSG_REQ_COMMAND_ID_MHB_HOST 0x2UL + #define FW_IPC_MSG_REQ_COMMAND_ID_ROCE_DRVR_VERSION 0x3UL + #define FW_IPC_MSG_REQ_COMMAND_ID_LAST FW_IPC_MSG_REQ_COMMAND_ID_ROCE_DRVR_VERSION + u8 src_processor; + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_CFW 0x1UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_BONO 0x2UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_APE 0x3UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG 0x4UL + #define FW_IPC_MSG_REQ_SRC_PROCESSOR_LAST FW_IPC_MSG_REQ_SRC_PROCESSOR_KONG + u8 unused_0; + __le32 data_offset; + __le16 length; + u8 unused_1[2]; + __le64 opaque; +}; + +/* hwrm_fw_ipc_msg_output (size:256b/32B) */ +struct hwrm_fw_ipc_msg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 msg_data_1; + __le32 msg_data_2; + __le64 reserved64; + u8 reserved48[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_input (size:256b/32B) */ +struct hwrm_fw_ipc_mailbox_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + u8 unused; + u8 event_id; + u8 port_id; + __le32 event_data1; + __le32 event_data2; + u8 unused_0[4]; +}; + +/* hwrm_fw_ipc_mailbox_output (size:128b/16B) */ +struct hwrm_fw_ipc_mailbox_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ipc_mailbox_cmd_err (size:64b/8B) */ +struct 
hwrm_fw_ipc_mailbox_cmd_err { + u8 code; + #define FW_IPC_MAILBOX_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID 0x3UL + #define FW_IPC_MAILBOX_CMD_ERR_CODE_LAST FW_IPC_MAILBOX_CMD_ERR_CODE_BAD_ID + u8 unused_0[7]; +}; + +/* hwrm_fw_ecn_cfg_input (size:192b/24B) */ +struct hwrm_fw_ecn_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define FW_ECN_CFG_REQ_FLAGS_ENABLE_ECN 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_fw_ecn_cfg_output (size:128b/16B) */ +struct hwrm_fw_ecn_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_ecn_qcfg_input (size:128b/16B) */ +struct hwrm_fw_ecn_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_ecn_qcfg_output (size:128b/16B) */ +struct hwrm_fw_ecn_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define FW_ECN_QCFG_RESP_FLAGS_ENABLE_ECN 0x1UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_fw_health_check_input (size:128b/16B) */ +struct hwrm_fw_health_check_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_health_check_output (size:128b/16B) */ +struct hwrm_fw_health_check_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 fw_status; + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_BOOTED 0x1UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SBI_MISMATCH 0x2UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_BOOTED 0x4UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SRT_MISMATCH 0x8UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_BOOTED 0x10UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT_MISMATCH 0x20UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_SECOND_RT 0x40UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_FASTBOOTED 0x80UL + #define 
FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_BOOTED 0x100UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_DIR_HDR_MISMATCH 0x200UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_MBR_CORRUPT 0x400UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CFG_MISMATCH 0x800UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_FRU_MISMATCH 0x1000UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_BOOTED 0x2000UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_CRT2_MISMATCH 0x4000UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_BOOTED 0x8000UL + #define FW_HEALTH_CHECK_RESP_FW_STATUS_GXRT_MISMATCH 0x10000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_livepatch_query_input (size:192b/24B) */ +struct hwrm_fw_livepatch_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 fw_target; + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL + #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW + u8 unused_0[7]; +}; + +/* hwrm_fw_livepatch_query_output (size:640b/80B) */ +struct hwrm_fw_livepatch_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + char install_ver[32]; + char active_ver[32]; + __le16 status_flags; + #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL + #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_fw_livepatch_input (size:256b/32B) */ +struct hwrm_fw_livepatch_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 opcode; + #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL + #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL + #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE + u8 fw_target; + #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL + #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL + #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW + 
u8 loadtype; + #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL + #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL + #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT + u8 flags; + __le32 patch_len; + __le64 host_addr; +}; + +/* hwrm_fw_livepatch_output (size:128b/16B) */ +struct hwrm_fw_livepatch_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */ +struct hwrm_fw_livepatch_cmd_err { + u8 code; + #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL + #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL + #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL + #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL + #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL + #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED + u8 unused_0[7]; +}; + +/* hwrm_fw_sync_input (size:192b/24B) */ +struct hwrm_fw_sync_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 sync_action; + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SBI 0x1UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_SRT 0x2UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT 0x4UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_DIR_HDR 0x8UL + #define FW_SYNC_REQ_SYNC_ACTION_WRITE_MBR 0x10UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CFG 0x20UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_FRU 0x40UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_CRT2 0x80UL + #define FW_SYNC_REQ_SYNC_ACTION_SYNC_GXRT 0x100UL + #define FW_SYNC_REQ_SYNC_ACTION_ACTION 0x80000000UL + u8 unused_0[4]; +}; + +/* hwrm_fw_sync_output (size:128b/16B) */ +struct 
hwrm_fw_sync_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 sync_status; + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_MASK 0xffUL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SFT 0 + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_IN_PROGRESS 0x1UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_TIMEOUT 0x2UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_LAST FW_SYNC_RESP_SYNC_STATUS_ERR_CODE_GENERAL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_ERR 0x40000000UL + #define FW_SYNC_RESP_SYNC_STATUS_SYNC_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_state_qcaps_input (size:128b/16B) */ +struct hwrm_fw_state_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_fw_state_qcaps_output (size:256b/32B) */ +struct hwrm_fw_state_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 backup_memory; + __le32 quiesce_timeout; + __le32 fw_status_blackout; + __le32 fw_status_max_wait; + u8 unused_0[4]; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_fw_state_quiesce_input (size:192b/24B) */ +struct hwrm_fw_state_quiesce_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define FW_STATE_QUIESCE_REQ_FLAGS_ERROR_RECOVERY 0x1UL + u8 unused_0[7]; +}; + +/* hwrm_fw_state_quiesce_output (size:192b/24B) */ +struct hwrm_fw_state_quiesce_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 quiesce_status; + #define FW_STATE_QUIESCE_RESP_QUIESCE_STATUS_INITIATED 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_unquiesce_input (size:128b/16B) */ +struct hwrm_fw_state_unquiesce_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 
resp_addr; +}; + +/* hwrm_fw_state_unquiesce_output (size:192b/24B) */ +struct hwrm_fw_state_unquiesce_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unquiesce_status; + #define FW_STATE_UNQUIESCE_RESP_UNQUIESCE_STATUS_COMPLETE 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_backup_input (size:256b/32B) */ +struct hwrm_fw_state_backup_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 backup_pg_size_backup_lvl; + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_MASK 0xfUL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_SFT 0 + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_0 0x0UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_1 0x1UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 0x2UL + #define FW_STATE_BACKUP_REQ_BACKUP_LVL_LAST FW_STATE_BACKUP_REQ_BACKUP_LVL_LVL_2 + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_MASK 0xf0UL + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_SFT 4 + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_4K (0x0UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8K (0x1UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_64K (0x2UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_2M (0x3UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_8M (0x4UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G (0x5UL << 4) + #define FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_LAST FW_STATE_BACKUP_REQ_BACKUP_PG_SIZE_PG_1G + u8 unused_0[7]; + __le64 backup_page_dir; +}; + +/* hwrm_fw_state_backup_output (size:192b/24B) */ +struct hwrm_fw_state_backup_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 backup_status; + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_MASK 0xffUL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SFT 0 + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_SUCCESS 0x0UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_QUIESCE_ERROR 0x1UL + #define 
FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL 0x3UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_LAST FW_STATE_BACKUP_RESP_BACKUP_STATUS_ERR_CODE_GENERAL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_RESET_REQUIRED 0x40000000UL + #define FW_STATE_BACKUP_RESP_BACKUP_STATUS_COMPLETE 0x80000000UL + u8 unused_0[4]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_fw_state_restore_input (size:256b/32B) */ +struct hwrm_fw_state_restore_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 restore_pg_size_restore_lvl; + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_MASK 0xfUL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_SFT 0 + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_0 0x0UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_1 0x1UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 0x2UL + #define FW_STATE_RESTORE_REQ_RESTORE_LVL_LAST FW_STATE_RESTORE_REQ_RESTORE_LVL_LVL_2 + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_MASK 0xf0UL + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_SFT 4 + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_4K (0x0UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8K (0x1UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_64K (0x2UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_2M (0x3UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_8M (0x4UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G (0x5UL << 4) + #define FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_LAST FW_STATE_RESTORE_REQ_RESTORE_PG_SIZE_PG_1G + u8 unused_0[7]; + __le64 restore_page_dir; +}; + +/* hwrm_fw_state_restore_output (size:128b/16B) */ +struct hwrm_fw_state_restore_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 restore_status; + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_MASK 0xffUL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SFT 0 + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_SUCCESS 0x0UL + #define 
FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_GENERAL 0x1UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_FORMAT_PARSE 0x2UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK 0x3UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_LAST FW_STATE_RESTORE_RESP_RESTORE_STATUS_ERR_CODE_INTEGRITY_CHECK + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_FAILURE_ROLLBACK_COMPLETED 0x40000000UL + #define FW_STATE_RESTORE_RESP_RESTORE_STATUS_COMPLETE 0x80000000UL + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_fw_secure_cfg_input (size:256b/32B) */ +struct hwrm_fw_secure_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 enable; + #define FW_SECURE_CFG_REQ_ENABLE_NVRAM 0x1UL + #define FW_SECURE_CFG_REQ_ENABLE_GRC 0x2UL + #define FW_SECURE_CFG_REQ_ENABLE_UART 0x3UL + #define FW_SECURE_CFG_REQ_ENABLE_LAST FW_SECURE_CFG_REQ_ENABLE_UART + u8 config_mode; + #define FW_SECURE_CFG_REQ_CONFIG_MODE_PERSISTENT 0x1UL + #define FW_SECURE_CFG_REQ_CONFIG_MODE_RUNTIME 0x2UL + u8 nvm_lock_mode; + #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_NONE 0x0UL + #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_PARTIAL 0x1UL + #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_FULL 0x2UL + #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP 0x3UL + #define FW_SECURE_CFG_REQ_NVM_LOCK_MODE_LAST FW_SECURE_CFG_REQ_NVM_LOCK_MODE_CHIP + u8 nvm_partial_lock_mask; + #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_EXE 0x1UL + #define FW_SECURE_CFG_REQ_NVM_PARTIAL_LOCK_MASK_CFG 0x2UL + u8 grc_ctrl; + #define FW_SECURE_CFG_REQ_GRC_CTRL_RO 0x0UL + #define FW_SECURE_CFG_REQ_GRC_CTRL_RW 0x1UL + #define FW_SECURE_CFG_REQ_GRC_CTRL_LAST FW_SECURE_CFG_REQ_GRC_CTRL_RW + u8 uart_ctrl; + #define FW_SECURE_CFG_REQ_UART_CTRL_DISABLE 0x0UL + #define FW_SECURE_CFG_REQ_UART_CTRL_ENABLE 0x1UL + #define FW_SECURE_CFG_REQ_UART_CTRL_LAST FW_SECURE_CFG_REQ_UART_CTRL_ENABLE + u8 unused_0[2]; + __le32 unused_1[2]; +}; + +/* hwrm_fw_secure_cfg_output (size:128b/16B) */ 
+struct hwrm_fw_secure_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_exec_fwd_resp_input (size:1024b/128B) */ +struct hwrm_exec_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_exec_fwd_resp_output (size:128b/16B) */ +struct hwrm_exec_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_reject_fwd_resp_input (size:1024b/128B) */ +struct hwrm_reject_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 encap_request[26]; + __le16 encap_resp_target_id; + u8 unused_0[6]; +}; + +/* hwrm_reject_fwd_resp_output (size:128b/16B) */ +struct hwrm_reject_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_resp_input (size:1024b/128B) */ +struct hwrm_fwd_resp_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_resp_target_id; + __le16 encap_resp_cmpl_ring; + __le16 encap_resp_len; + u8 unused_0; + u8 unused_1; + __le64 encap_resp_addr; + __le32 encap_resp[24]; +}; + +/* hwrm_fwd_resp_output (size:128b/16B) */ +struct hwrm_fwd_resp_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_fwd_async_event_cmpl_input (size:320b/40B) */ +struct hwrm_fwd_async_event_cmpl_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 encap_async_event_target_id; + u8 unused_0[6]; + __le32 encap_async_event_cmpl[4]; +}; + +/* hwrm_fwd_async_event_cmpl_output (size:128b/16B) */ +struct hwrm_fwd_async_event_cmpl_output { 
+ __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_temp_monitor_query_input (size:128b/16B) */ +struct hwrm_temp_monitor_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_temp_monitor_query_output (size:192b/24B) */ +struct hwrm_temp_monitor_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 temp; + u8 phy_temp; + u8 om_temp; + u8 flags; + #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL + #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL + u8 temp2; + u8 phy_temp2; + u8 om_temp2; + u8 warn_threshold; + u8 critical_threshold; + u8 fatal_threshold; + u8 shutdown_threshold; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_reg_power_query_input (size:128b/16B) */ +struct hwrm_reg_power_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_reg_power_query_output (size:192b/24B) */ +struct hwrm_reg_power_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define REG_POWER_QUERY_RESP_FLAGS_IN_POWER_AVAILABLE 0x1UL + #define REG_POWER_QUERY_RESP_FLAGS_OUT_POWER_AVAILABLE 0x2UL + __le32 in_power_mw; + __le32 out_power_mw; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_core_frequency_query_input (size:128b/16B) */ +struct hwrm_core_frequency_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_core_frequency_query_output (size:128b/16B) */ +struct hwrm_core_frequency_query_output { + __le16 
error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 core_frequency_hz; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_reg_power_histogram_input (size:192b/24B) */ +struct hwrm_reg_power_histogram_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define REG_POWER_HISTOGRAM_REQ_FLAGS_CLEAR_HISTOGRAM 0x1UL + __le32 unused_0; +}; + +/* hwrm_reg_power_histogram_output (size:1088b/136B) */ +struct hwrm_reg_power_histogram_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 flags; + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT 0x1UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_INPUT 0x0UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT 0x1UL + #define REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_LAST REG_POWER_HISTOGRAM_RESP_FLAGS_POWER_IN_OUT_OUTPUT + u8 unused_0[2]; + __le32 sampling_period; + __le64 sample_count; + __le32 power_hist[26]; + u8 unused_1[7]; + u8 valid; +}; + +#define BUCKET_NO_DATA_FOR_SAMPLE 0x0UL +#define BUCKET_RANGE_8W_OR_LESS 0x1UL +#define BUCKET_RANGE_8W_TO_9W 0x2UL +#define BUCKET_RANGE_9W_TO_10W 0x3UL +#define BUCKET_RANGE_10W_TO_11W 0x4UL +#define BUCKET_RANGE_11W_TO_12W 0x5UL +#define BUCKET_RANGE_12W_TO_13W 0x6UL +#define BUCKET_RANGE_13W_TO_14W 0x7UL +#define BUCKET_RANGE_14W_TO_15W 0x8UL +#define BUCKET_RANGE_15W_TO_16W 0x9UL +#define BUCKET_RANGE_16W_TO_18W 0xaUL +#define BUCKET_RANGE_18W_TO_20W 0xbUL +#define BUCKET_RANGE_20W_TO_22W 0xcUL +#define BUCKET_RANGE_22W_TO_24W 0xdUL +#define BUCKET_RANGE_24W_TO_26W 0xeUL +#define BUCKET_RANGE_26W_TO_28W 0xfUL +#define BUCKET_RANGE_28W_TO_30W 0x10UL +#define BUCKET_RANGE_30W_TO_32W 0x11UL +#define BUCKET_RANGE_32W_TO_34W 0x12UL +#define BUCKET_RANGE_34W_TO_36W 0x13UL +#define BUCKET_RANGE_36W_TO_38W 0x14UL +#define BUCKET_RANGE_38W_TO_40W 0x15UL +#define BUCKET_RANGE_40W_TO_42W 0x16UL +#define BUCKET_RANGE_42W_TO_44W 0x17UL +#define 
BUCKET_RANGE_44W_TO_50W 0x18UL +#define BUCKET_RANGE_OVER_50W 0x19UL +#define BUCKET_LAST BUCKET_RANGE_OVER_50W + + +/* hwrm_wol_filter_alloc_input (size:512b/64B) */ +struct hwrm_wol_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS 0x1UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_OFFSET 0x2UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_SIZE 0x4UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_BUF_ADDR 0x8UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_ADDR 0x10UL + #define WOL_FILTER_ALLOC_REQ_ENABLES_PATTERN_MASK_SIZE 0x20UL + __le16 port_id; + u8 wol_type; + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_ALLOC_REQ_WOL_TYPE_LAST WOL_FILTER_ALLOC_REQ_WOL_TYPE_INVALID + u8 unused_0[5]; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_buf_size; + __le16 pattern_mask_size; + u8 unused_1[4]; + __le64 pattern_buf_addr; + __le64 pattern_mask_addr; +}; + +/* hwrm_wol_filter_alloc_output (size:128b/16B) */ +struct hwrm_wol_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_wol_filter_free_input (size:256b/32B) */ +struct hwrm_wol_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define WOL_FILTER_FREE_REQ_FLAGS_FREE_ALL_WOL_FILTERS 0x1UL + __le32 enables; + #define WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID 0x1UL + __le16 port_id; + u8 wol_filter_id; + u8 unused_0[5]; +}; + +/* hwrm_wol_filter_free_output (size:128b/16B) */ +struct hwrm_wol_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; 
+ +/* hwrm_wol_filter_qcfg_input (size:448b/56B) */ +struct hwrm_wol_filter_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 handle; + u8 unused_0[4]; + __le64 pattern_buf_addr; + __le16 pattern_buf_size; + u8 unused_1[6]; + __le64 pattern_mask_addr; + __le16 pattern_mask_size; + u8 unused_2[6]; +}; + +/* hwrm_wol_filter_qcfg_output (size:256b/32B) */ +struct hwrm_wol_filter_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 next_handle; + u8 wol_filter_id; + u8 wol_type; + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_MAGICPKT 0x0UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_BMP 0x1UL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID 0xffUL + #define WOL_FILTER_QCFG_RESP_WOL_TYPE_LAST WOL_FILTER_QCFG_RESP_WOL_TYPE_INVALID + __le32 unused_0; + u8 mac_address[6]; + __le16 pattern_offset; + __le16 pattern_size; + __le16 pattern_mask_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_wol_reason_qcfg_input (size:320b/40B) */ +struct hwrm_wol_reason_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + u8 unused_0[6]; + __le64 wol_pkt_buf_addr; + __le16 wol_pkt_buf_size; + u8 unused_1[6]; +}; + +/* hwrm_wol_reason_qcfg_output (size:128b/16B) */ +struct hwrm_wol_reason_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 wol_filter_id; + u8 wol_reason; + #define WOL_REASON_QCFG_RESP_WOL_REASON_MAGICPKT 0x0UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_BMP 0x1UL + #define WOL_REASON_QCFG_RESP_WOL_REASON_INVALID 0xffUL + #define WOL_REASON_QCFG_RESP_WOL_REASON_LAST WOL_REASON_QCFG_RESP_WOL_REASON_INVALID + u8 wol_pkt_len; + u8 unused_0[4]; + u8 valid; +}; + +/* hwrm_dbg_read_direct_input (size:256b/32B) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + 
__le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* hwrm_dbg_read_direct_output (size:128b/16B) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 crc32; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_write_direct_input (size:448b/56B) */ +struct hwrm_dbg_write_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 write_addr; + __le32 write_len32; + __le32 write_data[8]; +}; + +/* hwrm_dbg_write_direct_output (size:128b/16B) */ +struct hwrm_dbg_write_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_read_indirect_input (size:640b/80B) */ +struct hwrm_dbg_read_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_dest_addr_len; + u8 indirect_access_type; + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define 
DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_PRIVATE 0x13UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA 0x14UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_SOC_ELOG 0x15UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CTX 0x16UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STATS 0x17UL + #define DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_READ_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STATS + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + __le32 opaque[10]; +}; + +/* hwrm_dbg_read_indirect_output (size:128b/16B) */ +struct hwrm_dbg_read_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_write_indirect_input (size:832b/104B) */ +struct hwrm_dbg_write_indirect_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 indirect_access_type; + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L2 0x0UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_TE_MGMT_FILTERS_L3L4 0x1UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L2 0x2UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_RE_MGMT_FILTERS_L3L4 0x3UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STAT_CTXS 0x4UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_L2_TCAM 0x5UL + #define 
DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_L2_TCAM 0x6UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_IPV6_SUBNET_TCAM 0x7UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_IPV6_SUBNET_TCAM 0x8UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_SRC_PROPERTIES_TCAM 0x9UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_SRC_PROPERTIES_TCAM 0xaUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_VEB_LOOKUP_TCAM 0xbUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_PROFILE_LOOKUP_TCAM 0xcUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_PROFILE_LOOKUP_TCAM 0xdUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_TX_LOOKUP_TCAM 0xeUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CFA_RX_LOOKUP_TCAM 0xfUL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MHB 0x10UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_GBL 0x11UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_MULTI_HOST_SOC 0x12UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_PCIE_PRIVATE 0x13UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_HOST_DMA 0x14UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_SOC_ELOG 0x15UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_CTX 0x16UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STATS 0x17UL + #define DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_LAST DBG_WRITE_INDIRECT_REQ_INDIRECT_ACCESS_TYPE_STATS + u8 unused_0[3]; + __le32 start_index; + __le32 num_of_entries; + u8 unused_1[4]; + __le32 write_data[8]; + __le32 opaque[10]; +}; + +/* hwrm_dbg_write_indirect_output (size:128b/16B) */ +struct hwrm_dbg_write_indirect_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_dump_input (size:320b/40B) */ +struct hwrm_dbg_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + 
__le32 handle; + u8 unused_0[4]; + __le64 host_dbg_dump_addr; + __le64 host_dbg_dump_addr_len; +}; + +/* hwrm_dbg_dump_output (size:192b/24B) */ +struct hwrm_dbg_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nexthandle; + __le32 dbg_data_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_erase_nvm_input (size:192b/24B) */ +struct hwrm_dbg_erase_nvm_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define DBG_ERASE_NVM_REQ_FLAGS_ERASE_ALL 0x1UL + u8 unused_0[6]; +}; + +/* hwrm_dbg_erase_nvm_output (size:128b/16B) */ +struct hwrm_dbg_erase_nvm_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_cfg_input (size:192b/24B) */ +struct hwrm_dbg_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define DBG_CFG_REQ_FLAGS_UART_LOG 0x1UL + #define DBG_CFG_REQ_FLAGS_UART_LOG_SECONDARY 0x2UL + #define DBG_CFG_REQ_FLAGS_FW_TRACE 0x4UL + #define DBG_CFG_REQ_FLAGS_FW_TRACE_SECONDARY 0x8UL + #define DBG_CFG_REQ_FLAGS_DEBUG_NOTIFY 0x10UL + #define DBG_CFG_REQ_FLAGS_JTAG_DEBUG 0x20UL + __le16 async_cmpl_ring; + u8 unused_0[2]; +}; + +/* hwrm_dbg_cfg_output (size:128b/16B) */ +struct hwrm_dbg_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_header_input (size:192b/24B) */ +struct hwrm_dbg_crashdump_header_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; +}; + +/* hwrm_dbg_crashdump_header_output (size:512b/64B) */ +struct hwrm_dbg_crashdump_header_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 version_hi; + u8 version_low; + __le16 header_len; + __le32 dump_size; + __le32 crash_time; + s8 
utc_offset; + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_UTC 0 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMSTERDAM 4 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_EGYPT 8 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_EUROPE_MOSCOW 12 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_IRAN 14 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_DUBAI 16 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_KABUL 18 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_MAWSON 20 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_COLOMBO 22 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_KATHMANDU 23 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_INDIAN_CHAGOS 24 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_INDIAN_COCOS 26 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_BANGKOK 28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_HONG_KONG 32 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_PYONGYANG 34 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_EUCLA 35 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ASIA_TOKYO 36 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_ADELAIDE 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_BROKEN_HILL 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_DARWIN 38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_SYDNEY 40 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AUSTRALIA_LORD_HOWE 42 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_MACQUARIE 44 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ANTARCTICA_SOUTH_POLE 48 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_CHATHAM 51 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_APIA 52 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_KIRITIMATIS 56 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ATLANTIC_CAPE_VERDE -4 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_ATLANTIC_SOUTH_GEORGIA -8 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_ARGENTINA_BUENOS_AIRES -12 + #define 
DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_SAO_PAULO -12 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_NEWFOUNDLAND -14 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_BARBADOS -16 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_CANCUN -20 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_COSTA_RICA -24 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_AMERICA_PHOENIX -28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_ARIZONA -28 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_PACIFIC -32 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_US_ALASKA -36 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MARQUESAS -38 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_HAWAII -40 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MIDWAY -44 + #define DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_LAST DBG_CRASHDUMP_HEADER_RESP_UTC_OFFSET_PACIFIC_MIDWAY + u8 crash_cntr; + __le16 dev_uid_length; + u8 dev_uid[32]; + __le32 power_on_count; + u8 unused_2[3]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_erase_input (size:192b/24B) */ +struct hwrm_dbg_crashdump_erase_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 scope; + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_INVALIDATE 0x0UL + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_REINIT 0x1UL + #define DBG_CRASHDUMP_ERASE_REQ_SCOPE_LAST DBG_CRASHDUMP_ERASE_REQ_SCOPE_REINIT + u8 unused_0[3]; + __le32 unused_1; +}; + +/* hwrm_dbg_crashdump_erase_output (size:128b/16B) */ +struct hwrm_dbg_crashdump_erase_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_dbg_qcaps_input (size:192b/24B) */ +struct hwrm_dbg_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0[6]; +}; + +/* hwrm_dbg_qcaps_output (size:192b/24B) */ +struct hwrm_dbg_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[2]; + __le32 coredump_component_disable_caps; + #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL + __le32 flags; + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL + #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL + #define DBG_QCAPS_RESP_FLAGS_USEQ 0x8UL + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_dbg_qcfg_input (size:192b/24B) */ +struct hwrm_dbg_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 flags; + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0 + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL + #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR + __le32 coredump_component_disable_flags; + #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL +}; + +/* hwrm_dbg_qcfg_output (size:256b/32B) */ +struct hwrm_dbg_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + u8 unused_0[2]; + __le32 coredump_size; + __le32 flags; + #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL + #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL + #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL + #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL + #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL + #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL + __le16 async_cmpl_ring; + u8 unused_2[2]; + __le32 crashdump_size; + u8 unused_3[3]; + u8 valid; +}; + +/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */ +struct hwrm_dbg_crashdump_medium_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; 
+ __le64 resp_addr; + __le16 output_dest_flags; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL + __le16 pg_size_lvl; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2 + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2) + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5 + __le32 size; + __le32 coredump_component_disable_flags; + #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL + __le32 unused_0; + __le64 pbl; +}; + +/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */ +struct hwrm_dbg_crashdump_medium_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* coredump_segment_record (size:128b/16B) */ +struct coredump_segment_record { + __le16 component_id; + __le16 segment_id; + __le16 max_instances; + u8 version_hi; + u8 version_low; + u8 seg_flags; + u8 compress_flags; + #define SFLAG_COMPRESSED_ZLIB 0x1UL + u8 unused_0[2]; + __le32 segment_len; +}; + +/* hwrm_dbg_coredump_list_input (size:256b/32B) */ +struct hwrm_dbg_coredump_list_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 seq_no; + u8 flags; + #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL + u8 unused_0[1]; +}; + +/* hwrm_dbg_coredump_list_output (size:128b/16B) */ +struct hwrm_dbg_coredump_list_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_LIST_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 total_segments; + __le16 data_len; + u8 unused_1; + u8 valid; +}; + +/* hwrm_dbg_coredump_initiate_input (size:256b/32B) */ +struct hwrm_dbg_coredump_initiate_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_0; + u8 seg_flags; + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL + #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL + u8 unused_1[7]; +}; + +/* hwrm_dbg_coredump_initiate_output (size:128b/16B) */ +struct hwrm_dbg_coredump_initiate_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* coredump_data_hdr (size:128b/16B) */ +struct coredump_data_hdr { + __le32 address; + __le32 flags_length; + #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL + #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0 + #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL + __le32 instance; + __le32 next_offset; +}; + +/* hwrm_dbg_coredump_retrieve_input (size:448b/56B) */ +struct hwrm_dbg_coredump_retrieve_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le32 unused_0; + __le16 component_id; + __le16 segment_id; + __le16 instance; + __le16 unused_1; + u8 seg_flags; + u8 
unused_2; + __le16 unused_3; + __le32 unused_4; + __le32 seq_no; + __le32 unused_5; +}; + +/* hwrm_dbg_coredump_retrieve_output (size:128b/16B) */ +struct hwrm_dbg_coredump_retrieve_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define DBG_COREDUMP_RETRIEVE_RESP_FLAGS_MORE 0x1UL + u8 unused_0; + __le16 data_len; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_dbg_i2c_cmd_input (size:320b/40B) */ +struct hwrm_dbg_i2c_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 read_size; + __le16 write_size; + u8 chnl_id; + u8 options; + #define DBG_I2C_CMD_REQ_OPTIONS_10_BIT_ADDRESSING 0x1UL + #define DBG_I2C_CMD_REQ_OPTIONS_FAST_MODE 0x2UL + __le16 slave_addr; + u8 xfer_mode; + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_READ 0x0UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE 0x1UL + #define DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ 0x2UL + #define DBG_I2C_CMD_REQ_XFER_MODE_LAST DBG_I2C_CMD_REQ_XFER_MODE_MASTER_WRITE_READ + u8 unused_1[7]; +}; + +/* hwrm_dbg_i2c_cmd_output (size:128b/16B) */ +struct hwrm_dbg_i2c_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_fw_cli_input (size:1024b/128B) */ +struct hwrm_dbg_fw_cli_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 host_buf_len; + __le16 cli_cmd_len; + u8 unused_0[2]; + u8 cli_cmd[96]; +}; + +/* hwrm_dbg_fw_cli_output (size:128b/16B) */ +struct hwrm_dbg_fw_cli_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cli_data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_dbg_ring_info_get_input (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 ring_type; + 
#define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ + u8 unused_0[3]; + __le32 fw_ring_id; +}; + +/* hwrm_dbg_ring_info_get_output (size:192b/24B) */ +struct hwrm_dbg_ring_info_get_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 producer_index; + __le32 consumer_index; + __le32 cag_vector_ctrl; + __le16 st_tag; + u8 unused_0; + u8 valid; +}; + +/* hwrm_dbg_drv_trace_input (size:1024b/128B) */ +struct hwrm_dbg_drv_trace_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 severity; + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_FATAL 0x0UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_ERROR 0x1UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_WARNING 0x2UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_INFO 0x3UL + #define DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_DEBUG 0x4UL + #define DBG_DRV_TRACE_REQ_SEVERITY_LAST DBG_DRV_TRACE_REQ_SEVERITY_TRACE_LEVEL_DEBUG + u8 write_len; + u8 unused_0[6]; + char trace_data[104]; +}; + +/* hwrm_dbg_drv_trace_output (size:128b/16B) */ +struct hwrm_dbg_drv_trace_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_dbg_useq_alloc_input (size:192b/24B) */ +struct hwrm_dbg_useq_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 size; + __le16 output_bytes; + __le16 unused_0; +}; + +/* hwrm_dbg_useq_alloc_output (size:256b/32B) */ +struct hwrm_dbg_useq_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define 
DBG_USEQ_ALLOC_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_ALLOC_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_ALLOC_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_ALLOC_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le16 alloc_usid; + __le16 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_useq_free_input (size:192b/24B) */ +struct hwrm_dbg_useq_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 usid; + u8 unused_0[6]; +}; + +/* hwrm_dbg_useq_free_output (size:256b/32B) */ +struct hwrm_dbg_useq_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_FREE_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_FREE_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_FREE_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_FREE_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_useq_flush_input (size:192b/24B) */ +struct hwrm_dbg_useq_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flags; + #define DBG_USEQ_FLUSH_REQ_USEQ_CODE_WORDS 0x1UL + #define DBG_USEQ_FLUSH_REQ_BUFFERS 0x2UL + #define DBG_USEQ_FLUSH_REQ_LAST DBG_USEQ_FLUSH_REQ_BUFFERS + u8 unused_0[6]; +}; + +/* hwrm_dbg_useq_flush_output (size:256b/32B) */ +struct hwrm_dbg_useq_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_FLUSH_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define 
DBG_USEQ_FLUSH_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_FLUSH_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_FLUSH_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_useq_cw_cfg_input (size:960b/120B) */ +struct hwrm_dbg_useq_cw_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 usid; + __le16 offset; + __le16 size; + __le16 flags; + #define DBG_USEQ_CW_CFG_REQ_FLAGS_USID_CTRL_PRESENT 0x1UL + #define DBG_USEQ_CW_CFG_REQ_FLAGS_USE_DMA 0x2UL + #define DBG_USEQ_CW_CFG_REQ_FLAGS_END 0x8000UL + #define DBG_USEQ_CW_CFG_REQ_FLAGS_LAST DBG_USEQ_CW_CFG_REQ_FLAGS_END + __le32 opaque[24]; +}; + +/* hwrm_dbg_useq_cw_cfg_output (size:192b/24B) */ +struct hwrm_dbg_useq_cw_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_CW_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_CW_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_CW_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_CW_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; +}; + +/* hwrm_dbg_useq_qcaps_input (size:128b/16B) */ +struct hwrm_dbg_useq_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_dbg_useq_qcaps_output (size:384b/48B) */ +struct hwrm_dbg_useq_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_QCAPS_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define 
DBG_USEQ_QCAPS_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_QCAPS_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_QCAPS_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 max_num_useq; + __le32 max_useq_size; + __le32 max_useq_32b_output_size; + __le32 num_buf; + __le32 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_useq_sched_cfg_input (size:192b/24B) */ +struct hwrm_dbg_useq_sched_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 global_cfg; + #define DBG_USEQ_SCHED_CFG_REQ_NO_CHANGE 0x0UL + #define DBG_USEQ_SCHED_CFG_REQ_DISABLE 0x1UL + #define DBG_USEQ_SCHED_CFG_REQ_ENABLE 0x2UL + #define DBG_USEQ_SCHED_CFG_REQ_LAST DBG_USEQ_SCHED_CFG_REQ_ENABLE + __le16 usid; + __le32 polling_interval; +}; + +/* hwrm_dbg_useq_sched_cfg_output (size:256b/32B) */ +struct hwrm_dbg_useq_sched_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_SCHED_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_SCHED_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_SCHED_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_SCHED_CFG_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_useq_run_input (size:320b/40B) */ +struct hwrm_dbg_useq_run_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 usid; + u8 run_type; + #define DBG_USEQ_RUN_REQ_RUN_TYPE_SINGLE 0x0UL + #define DBG_USEQ_RUN_REQ_RUN_TYPE_CNT 0x1UL + #define DBG_USEQ_RUN_REQ_RUN_TYPE_FILL_BUF 0x2UL + #define DBG_USEQ_RUN_REQ_RUN_TYPE_LAST DBG_USEQ_RUN_REQ_RUN_TYPE_FILL_BUF + u8 run_cnt; + __le32 
run_interval; + __le64 host_dest_addr; + __le32 host_dest_len; + __le32 unused_0; +}; + +/* hwrm_dbg_useq_run_output (size:256b/32B) */ +struct hwrm_dbg_useq_run_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_RUN_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_RUN_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_RUN_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_RUN_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 host_dest_filled_len; + __le32 valid; +}; + +/* hwrm_dbg_useq_delivery_req_input (size:896b/112B) */ +struct hwrm_dbg_useq_delivery_req_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addrs[8]; + __le32 host_dest_len[8]; +}; + +/* hwrm_dbg_useq_delivery_req_output (size:512b/64B) */ +struct hwrm_dbg_useq_delivery_req_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 nz_fw_timestamp; + __le16 last_usid; + __le16 num_useq_allocd; + __le32 useq_resp_flags; + #define DBG_USEQ_DELIVERY_REQ_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_AVAIL 0x1UL + #define DBG_USEQ_DELIVERY_REQ_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW 0x2UL + #define DBG_USEQ_DELIVERY_REQ_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_LAST DBG_USEQ_DELIVERY_REQ_RESP_HWRM_DBG_USEQ_RESP_HDR_USEQ_RESP_FLAGS_OVERFLOW + u8 full_cnt; + u8 useq_resp_unused_0[3]; + __le32 host_dest_filled_len[8]; + __le32 unused_0; + __le32 valid; +}; + +/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */ +struct hwrm_dbg_log_buffer_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 type; + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT_TRACE 0x0UL + #define 
DBG_LOG_BUFFER_FLUSH_REQ_TYPE_SRT2_TRACE 0x1UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT_TRACE 0x2UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_CRT2_TRACE 0x3UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_RIGP0_TRACE 0x4UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_L2_HWRM_TRACE 0x5UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE 0x6UL + #define DBG_LOG_BUFFER_FLUSH_REQ_TYPE_LAST DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ROCE_HWRM_TRACE + u8 unused_1[2]; + __le32 flags; + #define DBG_LOG_BUFFER_FLUSH_REQ_FLAGS_FLUSH_ALL_BUFFERS 0x1UL +}; + +/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */ +struct hwrm_dbg_log_buffer_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 current_buffer_offset; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_nvm_raw_write_blk_input (size:320b/40B) */ +struct hwrm_nvm_raw_write_blk_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le32 dest_addr; + __le32 len; + u8 flags; + #define NVM_RAW_WRITE_BLK_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL + u8 unused_0[7]; +}; + +/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */ +struct hwrm_nvm_raw_write_blk_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_read_input (size:320b/40B) */ +struct hwrm_nvm_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le16 dir_idx; + u8 unused_0[2]; + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_read_output (size:128b/16B) */ +struct hwrm_nvm_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_raw_dump_input (size:320b/40B) */ +struct hwrm_nvm_raw_dump_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; 
+ __le32 offset; + __le32 len; + u8 flags; + #define NVM_RAW_DUMP_REQ_FLAGS_SECURITY_SOC_NVM 0x1UL + u8 unused_0[7]; +}; + +/* hwrm_nvm_raw_dump_output (size:128b/16B) */ +struct hwrm_nvm_raw_dump_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_entries_input (size:192b/24B) */ +struct hwrm_nvm_get_dir_entries_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; +}; + +/* hwrm_nvm_get_dir_entries_output (size:128b/16B) */ +struct hwrm_nvm_get_dir_entries_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dir_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dir_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dir_info_output (size:192b/24B) */ +struct hwrm_nvm_get_dir_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 entries; + __le32 entry_length; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_write_input (size:448b/56B) */ +struct hwrm_nvm_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 dir_data_length; + __le16 option; + __le16 flags; + #define NVM_WRITE_REQ_FLAGS_KEEP_ORIG_ACTIVE_IMG 0x1UL + #define NVM_WRITE_REQ_FLAGS_BATCH_MODE 0x2UL + #define NVM_WRITE_REQ_FLAGS_BATCH_LAST 0x4UL + __le32 dir_item_length; + __le32 offset; + __le32 len; + __le32 unused_0; +}; + +/* hwrm_nvm_write_output (size:128b/16B) */ +struct hwrm_nvm_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le16 dir_idx; + u8 unused_0; + u8 valid; +}; + +/* 
hwrm_nvm_write_cmd_err (size:64b/8B) */ +struct hwrm_nvm_write_cmd_err { + u8 code; + #define NVM_WRITE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_WRITE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_WRITE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_WRITE_CMD_ERR_CODE_LAST NVM_WRITE_CMD_ERR_CODE_NO_SPACE + u8 unused_0[7]; +}; + +/* hwrm_nvm_modify_input (size:320b/40B) */ +struct hwrm_nvm_modify_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + __le16 dir_idx; + __le16 flags; + #define NVM_MODIFY_REQ_FLAGS_BATCH_MODE 0x1UL + #define NVM_MODIFY_REQ_FLAGS_BATCH_LAST 0x2UL + __le32 offset; + __le32 len; + u8 unused_1[4]; +}; + +/* hwrm_nvm_modify_output (size:128b/16B) */ +struct hwrm_nvm_modify_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_find_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_FIND_DIR_ENTRY_REQ_ENABLES_DIR_IDX_VALID 0x1UL + __le16 dir_idx; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 opt_ordinal; + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_MASK 0x3UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_SFT 0 + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ 0x0UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GE 0x1UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT 0x2UL + #define NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_LAST NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_GT + u8 unused_0[3]; +}; + +/* hwrm_nvm_find_dir_entry_output (size:256b/32B) */ +struct hwrm_nvm_find_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 dir_item_length; + __le32 dir_data_length; + __le32 fw_ver; + __le16 dir_ordinal; + __le16 dir_idx; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_erase_dir_entry_input 
(size:192b/24B) */ +struct hwrm_nvm_erase_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_idx; + u8 unused_0[6]; +}; + +/* hwrm_nvm_erase_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_erase_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_get_dev_info_input (size:128b/16B) */ +struct hwrm_nvm_get_dev_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_get_dev_info_output (size:704b/88B) */ +struct hwrm_nvm_get_dev_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 manufacturer_id; + __le16 device_id; + __le32 sector_size; + __le32 nvram_size; + __le32 reserved_size; + __le32 available_size; + u8 nvm_cfg_ver_maj; + u8 nvm_cfg_ver_min; + u8 nvm_cfg_ver_upd; + u8 flags; + #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL + char pkg_name[16]; + __le16 hwrm_fw_major; + __le16 hwrm_fw_minor; + __le16 hwrm_fw_build; + __le16 hwrm_fw_patch; + __le16 mgmt_fw_major; + __le16 mgmt_fw_minor; + __le16 mgmt_fw_build; + __le16 mgmt_fw_patch; + __le16 roce_fw_major; + __le16 roce_fw_minor; + __le16 roce_fw_build; + __le16 roce_fw_patch; + __le16 netctrl_fw_major; + __le16 netctrl_fw_minor; + __le16 netctrl_fw_build; + __le16 netctrl_fw_patch; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_mod_dir_entry_input (size:256b/32B) */ +struct hwrm_nvm_mod_dir_entry_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define NVM_MOD_DIR_ENTRY_REQ_ENABLES_CHECKSUM 0x1UL + __le16 dir_idx; + __le16 dir_ordinal; + __le16 dir_ext; + __le16 dir_attr; + __le32 checksum; +}; + +/* hwrm_nvm_mod_dir_entry_output (size:128b/16B) */ +struct hwrm_nvm_mod_dir_entry_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_verify_update_input (size:192b/24B) */ +struct hwrm_nvm_verify_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 dir_type; + __le16 dir_ordinal; + __le16 dir_ext; + u8 unused_0[2]; +}; + +/* hwrm_nvm_verify_update_output (size:128b/16B) */ +struct hwrm_nvm_verify_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_install_update_input (size:192b/24B) */ +struct hwrm_nvm_install_update_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 install_type; + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL + #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_LAST NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL + __le16 flags; + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL + u8 unused_0[2]; +}; + +/* hwrm_nvm_install_update_output (size:192b/24B) */ +struct hwrm_nvm_install_update_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 installed_items; + u8 result; + #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL + #define 
NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL + #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL + #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED + u8 problem_item; + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL + #define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_LAST NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE + u8 reset_required; + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_NONE 0x0UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_PCI 0x1UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER 0x2UL + #define NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_LAST NVM_INSTALL_UPDATE_RESP_RESET_REQUIRED_POWER + u8 unused_0[4]; + u8 valid; 
+}; + +/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */ +struct hwrm_nvm_install_update_cmd_err { + u8 code; + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL + #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_flush_input (size:128b/16B) */ +struct hwrm_nvm_flush_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_nvm_flush_output (size:128b/16B) */ +struct hwrm_nvm_flush_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_flush_cmd_err (size:64b/8B) */ +struct hwrm_nvm_flush_cmd_err { + u8 code; + #define NVM_FLUSH_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FLUSH_CMD_ERR_CODE_FAIL 0x1UL + #define NVM_FLUSH_CMD_ERR_CODE_LAST NVM_FLUSH_CMD_ERR_CODE_FAIL + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_variable_input (size:320b/40B) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_LAST NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* hwrm_nvm_get_variable_output (size:128b/16B) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 
option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_LAST NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_nvm_get_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LAST NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable_input (size:320b/40B) */ +struct hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_LAST NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_AES256 (0x2UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH (0x3UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1_AUTH + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_MASK 0x70UL + #define NVM_SET_VARIABLE_REQ_FLAGS_FLAGS_UNUSED_0_SFT 4 + #define 
NVM_SET_VARIABLE_REQ_FLAGS_FACTORY_DEFAULT 0x80UL + u8 unused_0; +}; + +/* hwrm_nvm_set_variable_output (size:128b/16B) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_set_variable_cmd_err (size:64b/8B) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_LAST NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR + u8 unused_0[7]; +}; + +/* hwrm_nvm_validate_option_input (size:320b/40B) */ +struct hwrm_nvm_validate_option_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + #define NVM_VALIDATE_OPTION_REQ_OPTION_NUM_LAST NVM_VALIDATE_OPTION_REQ_OPTION_NUM_RSVD_FFFF + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 unused_0[2]; +}; + +/* hwrm_nvm_validate_option_output (size:128b/16B) */ +struct hwrm_nvm_validate_option_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_VALIDATE_OPTION_RESP_RESULT_NOT_MATCH 0x0UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_MATCH 0x1UL + #define NVM_VALIDATE_OPTION_RESP_RESULT_LAST NVM_VALIDATE_OPTION_RESP_RESULT_MATCH + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_validate_option_cmd_err (size:64b/8B) */ +struct hwrm_nvm_validate_option_cmd_err { + u8 code; + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_VALIDATE_OPTION_CMD_ERR_CODE_LAST NVM_VALIDATE_OPTION_CMD_ERR_CODE_UNKNOWN + u8 unused_0[7]; +}; + +/* hwrm_nvm_factory_defaults_input (size:192b/24B) */ 
+struct hwrm_nvm_factory_defaults_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 mode; + #define NVM_FACTORY_DEFAULTS_REQ_MODE_RESTORE 0x0UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE 0x1UL + #define NVM_FACTORY_DEFAULTS_REQ_MODE_LAST NVM_FACTORY_DEFAULTS_REQ_MODE_CREATE + u8 unused_0[1]; + __le16 selection; + #define NVM_FACTORY_DEFAULTS_REQ_SELECTION_CFG_OPTION 0x1UL + #define NVM_FACTORY_DEFAULTS_REQ_SELECTION_CRASHDUMP 0x2UL + u8 unused_1[4]; +}; + +/* hwrm_nvm_factory_defaults_output (size:128b/16B) */ +struct hwrm_nvm_factory_defaults_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 result; + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_OK 0x0UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_RESTORE_OK 0x1UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY 0x2UL + #define NVM_FACTORY_DEFAULTS_RESP_RESULT_LAST NVM_FACTORY_DEFAULTS_RESP_RESULT_CREATE_ALREADY + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_factory_defaults_cmd_err (size:64b/8B) */ +struct hwrm_nvm_factory_defaults_cmd_err { + u8 code; + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_VALID_CFG 0x1UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG 0x2UL + #define NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_LAST NVM_FACTORY_DEFAULTS_CMD_ERR_CODE_NO_SAVED_CFG + u8 unused_0[7]; +}; + +/* hwrm_nvm_req_arbitration_input (size:192b/24B) */ +struct hwrm_nvm_req_arbitration_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 type; + #define NVM_REQ_ARBITRATION_REQ_TYPE_STATUS 0x0UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_ACQUIRE 0x1UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_RELEASE 0x2UL + #define NVM_REQ_ARBITRATION_REQ_TYPE_LAST NVM_REQ_ARBITRATION_REQ_TYPE_RELEASE + u8 unused_0[7]; +}; + +/* hwrm_nvm_req_arbitration_output (size:128b/16B) */ +struct hwrm_nvm_req_arbitration_output 
{ + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 acquired; + u8 unused_0[6]; + u8 valid; +}; + +/* hwrm_nvm_defrag_input (size:192b/24B) */ +struct hwrm_nvm_defrag_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define NVM_DEFRAG_REQ_FLAGS_DEFRAG 0x1UL + u8 unused_0[4]; +}; + +/* hwrm_nvm_defrag_output (size:128b/16B) */ +struct hwrm_nvm_defrag_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_nvm_defrag_cmd_err (size:64b/8B) */ +struct hwrm_nvm_defrag_cmd_err { + u8 code; + #define NVM_DEFRAG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_DEFRAG_CMD_ERR_CODE_FAIL 0x1UL + #define NVM_DEFRAG_CMD_ERR_CODE_LAST NVM_DEFRAG_CMD_ERR_CODE_FAIL + u8 unused_0[7]; +}; + +/* hwrm_nvm_get_vpd_field_info_input (size:192b/24B) */ +struct hwrm_nvm_get_vpd_field_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 tag_id[2]; + u8 unused_0[6]; +}; + +/* hwrm_nvm_get_vpd_field_info_output (size:2176b/272B) */ +struct hwrm_nvm_get_vpd_field_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 data[256]; + __le16 data_len; + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_nvm_set_vpd_field_info_input (size:256b/32B) */ +struct hwrm_nvm_set_vpd_field_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_src_addr; + u8 tag_id[2]; + __le16 data_len; + u8 unused_0[4]; +}; + +/* hwrm_nvm_set_vpd_field_info_output (size:128b/16B) */ +struct hwrm_nvm_set_vpd_field_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_selftest_qlist_input (size:128b/16B) */ +struct hwrm_selftest_qlist_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + 
__le64 resp_addr; +}; + +/* hwrm_selftest_qlist_output (size:2240b/280B) */ +struct hwrm_selftest_qlist_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_tests; + u8 available_tests; + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 offline_tests; + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + __le16 test_timeout; + u8 unused_1[2]; + char test_name[8][32]; + u8 eyescope_target_BER_support; + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED + u8 unused_2[6]; + u8 valid; +}; + +/* hwrm_selftest_exec_input (size:192b/24B) */ +struct hwrm_selftest_exec_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 flags; + #define SELFTEST_EXEC_REQ_FLAGS_NVM_TEST 0x1UL + #define 
SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[7]; +}; + +/* hwrm_selftest_exec_output (size:128b/16B) */ +struct hwrm_selftest_exec_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 requested_tests; + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL + u8 test_success; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0[5]; + u8 valid; +}; + +/* hwrm_selftest_irq_input (size:128b/16B) */ +struct hwrm_selftest_irq_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_selftest_irq_output (size:128b/16B) */ +struct hwrm_selftest_irq_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_selftest_retrieve_serdes_data_input (size:320b/40B) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 data_len; + u8 flags; + #define 
SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0x7UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_EYE_PROJECTION 0x8UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 options; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PCIE_LANE_NO_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_HORIZONTAL (0x0UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL (0x1UL << 4) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_DIRECTION_VERTICAL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE 0x20UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LEFT_TOP (0x0UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (0x1UL << 5) + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_MASK 0xc0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_OPTIONS_RSVD_SFT 6 + u8 targetBER; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E8 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E9 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E10 0x2UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E11 0x3UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E12 0x4UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_TARGETBER_BER_1E12 + u8 action; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_SYNCHRONOUS 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_START 
0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_PROGRESS 0x2UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_STOP 0x3UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_LAST SELFTEST_RETRIEVE_SERDES_DATA_REQ_ACTION_STOP + u8 unused[6]; +}; + +/* hwrm_selftest_retrieve_serdes_data_output (size:192b/24B) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + __le16 progress_percent; + __le16 timeout; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_TOTAL 0x0UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 0x1UL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_LAST SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_RSVD_MASK 0xfeUL + #define SELFTEST_RETRIEVE_SERDES_DATA_RESP_FLAGS_RSVD_SFT 1 + u8 unused_0; + __le16 hdr_size; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_oem_cmd_input (size:1024b/128B) */ +struct hwrm_oem_cmd_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 oem_id; + u8 naming_authority; + #define OEM_CMD_REQ_NAMING_AUTHORITY_INVALID 0x0UL + #define OEM_CMD_REQ_NAMING_AUTHORITY_PCI_SIG 0x1UL + #define OEM_CMD_REQ_NAMING_AUTHORITY_LAST OEM_CMD_REQ_NAMING_AUTHORITY_PCI_SIG + u8 message_family; + #define OEM_CMD_REQ_MESSAGE_FAMILY_INVALID 0x0UL + #define OEM_CMD_REQ_MESSAGE_FAMILY_TRUFLOW 0x1UL + #define OEM_CMD_REQ_MESSAGE_FAMILY_ROCE 0x2UL + #define OEM_CMD_REQ_MESSAGE_FAMILY_LAST OEM_CMD_REQ_MESSAGE_FAMILY_ROCE + __le16 unused; + __le32 oem_data[26]; +}; + +/* hwrm_oem_cmd_output (size:768b/96B) */ +struct hwrm_oem_cmd_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 
oem_id; + u8 naming_authority; + u8 message_family; + __le16 unused; + __le32 oem_data[18]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_sv_input (size:1152b/144B) */ +struct hwrm_sv_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 opaque[32]; +}; + +/* hwrm_sv_output (size:1088b/136B) */ +struct hwrm_sv_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 opaque[32]; +}; + +/* hwrm_udcc_qcaps_input (size:128b/16B) */ +struct hwrm_udcc_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* hwrm_udcc_qcaps_output (size:192b/24B) */ +struct hwrm_udcc_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 min_sessions; + __le16 max_sessions; + u8 session_type; + #define UDCC_QCAPS_RESP_SESSION_TYPE_PER_DESTINATION 0x0UL + #define UDCC_QCAPS_RESP_SESSION_TYPE_PER_QP 0x1UL + #define UDCC_QCAPS_RESP_SESSION_TYPE_LAST UDCC_QCAPS_RESP_SESSION_TYPE_PER_QP + u8 unused_0[3]; + __le16 max_comp_cfg_xfer; + __le16 max_comp_data_xfer; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_udcc_cfg_input (size:192b/24B) */ +struct hwrm_udcc_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define UDCC_CFG_REQ_ENABLES_UDCC_MODE 0x1UL + u8 udcc_mode; + #define UDCC_CFG_REQ_UDCC_MODE_DISABLED 0x0UL + #define UDCC_CFG_REQ_UDCC_MODE_ENABLED 0x1UL + #define UDCC_CFG_REQ_UDCC_MODE_LAST UDCC_CFG_REQ_UDCC_MODE_ENABLED + u8 unused_1[3]; +}; + +/* hwrm_udcc_cfg_output (size:128b/16B) */ +struct hwrm_udcc_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_udcc_qcfg_input (size:128b/16B) */ +struct hwrm_udcc_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; +}; + +/* 
hwrm_udcc_qcfg_output (size:128b/16B) */ +struct hwrm_udcc_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 udcc_mode; + u8 unused_1[6]; + u8 valid; +}; + +/* hwrm_udcc_session_cfg_input (size:384b/48B) */ +struct hwrm_udcc_session_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 enables; + #define UDCC_SESSION_CFG_REQ_ENABLES_SESSION_STATE 0x1UL + #define UDCC_SESSION_CFG_REQ_ENABLES_DEST_MAC 0x2UL + #define UDCC_SESSION_CFG_REQ_ENABLES_SRC_MAC 0x4UL + #define UDCC_SESSION_CFG_REQ_ENABLES_TX_STATS_RECORD 0x8UL + #define UDCC_SESSION_CFG_REQ_ENABLES_RX_STATS_RECORD 0x10UL + u8 session_state; + #define UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED 0x1UL + #define UDCC_SESSION_CFG_REQ_SESSION_STATE_FLOW_NOT_CREATED 0x2UL + #define UDCC_SESSION_CFG_REQ_SESSION_STATE_FLOW_HAS_BEEN_DELETED 0x4UL + u8 unused_1; + __le16 session_id; + u8 dest_mac[6]; + __le16 unused_2; + u8 src_mac[6]; + __le16 unused_3; + __le32 tx_stats_record; + __le32 rx_stats_record; +}; + +/* hwrm_udcc_session_cfg_output (size:128b/16B) */ +struct hwrm_udcc_session_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_udcc_session_qcfg_input (size:192b/24B) */ +struct hwrm_udcc_session_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 session_id; + u8 unused_0[6]; +}; + +/* hwrm_udcc_session_qcfg_output (size:512b/64B) */ +struct hwrm_udcc_session_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 session_state; + #define UDCC_SESSION_QCFG_RESP_SESSION_STATE_ENABLED 0x1UL + #define UDCC_SESSION_QCFG_RESP_SESSION_STATE_FLOW_NOT_CREATED 0x2UL + #define UDCC_SESSION_QCFG_RESP_SESSION_STATE_FLOW_HAS_BEEN_DELETED 0x4UL + u8 unused_0; + u8 dest_mac[6]; + __be32 dest_ip[4]; + u8 unused_1[2]; + u8 src_mac[6]; 
+ __le32 src_qp_num; + __le32 dest_qp_num; + __le32 tx_stats_record; + __le32 rx_stats_record; + u8 unused_2[7]; + u8 valid; +}; + +/* hwrm_udcc_session_query_input (size:192b/24B) */ +struct hwrm_udcc_session_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 session_id; + u8 unused_0[6]; +}; + +/* hwrm_udcc_session_query_output (size:576b/72B) */ +struct hwrm_udcc_session_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 min_rtt_ns; + __le32 max_rtt_ns; + __le32 cur_rate_mbps; + __le32 tx_event_count; + __le32 cnp_rx_event_count; + __le32 rtt_req_count; + __le32 rtt_resp_count; + __le32 tx_bytes_count; + __le32 tx_packets_count; + __le32 init_probes_sent; + __le32 term_probes_recv; + __le32 cnp_packets_recv; + __le32 rto_event_recv; + __le32 seq_err_nak_recv; + __le32 qp_count; + u8 unused_1[3]; + u8 valid; +}; + +/* hwrm_udcc_comp_cfg_input (size:576b/72B) */ +struct hwrm_udcc_comp_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 arg_buf[40]; + __le32 arg_len; + __le32 cfg_len; + __le32 cfg_host_addr[2]; +}; + +/* hwrm_udcc_comp_cfg_output (size:128b/16B) */ +struct hwrm_udcc_comp_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_udcc_comp_qcfg_input (size:576b/72B) */ +struct hwrm_udcc_comp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 arg_buf[40]; + __le32 arg_len; + __le32 cfg_host_buf_size; + __le32 cfg_host_addr[2]; +}; + +/* hwrm_udcc_comp_qcfg_output (size:128b/16B) */ +struct hwrm_udcc_comp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 cfg_len; + u8 unused_0[3]; + u8 valid; +}; + +/* hwrm_udcc_comp_query_input (size:576b/72B) */ +struct hwrm_udcc_comp_query_input { + __le16 
req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 arg_buf[40]; + __le32 arg_len; + __le32 data_host_buf_size; + __le32 data_host_addr[2]; +}; + +/* hwrm_udcc_comp_query_output (size:128b/16B) */ +struct hwrm_udcc_comp_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data_len; + u8 unused_0[3]; + u8 valid; +}; + +/* dbc_dbc (size:64b/8B) */ +struct dbc_dbc { + __le32 index; + #define DBC_DBC_INDEX_MASK 0xffffffUL + #define DBC_DBC_INDEX_SFT 0 + #define DBC_DBC_EPOCH 0x1000000UL + #define DBC_DBC_TOGGLE_MASK 0x6000000UL + #define DBC_DBC_TOGGLE_SFT 25 + __le32 type_path_xid; + #define DBC_DBC_XID_MASK 0xfffffUL + #define DBC_DBC_XID_SFT 0 + #define DBC_DBC_PATH_MASK 0x3000000UL + #define DBC_DBC_PATH_SFT 24 + #define DBC_DBC_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_PATH_L2 (0x1UL << 24) + #define DBC_DBC_PATH_ENGINE (0x2UL << 24) + #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE + #define DBC_DBC_VALID 0x4000000UL + #define DBC_DBC_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_TYPE_MASK 0xf0000000UL + #define DBC_DBC_TYPE_SFT 28 + #define DBC_DBC_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_DBC_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL +}; + +/* dbc_dbc64 (size:64b/8B) */ +struct dbc_dbc64 { + __le64 dbc; + #define DBC_DBC64_INDEX_MASK 0xffffffUL + #define DBC_DBC64_INDEX_SFT 0 + #define DBC_DBC64_EPOCH 0x1000000UL + #define 
DBC_DBC64_TOGGLE_MASK 0x6000000UL + #define DBC_DBC64_TOGGLE_SFT 25 + #define DBC_DBC64_XID_MASK 0xfffff00000000ULL + #define DBC_DBC64_XID_SFT 32 + #define DBC_DBC64_PATH_MASK 0x300000000000000ULL + #define DBC_DBC64_PATH_SFT 56 + #define DBC_DBC64_PATH_ROCE (0x0ULL << 56) + #define DBC_DBC64_PATH_L2 (0x1ULL << 56) + #define DBC_DBC64_PATH_ENGINE (0x2ULL << 56) + #define DBC_DBC64_PATH_LAST DBC_DBC64_PATH_ENGINE + #define DBC_DBC64_VALID 0x400000000000000ULL + #define DBC_DBC64_DEBUG_TRACE 0x800000000000000ULL + #define DBC_DBC64_TYPE_MASK 0xf000000000000000ULL + #define DBC_DBC64_TYPE_SFT 60 + #define DBC_DBC64_TYPE_SQ (0x0ULL << 60) + #define DBC_DBC64_TYPE_RQ (0x1ULL << 60) + #define DBC_DBC64_TYPE_SRQ (0x2ULL << 60) + #define DBC_DBC64_TYPE_SRQ_ARM (0x3ULL << 60) + #define DBC_DBC64_TYPE_CQ (0x4ULL << 60) + #define DBC_DBC64_TYPE_CQ_ARMSE (0x5ULL << 60) + #define DBC_DBC64_TYPE_CQ_ARMALL (0x6ULL << 60) + #define DBC_DBC64_TYPE_CQ_ARMENA (0x7ULL << 60) + #define DBC_DBC64_TYPE_SRQ_ARMENA (0x8ULL << 60) + #define DBC_DBC64_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60) + #define DBC_DBC64_TYPE_NQ (0xaULL << 60) + #define DBC_DBC64_TYPE_NQ_ARM (0xbULL << 60) + #define DBC_DBC64_TYPE_NQ_MASK (0xeULL << 60) + #define DBC_DBC64_TYPE_NULL (0xfULL << 60) + #define DBC_DBC64_TYPE_LAST DBC_DBC64_TYPE_NULL +}; + +/* dbc_dbc32 (size:32b/4B) */ +struct dbc_dbc32 { + u32 type_abs_incr_xid; + #define DBC_DBC32_XID_MASK 0xfffffUL + #define DBC_DBC32_XID_SFT 0 + #define DBC_DBC32_PATH_MASK 0xc00000UL + #define DBC_DBC32_PATH_SFT 22 + #define DBC_DBC32_PATH_ROCE (0x0UL << 22) + #define DBC_DBC32_PATH_L2 (0x1UL << 22) + #define DBC_DBC32_PATH_LAST DBC_DBC32_PATH_L2 + #define DBC_DBC32_INCR_MASK 0xf000000UL + #define DBC_DBC32_INCR_SFT 24 + #define DBC_DBC32_ABS 0x10000000UL + #define DBC_DBC32_TYPE_MASK 0xe0000000UL + #define DBC_DBC32_TYPE_SFT 29 + #define DBC_DBC32_TYPE_SQ (0x0UL << 29) + #define DBC_DBC32_TYPE_LAST DBC_DBC32_TYPE_SQ +}; + +/* db_push_start (size:64b/8B) */ +struct 
db_push_start { + u64 db; + #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_START_DB_INDEX_SFT 0 + #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_START_DB_PI_LO_SFT 24 + #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_START_DB_XID_SFT 32 + #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_START_DB_PI_HI_SFT 52 + #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_START_DB_TYPE_SFT 60 + #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60) + #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END +}; + +/* db_push_end (size:64b/8B) */ +struct db_push_end { + u64 db; + #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL + #define DB_PUSH_END_DB_INDEX_SFT 0 + #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL + #define DB_PUSH_END_DB_PI_LO_SFT 24 + #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL + #define DB_PUSH_END_DB_XID_SFT 32 + #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL + #define DB_PUSH_END_DB_PI_HI_SFT 52 + #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL + #define DB_PUSH_END_DB_PATH_SFT 56 + #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56) + #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56) + #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56) + #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE + #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL + #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL + #define DB_PUSH_END_DB_TYPE_SFT 60 + #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60) + #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60) + #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END +}; + +/* db_push_info (size:64b/8B) */ +struct db_push_info { + u32 push_size_push_index; + #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL + #define DB_PUSH_INFO_PUSH_INDEX_SFT 0 + #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL + #define 
DB_PUSH_INFO_PUSH_SIZE_SFT 24 + u32 reserved32; +}; + +/* dbc_absolute_db_32 (size:32b/4B) */ +struct dbc_absolute_db_32 { + u32 index; + #define DBC_ABSOLUTE_DB_32_INDEX_MASK 0xffffUL + #define DBC_ABSOLUTE_DB_32_INDEX_SFT 0 + #define DBC_ABSOLUTE_DB_32_EPOCH 0x10000UL + #define DBC_ABSOLUTE_DB_32_TOGGLE_MASK 0x60000UL + #define DBC_ABSOLUTE_DB_32_TOGGLE_SFT 17 + #define DBC_ABSOLUTE_DB_32_MXID_MASK 0x1f80000UL + #define DBC_ABSOLUTE_DB_32_MXID_SFT 19 + #define DBC_ABSOLUTE_DB_32_PATH_MASK 0x6000000UL + #define DBC_ABSOLUTE_DB_32_PATH_SFT 25 + #define DBC_ABSOLUTE_DB_32_PATH_ROCE (0x0UL << 25) + #define DBC_ABSOLUTE_DB_32_PATH_L2 (0x1UL << 25) + #define DBC_ABSOLUTE_DB_32_PATH_LAST DBC_ABSOLUTE_DB_32_PATH_L2 + #define DBC_ABSOLUTE_DB_32_VALID 0x8000000UL + #define DBC_ABSOLUTE_DB_32_TYPE_MASK 0xf0000000UL + #define DBC_ABSOLUTE_DB_32_TYPE_SFT 28 + #define DBC_ABSOLUTE_DB_32_TYPE_SQ (0x0UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_RQ (0x1UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_SRQ (0x2UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ (0x4UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ (0xaUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ_ARM (0xbUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_NULL (0xfUL << 28) + #define DBC_ABSOLUTE_DB_32_TYPE_LAST DBC_ABSOLUTE_DB_32_TYPE_NULL +}; + +/* dbc_relative_db_32 (size:32b/4B) */ +struct dbc_relative_db_32 { + u32 xid; + #define DBC_RELATIVE_DB_32_XID_MASK 0xfffffUL + #define DBC_RELATIVE_DB_32_XID_SFT 0 + #define DBC_RELATIVE_DB_32_PATH_MASK 0xc00000UL + #define DBC_RELATIVE_DB_32_PATH_SFT 22 + #define DBC_RELATIVE_DB_32_PATH_ROCE (0x0UL << 22) + #define DBC_RELATIVE_DB_32_PATH_L2 (0x1UL << 
22) + #define DBC_RELATIVE_DB_32_PATH_LAST DBC_RELATIVE_DB_32_PATH_L2 + #define DBC_RELATIVE_DB_32_INCR_MASK 0x1f000000UL + #define DBC_RELATIVE_DB_32_INCR_SFT 24 + #define DBC_RELATIVE_DB_32_TYPE_MASK 0xe0000000UL + #define DBC_RELATIVE_DB_32_TYPE_SFT 29 + #define DBC_RELATIVE_DB_32_TYPE_SQ (0x0UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_SRQ (0x1UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_CQ (0x2UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_CQ_ARMALL (0x3UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ (0x4UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ_ARM (0x5UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_NQ_MASK (0x6UL << 29) + #define DBC_RELATIVE_DB_32_TYPE_LAST DBC_RELATIVE_DB_32_TYPE_NQ_MASK +}; + +/* dbc_drk (size:128b/16B) */ +struct dbc_drk { + __le32 db_format_linked_last_valid_stride_size; + #define DBC_DRK_VALID 0x1UL + #define DBC_DRK_LAST 0x2UL + #define DBC_DRK_LINKED 0x4UL + #define DBC_DRK_DB_FORMAT 0x8UL + #define DBC_DRK_DB_FORMAT_B64 (0x0UL << 3) + #define DBC_DRK_DB_FORMAT_B32A (0x1UL << 3) + #define DBC_DRK_DB_FORMAT_LAST DBC_DRK_DB_FORMAT_B32A + #define DBC_DRK_STRIDE_MASK 0x300UL + #define DBC_DRK_STRIDE_SFT 8 + #define DBC_DRK_STRIDE_OFF (0x0UL << 8) + #define DBC_DRK_STRIDE_SZ64 (0x1UL << 8) + #define DBC_DRK_STRIDE_SZ128 (0x2UL << 8) + #define DBC_DRK_STRIDE_LAST DBC_DRK_STRIDE_SZ128 + #define DBC_DRK_SIZE_MASK 0xc00UL + #define DBC_DRK_SIZE_SFT 10 + #define DBC_DRK_SIZE_FOUR (0x0UL << 10) + #define DBC_DRK_SIZE_ONE (0x1UL << 10) + #define DBC_DRK_SIZE_TWO (0x2UL << 10) + #define DBC_DRK_SIZE_THREE (0x3UL << 10) + #define DBC_DRK_SIZE_LAST DBC_DRK_SIZE_THREE + __le32 pi; + #define DBC_DRK_PI_MASK 0xffffUL + #define DBC_DRK_PI_SFT 0 + __le64 memptr; +}; + +/* dbc_drk64 (size:128b/16B) */ +struct dbc_drk64 { + __le64 flags; + #define DBC_DRK64_VALID 0x1UL + #define DBC_DRK64_LAST 0x2UL + #define DBC_DRK64_LINKED 0x4UL + #define DBC_DRK64_DB_FORMAT 0x8UL + #define DBC_DRK64_DB_FORMAT_B64 (0x0UL << 3) + #define DBC_DRK64_DB_FORMAT_B32A (0x1UL << 3) 
+ #define DBC_DRK64_DB_FORMAT_LAST DBC_DRK64_DB_FORMAT_B32A + #define DBC_DRK64_STRIDE_MASK 0x300UL + #define DBC_DRK64_STRIDE_SFT 8 + #define DBC_DRK64_STRIDE_OFF (0x0UL << 8) + #define DBC_DRK64_STRIDE_SZ64 (0x1UL << 8) + #define DBC_DRK64_STRIDE_SZ128 (0x2UL << 8) + #define DBC_DRK64_STRIDE_LAST DBC_DRK64_STRIDE_SZ128 + #define DBC_DRK64_SIZE_MASK 0xc00UL + #define DBC_DRK64_SIZE_SFT 10 + #define DBC_DRK64_SIZE_FOUR (0x0UL << 10) + #define DBC_DRK64_SIZE_ONE (0x1UL << 10) + #define DBC_DRK64_SIZE_TWO (0x2UL << 10) + #define DBC_DRK64_SIZE_THREE (0x3UL << 10) + #define DBC_DRK64_SIZE_LAST DBC_DRK64_SIZE_THREE + #define DBC_DRK64_PI_MASK 0xffff00000000ULL + #define DBC_DRK64_PI_SFT 32 + __le64 memptr; +}; + +/* dbc_dbc_v3 (size:64b/8B) */ +struct dbc_dbc_v3 { + u32 index; + #define DBC_DBC_V3_INDEX_MASK 0xffffffUL + #define DBC_DBC_V3_INDEX_SFT 0 + #define DBC_DBC_V3_EPOCH 0x1000000UL + #define DBC_DBC_V3_TOGGLE_MASK 0x6000000UL + #define DBC_DBC_V3_TOGGLE_SFT 25 + u32 type_path_xid; + #define DBC_DBC_V3_XID_MASK 0xfffUL + #define DBC_DBC_V3_XID_SFT 0 + #define DBC_DBC_V3_PATH_MASK 0x3000000UL + #define DBC_DBC_V3_PATH_SFT 24 + #define DBC_DBC_V3_PATH_ROCE (0x0UL << 24) + #define DBC_DBC_V3_PATH_L2 (0x1UL << 24) + #define DBC_DBC_V3_PATH_LAST DBC_DBC_V3_PATH_L2 + #define DBC_DBC_V3_VALID 0x4000000UL + #define DBC_DBC_V3_DEBUG_TRACE 0x8000000UL + #define DBC_DBC_V3_TYPE_MASK 0xf0000000UL + #define DBC_DBC_V3_TYPE_SFT 28 + #define DBC_DBC_V3_TYPE_SQ (0x0UL << 28) + #define DBC_DBC_V3_TYPE_RQ (0x1UL << 28) + #define DBC_DBC_V3_TYPE_SRQ (0x2UL << 28) + #define DBC_DBC_V3_TYPE_SRQ_ARM (0x3UL << 28) + #define DBC_DBC_V3_TYPE_CQ (0x4UL << 28) + #define DBC_DBC_V3_TYPE_CQ_ARMSE (0x5UL << 28) + #define DBC_DBC_V3_TYPE_CQ_ARMALL (0x6UL << 28) + #define DBC_DBC_V3_TYPE_CQ_ARMENA (0x7UL << 28) + #define DBC_DBC_V3_TYPE_SRQ_ARMENA (0x8UL << 28) + #define DBC_DBC_V3_TYPE_CQ_CUTOFF_ACK (0x9UL << 28) + #define DBC_DBC_V3_TYPE_NQ (0xaUL << 28) + #define DBC_DBC_V3_TYPE_NQ_ARM 
(0xbUL << 28) + #define DBC_DBC_V3_TYPE_CQ_REASSIGN (0xcUL << 28) + #define DBC_DBC_V3_TYPE_NQ_MASK (0xeUL << 28) + #define DBC_DBC_V3_TYPE_NULL (0xfUL << 28) + #define DBC_DBC_V3_TYPE_LAST DBC_DBC_V3_TYPE_NULL +}; + +/* dbc_xp (size:512b/64B) */ +struct dbc_xp { + u32 reserved; + u32 type_xid; + #define DBC_XP_XID_MASK 0xfffUL + #define DBC_XP_XID_SFT 0 + #define DBC_XP_DEBUG_TRACE 0x1000000UL + #define DBC_XP_TYPE_MASK 0xf0000000UL + #define DBC_XP_TYPE_SFT 28 + #define DBC_XP_TYPE_SQ (0x0UL << 28) + #define DBC_XP_TYPE_RQ (0x1UL << 28) + #define DBC_XP_TYPE_SRQ (0x2UL << 28) + #define DBC_XP_TYPE_LAST DBC_XP_TYPE_SRQ + u32 wqe[14]; +}; + +/* fw_status_reg (size:32b/4B) */ +struct fw_status_reg { + u32 fw_status; + #define FW_STATUS_REG_CODE_MASK 0xffffUL + #define FW_STATUS_REG_CODE_SFT 0 + #define FW_STATUS_REG_CODE_READY 0x8000UL + #define FW_STATUS_REG_CODE_LAST FW_STATUS_REG_CODE_READY + #define FW_STATUS_REG_IMAGE_DEGRADED 0x10000UL + #define FW_STATUS_REG_RECOVERABLE 0x20000UL + #define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL + #define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL + #define FW_STATUS_REG_SHUTDOWN 0x100000UL + #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL + #define FW_STATUS_REG_RECOVERING 0x400000UL + #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL +}; + +/* hcomm_status (size:64b/8B) */ +struct hcomm_status { + u32 sig_ver; + #define HCOMM_STATUS_VER_MASK 0xffUL + #define HCOMM_STATUS_VER_SFT 0 + #define HCOMM_STATUS_VER_LATEST 0x1UL + #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST + #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL + #define HCOMM_STATUS_SIGNATURE_SFT 8 + #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8) + #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL + u32 fw_status_loc; + #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL + #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0 + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL + 
#define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL + #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 + #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL + #define HCOMM_STATUS_TRUE_OFFSET_SFT 2 +}; +#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL + +#endif /* _BNXT_HSI_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.c new file mode 100644 index 000000000000..d88248776695 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.c @@ -0,0 +1,245 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_hwmon.h" + +#ifdef CONFIG_BNXT_HWMON + +void bnxt_hwmon_notify_event(struct bnxt *bp) +{ + u32 attr; + + if (!bp->hwmon_dev) + return; + + switch (bp->thermal_threshold_type) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN: + attr = hwmon_temp_max_alarm; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL: + attr = hwmon_temp_crit_alarm; + break; + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL: + case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN: + attr = hwmon_temp_emergency_alarm; + break; + default: + return; + } + + hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0); +} + +static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp) +{ + struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + 
if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) + goto drop_req; + + if (temp) { + *temp = resp->temp; + } else if (resp->flags & + TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) { + bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED; + bp->warn_thresh_temp = resp->warn_threshold; + bp->crit_thresh_temp = resp->critical_threshold; + bp->fatal_thresh_temp = resp->fatal_threshold; + bp->shutdown_thresh_temp = resp->shutdown_threshold; + } +drop_req: + hwrm_req_drop(bp, req); + return rc; +} + +static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct bnxt *bp = _data; + + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_max: + case hwmon_temp_crit: + case hwmon_temp_emergency: + case hwmon_temp_max_alarm: + case hwmon_temp_crit_alarm: + case hwmon_temp_emergency_alarm: + if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED)) + return 0; + return 0444; + default: + return 0; + } +} + +static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct bnxt *bp = dev_get_drvdata(dev); + u8 temp = 0; + int rc; + + switch (attr) { + case hwmon_temp_input: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp * 1000; + return rc; + case hwmon_temp_max: + *val = bp->warn_thresh_temp * 1000; + return 0; + case hwmon_temp_crit: + *val = bp->crit_thresh_temp * 1000; + return 0; + case hwmon_temp_emergency: + *val = bp->fatal_thresh_temp * 1000; + return 0; + case hwmon_temp_max_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp >= bp->warn_thresh_temp; + return rc; + case hwmon_temp_crit_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = temp >= bp->crit_thresh_temp; + return rc; + case hwmon_temp_emergency_alarm: + rc = bnxt_hwrm_temp_query(bp, &temp); + if (!rc) + *val = 
temp >= bp->fatal_thresh_temp; + return rc; + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_channel_info *bnxt_hwmon_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | + HWMON_T_MAX | HWMON_T_CRIT | + HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM | + HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM), + NULL +}; + +static const struct hwmon_ops bnxt_hwmon_ops = { + .is_visible = bnxt_hwmon_is_visible, + .read = bnxt_hwmon_read, +}; + +static const struct hwmon_chip_info bnxt_hwmon_chip_info = { + .ops = &bnxt_hwmon_ops, + .info = bnxt_hwmon_info, +}; + +static ssize_t temp1_shutdown_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnxt *bp = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000); +} + +static ssize_t temp1_shutdown_alarm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct bnxt *bp = dev_get_drvdata(dev); + u8 temp; + int rc; + + rc = bnxt_hwrm_temp_query(bp, &temp); + if (rc) + return -EIO; + + return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp); +} + +static DEVICE_ATTR_RO(temp1_shutdown); +static DEVICE_ATTR_RO(temp1_shutdown_alarm); + +static struct attribute *bnxt_temp_extra_attrs[] = { + &dev_attr_temp1_shutdown.attr, + &dev_attr_temp1_shutdown_alarm.attr, + NULL, +}; + +static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = kobj_to_dev(kobj); + struct bnxt *bp = dev_get_drvdata(dev); + + /* Shutdown temperature setting in NVM is optional */ + if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) || + !bp->shutdown_thresh_temp) + return 0; + + return attr->mode; +} + +static const struct attribute_group bnxt_temp_extra_group = { + .attrs = bnxt_temp_extra_attrs, + .is_visible = bnxt_temp_extra_attrs_visible, +}; +__ATTRIBUTE_GROUPS(bnxt_temp_extra); + +void bnxt_hwmon_uninit(struct bnxt *bp) +{ + if (bp->hwmon_dev) { + 
hwmon_device_unregister(bp->hwmon_dev); + bp->hwmon_dev = NULL; + } +} + +void bnxt_hwmon_init(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int rc; + + /* temp1_xxx is only sensor, ensure not registered if it will fail */ + rc = bnxt_hwrm_temp_query(bp, NULL); + if (rc == -EACCES || rc == -EOPNOTSUPP) { + bnxt_hwmon_uninit(bp); + return; + } + + if (bp->hwmon_dev) + return; + + bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev, + DRV_MODULE_NAME, bp, + &bnxt_hwmon_chip_info, + bnxt_temp_extra_groups); + if (IS_ERR(bp->hwmon_dev)) { + bp->hwmon_dev = NULL; + dev_warn(&pdev->dev, "Cannot register hwmon device\n"); + } +} + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.h new file mode 100644 index 000000000000..d9fd0ad51072 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwmon.h @@ -0,0 +1,30 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HWMON_H +#define BNXT_HWMON_H + +#ifdef CONFIG_BNXT_HWMON +void bnxt_hwmon_init(struct bnxt *bp); +void bnxt_hwmon_uninit(struct bnxt *bp); +void bnxt_hwmon_notify_event(struct bnxt *bp); +#else +static inline void bnxt_hwmon_uninit(struct bnxt *bp) +{ +} + +static inline void bnxt_hwmon_init(struct bnxt *bp) +{ +} + +static inline void bnxt_hwmon_notify_event(struct bnxt *bp) +{ +} +#endif +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.c new file mode 100644 index 000000000000..a02c9d257443 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.c @@ -0,0 +1,836 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020-2022 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type) +{ + return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL; +} + +/** + * __hwrm_req_init() - Initialize an HWRM request. + * @bp: The driver context. + * @req: A pointer to the request pointer to initialize. + * @req_type: The request type. This will be converted to the little endian + * before being written to the req_type field of the returned request. + * @req_len: The length of the request to be allocated. + * + * Allocate DMA resources and initialize a new HWRM request object of the + * given type. The response address field in the request is configured with + * the DMA bus address that has been mapped for the response and the passed + * request is pointed to kernel virtual memory mapped for the request (such + * that short_input indirection can be accomplished without copying). The + * request’s target and completion ring are initialized to default values and + * can be overridden by writing to the returned request object directly. + * + * The initialized request can be further customized by writing to its fields + * directly, taking care to covert such fields to little endian. The request + * object will be consumed (and all its associated resources release) upon + * passing it to hwrm_req_send() unless ownership of the request has been + * claimed by the caller via a call to hwrm_req_hold(). If the request is not + * consumed, either because it is never sent or because ownership has been + * claimed, then it must be released by a call to hwrm_req_drop(). 
+ * + * Return: zero on success, negative error code otherwise: + * E2BIG: the type of request pointer is too large to fit. + * ENOMEM: an allocation failure occurred. + */ +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len) +{ + struct bnxt_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnxt_hwrm_ctx *ctx = ctx_addr; + u64 sentinel; + + if (!req) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "null HWRM request"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + /* can only be due to 
software bug, be loud */ + netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +/** + * hwrm_req_timeout() - Set the completion timeout for the request. + * @bp: The driver context. + * @req: The request to set the timeout. + * @timeout: The timeout in milliseconds. + * + * Set the timeout associated with the request for subsequent calls to + * hwrm_req_send(). Some requests are long running and require a different + * timeout than the default. + */ +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->timeout = timeout; +} + +/** + * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices. + * @bp: The driver context. + * @req: The request for which calls to hwrm_req_dma_slice() will have altered + * allocation flags. + * @gfp: A bitmask of GFP flags. These flags are passed to dma_alloc_coherent() + * whenever it is used to allocate backing memory for slices. Note that + * calls to hwrm_req_dma_slice() will not always result in new allocations, + * however, memory suballocated from the request buffer is already + * __GFP_ZERO. + * + * Sets the GFP allocation flags associated with the request for subsequent + * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO + * for slice allocations. + */ +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->gfp = gfp; +} + +/** + * hwrm_req_replace() - Replace request data. + * @bp: The driver context. + * @req: The request to modify. A call to hwrm_req_replace() is conceptually + * an assignment of new_req to req. Subsequent calls to HWRM API functions, + * such as hwrm_req_send(), should thus use req and not new_req (in fact, + * calls to HWRM API functions will fail if non-managed request objects + * are passed). 
+ * @len: The length of new_req. + * @new_req: The pre-built request to copy or reference. + * + * Replaces the request data in req with that of new_req. This is useful in + * scenarios where a request object has already been constructed by a third + * party prior to creating a resource managed request using hwrm_req_init(). + * Depending on the length, hwrm_req_replace() will either copy the new + * request data into the DMA memory allocated for req, or it will simply + * reference the new request and use it in lieu of req during subsequent + * calls to hwrm_req_send(). The resource management is associated with + * req and is independent of and does not apply to new_req. The caller must + * ensure that the lifetime of new_req is least as long as req. Any slices + * that may have been associated with the original request are released. + * + * Return: zero on success, negative error code otherwise: + * E2BIG: Request is too large. + * EINVAL: Invalid request to modify. + */ +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *internal_req = req; + u16 req_type; + + if (!ctx) + return -EINVAL; + + if (len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + /* free any existing slices */ + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + if (ctx->slice_addr) { + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + ctx->slice_addr = NULL; + } + ctx->gfp = GFP_KERNEL; + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) { + memcpy(internal_req, new_req, len); + } else { + internal_req->req_type = ((struct input *)new_req)->req_type; + ctx->req = new_req; + } + + ctx->req_len = len; + ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle + + BNXT_HWRM_RESP_OFFSET); + + /* update sentinel for potentially new request type */ + req_type = le16_to_cpu(internal_req->req_type); + ctx->sentinel = hwrm_calc_sentinel(ctx, 
req_type); + + return 0; +} + +/** + * hwrm_req_flags() - Set non internal flags of the ctx + * @bp: The driver context. + * @req: The request containing the HWRM command + * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set + * + * ctx flags can be used by the callers to instruct how the subsequent + * hwrm_req_send() should behave. Example: callers can use hwrm_req_flags + * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors of hwrm_req_send() + * or with BNXT_HWRM_FULL_WAIT enforce hwrm_req_send() to wait for full timeout + * even if FW is not responding. + * This generic function can be used to set any flag that is not an internal flag + * of the HWRM module. + */ +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->flags |= (flags & HWRM_API_FLAGS); +} + +/** + * hwrm_req_hold() - Claim ownership of the request's resources. + * @bp: The driver context. + * @req: A pointer to the request to own. The request will no longer be + * consumed by calls to hwrm_req_send(). + * + * Take ownership of the request. Ownership places responsibility on the + * caller to free the resources associated with the request via a call to + * hwrm_req_drop(). The caller taking ownership implies that a subsequent + * call to hwrm_req_send() will not consume the request (ie. sending will + * not free the associated resources if the request is owned by the caller). + * Taking ownership returns a reference to the response. Retaining and + * accessing the response data is the most common reason to take ownership + * of the request. Ownership can also be acquired in order to reuse the same + * request object across multiple invocations of hwrm_req_send(). + * + * Return: A pointer to the response object. 
+ * + * The resources associated with the response will remain available to the + * caller until ownership of the request is relinquished via a call to + * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if + * a valid request is provided. A returned NULL value would imply a driver + * bug and the implementation will complain loudly in the logs to aid in + * detection. It should not be necessary to check the result for NULL. + */ +void *hwrm_req_hold(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED; + return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle); +} + +/** + * hwrm_req_drop() - Release all resources associated with the request. + * @bp: The driver context. + * @req: The request to consume, releasing the associated resources. The + * request object, any slices, and its associated response are no + * longer valid. 
+ * + * It is legal to call hwrm_req_drop() on an unowned request, provided it + * has not already been consumed by hwrm_req_send() (for example, to release + * an aborted request). A given request should not be dropped more than once, + * nor should it be dropped after having been consumed by hwrm_req_send(). To + * do so is an error (the context will not be found and a stack trace will be + * rendered in the kernel log). + */ +void hwrm_req_drop(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + __hwrm_ctx_drop(bp, ctx); +} + +static int __hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + case HWRM_ERR_CODE_PF_UNAVAILABLE: + return -ENODEV; + default: + return -EIO; + } +} + +static struct bnxt_hwrm_wait_token * +__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst) + __acquires(&bp->hwrm_cmd_lock) +{ + struct bnxt_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bp->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNXT_HWRM_PENDING; + if (dst == BNXT_HWRM_CHNL_CHIMP) { + token->seq_id = bp->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list); + } else { + token->seq_id = bp->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void +__hwrm_release_token(struct bnxt *bp, struct 
bnxt_hwrm_wait_token *token) + __releases(&bp->hwrm_cmd_lock) +{ + if (token->dst == BNXT_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +void +hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) +{ + struct hlist_node __maybe_unused *dummy; + struct bnxt_hwrm_wait_token *token; + + rcu_read_lock(); + __hlist_for_each_entry_rcu(token, dummy, &bp->hwrm_pending_list, node) { + if (token->seq_id == seq_id) { + WRITE_ONCE(token->state, state); + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + /* hwrm may have completed when we receive deferred event */ + if (state != BNXT_HWRM_DEFERRED) + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); +} + +static void hwrm_req_dbg(struct bnxt *bp, struct input *req) +{ + u32 ring = le16_to_cpu(req->cmpl_ring); + u32 type = le16_to_cpu(req->req_type); + u32 tgt = le16_to_cpu(req->target_id); + u32 seq = le16_to_cpu(req->seq_id); + char opt[32] = "\n"; + + if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING)) + snprintf(opt, 16, " ring %d\n", ring); + + if (unlikely(tgt != BNXT_HWRM_TARGET)) + snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt); + + netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s", + type, seq, opt); +} + +#define hwrm_err(bp, ctx, fmt, ...) 
\ + do { \ + if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \ + netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \ + else \ + netdev_err((bp)->dev, fmt, __VA_ARGS__); \ + } while (0) + +static inline bool +hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status) +{ + if (req_type == HWRM_VER_GET) + return false; + + if (!bp->fw_health || !bp->fw_health->status_reliable) + return false; + + *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); + return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status); +} + +static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + struct bnxt_hwrm_wait_token *token = NULL; + struct hwrm_short_input short_input = {0}; + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; + unsigned int i, timeout, tmo_count; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + u32 req_type, sts; + int rc = -EBUSY; + u16 len = 0; + u8 *valid; + +#ifndef HSI_DBG_DISABLE + decode_hwrm_req(ctx->req); +#endif + + if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + req_type = le16_to_cpu(ctx->req->req_type); + if (BNXT_NO_FW_ACCESS(bp) && + (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) { + netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n", + req_type); + goto exit; + } + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN && + msg_len > bp->hwrm_max_ext_req_len) { + netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x", + req_type); + rc = -E2BIG; + goto exit; + } + + if (hwrm_req_kong(bp, ctx->req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n", + req_type); + rc = -EINVAL; + goto exit; + } + } + + token 
= __hwrm_acquire_token(bp, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { + short_input.req_type = ctx->req->req_type; + short_input.signature = + cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); + short_input.size = cpu_to_le16(msg_len); + short_input.req_addr = cpu_to_le64(ctx->dma_handle); + + data = (u32 *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bp->bar0 + bar_offset + i); + + /* Ring channel doorbell */ + writel(1, bp->bar0 + doorbell_offset); + + hwrm_req_dbg(bp, ctx->req); + + if (!pci_is_enabled(bp->pdev)) { + rc = -ENODEV; + goto exit; + } + + timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT); + + /* convert timeout to usec */ + timeout *= 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. + */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE && + i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (hwrm_wait_must_abort(bp, req_type, &sts)) { + hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n", + req_type, sts); + goto exit; + } + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { + hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n", + req_type); + goto exit; + } + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */ + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + + if (token && + READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) { + __hwrm_release_token(bp, token); + token = NULL; + } + + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + if (len) { + __le16 resp_seq = READ_ONCE(ctx->resp->seq_id); + + if (resp_seq == ctx->req->seq_id) + break; + if (resp_seq != seen_out_of_seq) { + netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n", + le16_to_cpu(resp_seq), + req_type, + le16_to_cpu(ctx->req->seq_id)); + seen_out_of_seq = resp_seq; + } + } + + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (hwrm_wait_must_abort(bp, req_type, &sts)) { + hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n", + req_type, + le16_to_cpu(ctx->req->seq_id), + len, sts); + goto exit; + } + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (i >= 
tmo_count) { + hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", + hwrm_total_timeout(i), req_type, + le16_to_cpu(ctx->req->seq_id), len); + goto exit; + } + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; + if (j < 10) { + udelay(1); + j++; + } else { + usleep_range(20, 30); + j += 20; + } + } + + if (j >= HWRM_VALID_BIT_DELAY_USEC) { + hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", + hwrm_total_timeout(i) + j, req_type, + le16_to_cpu(ctx->req->seq_id), len, *valid); + goto exit; + } + } + + /* Zero valid bit for compatibility. Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; + rc = le16_to_cpu(ctx->resp->error_code); + if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n", + req_type); + else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE && + rc != HWRM_ERR_CODE_ENTITY_NOT_PRESENT) + hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + req_type, le16_to_cpu(ctx->req->seq_id), rc); +#ifndef HSI_DBG_DISABLE + decode_hwrm_resp(ctx->resp); +#endif + rc = __hwrm_to_stderr(rc); +exit: + if (token) + __hwrm_release_token(bp, token); + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) + ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY; + else + __hwrm_ctx_drop(bp, ctx); + return rc; +} + +/** + * hwrm_req_send() - Execute an HWRM command. + * @bp: The driver context. + * @req: A pointer to the request to send. The DMA resources associated with + * the request will be released (ie. the request will be consumed) unless + * ownership of the request has been assumed by the caller via a call to + * hwrm_req_hold(). 
+ *
+ * Send an HWRM request to the device and wait for a response. The request is
+ * consumed if it is not owned by the caller. This function will block until
+ * the request has either completed or times out due to an error.
+ *
+ * Return: A result code.
+ *
+ * The result is zero on success, otherwise the negative error code indicates
+ * one of the following errors:
+ *	E2BIG: The request was too large.
+ *	EBUSY: The firmware is in a fatal state or the request timed out.
+ *	EACCES: HWRM access denied.
+ *	ENOSPC: HWRM resource allocation error.
+ *	EINVAL: Request parameters are invalid.
+ *	ENOMEM: HWRM has no buffers.
+ *	EAGAIN: HWRM busy or reset in progress.
+ *	EOPNOTSUPP: Invalid request type.
+ *	ENODEV: PCI device is disabled or parent PF is down when issued on VFs.
+ *	EROFS: The request is not allowed due to a secure lock violation.
+ *	EIO: Any other error.
+ * Error handling is orthogonal to request ownership. An unowned request will
+ * still be consumed on error. If the caller owns the request, then the caller
+ * is responsible for releasing the resources. Otherwise, hwrm_req_send() will
+ * always consume the request.
+ */
+int hwrm_req_send(struct bnxt *bp, void *req)
+{
+	struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req);
+
+	if (!ctx)
+		return -EINVAL;
+
+	return __hwrm_send(bp, ctx);
+}
+
+/**
+ * hwrm_req_send_silent() - A silent version of hwrm_req_send().
+ * @bp: The driver context.
+ * @req: The request to send without logging.
+ *
+ * The same as hwrm_req_send(), except that the request is silenced using
+ * hwrm_req_silence() prior to the call. This version of the function is
+ * provided solely to preserve the legacy API's flavor for this functionality.
+ *
+ * Return: A result code, see hwrm_req_send().
+ */
+int hwrm_req_send_silent(struct bnxt *bp, void *req)
+{
+	hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+	return hwrm_req_send(bp, req);
+}
+
+/**
+ * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory.
+ * @bp: The driver context. + * @req: The request for which indirect data will be associated. + * @size: The size of the allocation. + * @dma_handle: The bus address associated with the allocation. The HWRM API has + * no knowledge about the type of the request and so cannot infer how the + * caller intends to use the indirect data. Thus, the caller is + * responsible for configuring the request object appropriately to + * point to the associated indirect memory. Note, DMA handle has the + * same definition as it does in dma_alloc_coherent(), the caller is + * responsible for endian conversions via cpu_to_le64() before assigning + * this address. + * + * Allocates DMA mapped memory for indirect data related to a request. The + * lifetime of the DMA resources will be bound to that of the request (ie. + * they will be automatically released when the request is either consumed by + * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are + * efficiently suballocated out of the request buffer space, hence the name + * slice, while larger requests are satisfied via an underlying call to + * dma_alloc_coherent(). Multiple suballocations are supported, however, only + * one externally mapped region is. + * + * Return: The kernel virtual address of the DMA mapping. 
+ */ +void * +hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + /* could not suballocate from ctx buffer, try create a new mapping */ + if (ctx->slice_addr) { + /* if one exists, can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp); + + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000000..da609adf8eac --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_hwrm.h @@ -0,0 +1,157 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_HWRM_H +#define BNXT_HWRM_H + +#include "bnxt_hsi.h" + +enum bnxt_hwrm_ctx_flags { + /* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */ + BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */ + BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */ + BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */ + BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */ +}; + +#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT) + +struct bnxt_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + u32 slice_size; + u32 req_len; + enum bnxt_hwrm_ctx_flags flags; + unsigned int timeout; + u32 allocated; + gfp_t gfp; +}; + +enum bnxt_hwrm_wait_state { + BNXT_HWRM_PENDING, + BNXT_HWRM_DEFERRED, + BNXT_HWRM_COMPLETE, + BNXT_HWRM_CANCELLED, +}; + +enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG }; + +struct bnxt_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnxt_hwrm_wait_state state; + enum bnxt_hwrm_chnl dst; + u16 seq_id; +}; + +void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); + +#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define SHORT_HWRM_CMD_TIMEOUT 20 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) +#define HWRM_CMD_MAX_TIMEOUT 40000U +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT (bp->hwrm_cmd_max_timeout) +#ifdef BNXT_FPGA +#define HWRM_FPGA_TIMEOUT 5000 +#endif +#define BNXT_HWRM_TARGET 0xffff +#define BNXT_HWRM_NO_CMPL_RING -1 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE +#define BNXT_HWRM_RESP_OFFSET (BNXT_HWRM_DMA_SIZE - \ + BNXT_HWRM_RESP_RESERVED) +#define 
BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \ + sizeof(struct bnxt_hwrm_ctx)) +#define BNXT_HWRM_DMA_ALIGN 16 +#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +static inline unsigned int hwrm_total_timeout(unsigned int n) +{ + return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT : + HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + + (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT; +} + +#define HWRM_VALID_BIT_DELAY_USEC 50000 + +static inline bool hwrm_req_type_cfa(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + return true; + default: + return false; + } +} + +static inline bool hwrm_req_kong(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + (hwrm_req_type_cfa(le16_to_cpu(req->req_type)) || + le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); +} + +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len); +#define hwrm_req_init(bp, req, req_type) \ + __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req))) +void *hwrm_req_hold(struct bnxt *bp, void *req); +void hwrm_req_drop(struct bnxt *bp, void *req); +void hwrm_req_flags(struct bnxt *bp, void *req, enum 
bnxt_hwrm_ctx_flags flags); +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout); +int hwrm_req_send(struct bnxt *bp, void *req); +int hwrm_req_send_silent(struct bnxt *bp, void *req); +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len); +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags); +void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma); + +/* Older devices can only support req length of 128. + * HWRM_FUNC_CFG requests which don't need fields starting at + * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG. + */ +static inline int +bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp, + struct hwrm_func_cfg_input **req) +{ + u32 req_len; + + req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len); + return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len); +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.c new file mode 100644 index 000000000000..635204f90916 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.c @@ -0,0 +1,1329 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_KTLS +#include +#endif + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_mpc.h" +#include "bnxt_ktls.h" + +#if defined(HAVE_KTLS) && IS_ENABLED(CONFIG_TLS_DEVICE) && (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) + +#define BNXT_PARTITION_CAP_BITS \ + (FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK | \ + FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK) + +#define BNXT_PARTITION_CAP(resp) \ + ((le32_to_cpu((resp)->flags_ext2) & \ + FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED) && \ + ((le16_to_cpu(resp->xid_partition_cap) & \ + BNXT_PARTITION_CAP_BITS) == BNXT_PARTITION_CAP_BITS)) + +void bnxt_alloc_ktls_info(struct bnxt *bp, struct hwrm_func_qcaps_output *resp) +{ + u16 max_keys = le16_to_cpu(resp->max_key_ctxs_alloc); + struct bnxt_ktls_info *ktls = bp->ktls_info; + + if (BNXT_VF(bp)) + return; + if (!ktls) { + bool partition_mode = false; + struct bnxt_kctx *kctx; + u16 batch_sz = 0; + int i; + + ktls = kzalloc(sizeof(*ktls), GFP_KERNEL); + if (!ktls) + return; + + if (BNXT_PARTITION_CAP(resp)) { + batch_sz = le16_to_cpu(resp->ctxs_per_partition); + if (batch_sz && batch_sz <= BNXT_KID_BATCH_SIZE) + partition_mode = true; + } + for (i = 0; i < BNXT_MAX_CRYPTO_KEY_TYPE; i++) { + kctx = &ktls->kctx[i]; + kctx->type = i; + if (i == BNXT_TX_CRYPTO_KEY_TYPE) + kctx->max_ctx = BNXT_MAX_TX_CRYPTO_KEYS; + else + kctx->max_ctx = BNXT_MAX_RX_CRYPTO_KEYS; + INIT_LIST_HEAD(&kctx->list); + spin_lock_init(&kctx->lock); + atomic_set(&kctx->alloc_pending, 0); + init_waitqueue_head(&kctx->alloc_pending_wq); + if (partition_mode) { + int bmap_sz; + + bmap_sz = DIV_ROUND_UP(kctx->max_ctx, batch_sz); + kctx->partition_bmap = bitmap_zalloc(bmap_sz, + GFP_KERNEL); + if (!kctx->partition_bmap) + partition_mode = false; + } + } + ktls->partition_mode = partition_mode; + 
ktls->ctxs_per_partition = batch_sz; + + hash_init(ktls->filter_tbl); + spin_lock_init(&ktls->filter_lock); + + atomic_set(&ktls->pending, 0); + + bp->ktls_info = ktls; + } + ktls->max_key_ctxs_alloc = max_keys; +} + +void bnxt_clear_cfa_tls_filters_tbl(struct bnxt *bp) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_kfltr_info *kfltr; + struct hlist_node *tmp_node; + int bkt; + + if (!ktls) + return; + + spin_lock(&ktls->filter_lock); + hash_for_each_safe(ktls->filter_tbl, bkt, tmp_node, kfltr, hash) { + hash_del_rcu(&kfltr->hash); + kfree_rcu(kfltr, rcu); + } + ktls->filter_count = 0; + spin_unlock(&ktls->filter_lock); +} + +void bnxt_free_ktls_info(struct bnxt *bp) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_kid_info *kid, *tmp; + struct bnxt_kctx *kctx; + int i; + + if (!ktls) + return; + + /* Shutting down, no need to protect the lists. */ + for (i = 0; i < BNXT_MAX_CRYPTO_KEY_TYPE; i++) { + kctx = &ktls->kctx[i]; + list_for_each_entry_safe(kid, tmp, &kctx->list, list) { + list_del(&kid->list); + kfree(kid); + } + bitmap_free(kctx->partition_bmap); + } + bnxt_clear_cfa_tls_filters_tbl(bp); + kmem_cache_destroy(ktls->mpc_cache); + kfree(ktls); + bp->ktls_info = NULL; +} + +void bnxt_hwrm_reserve_pf_key_ctxs(struct bnxt *bp, + struct hwrm_func_cfg_input *req) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u32 tx, rx; + + if (!ktls) + return; + + tx = min(ktls->tck.max_ctx, hw_resc->max_tx_key_ctxs); + req->num_ktls_tx_key_ctxs = cpu_to_le32(tx); + rx = min(ktls->rck.max_ctx, hw_resc->max_rx_key_ctxs); + req->num_ktls_rx_key_ctxs = cpu_to_le32(rx); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_KTLS_TX_KEY_CTXS | + FUNC_CFG_REQ_ENABLES_KTLS_RX_KEY_CTXS); +} + +static int __bnxt_partition_alloc(struct bnxt_kctx *kctx, u32 *id) +{ + unsigned int next, max = kctx->max_ctx; + + next = find_next_zero_bit(kctx->partition_bmap, max, kctx->next); + if (next >= max) + next = 
find_first_zero_bit(kctx->partition_bmap, max); + if (next >= max) + return -ENOSPC; + *id = next; + kctx->next = next; + return 0; +} + +static int bnxt_partition_alloc(struct bnxt_kctx *kctx, u32 *id) +{ + int rc; + + do { + rc = __bnxt_partition_alloc(kctx, id); + if (rc) + return rc; + } while (test_and_set_bit(*id, kctx->partition_bmap)); + return 0; +} + +static int bnxt_key_ctx_store(struct bnxt *bp, __le32 *key_buf, u32 num, + bool contig, struct bnxt_kctx *kctx, u32 *id) +{ + struct bnxt_kid_info *kid; + u32 i; + + for (i = 0; i < num; ) { + kid = kzalloc(sizeof(*kid), GFP_KERNEL); + if (!kid) + return -ENOMEM; + kid->start_id = le32_to_cpu(key_buf[i]); + if (contig) + kid->count = num; + else + kid->count = 1; + bitmap_set(kid->ids, 0, kid->count); + if (id && !i) { + clear_bit(0, kid->ids); + *id = kid->start_id; + } + spin_lock(&kctx->lock); + list_add_tail_rcu(&kid->list, &kctx->list); + kctx->total_alloc += kid->count; + spin_unlock(&kctx->lock); + i += kid->count; + } + return 0; +} + +static int bnxt_hwrm_key_ctx_alloc(struct bnxt *bp, struct bnxt_kctx *kctx, + u32 num, u32 *id) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct hwrm_func_key_ctx_alloc_output *resp; + struct hwrm_func_key_ctx_alloc_input *req; + dma_addr_t mapping; + int pending_count; + __le32 *key_buf; + bool contig; + int rc; + + num = min_t(u32, num, ktls->max_key_ctxs_alloc); + rc = hwrm_req_init(bp, req, HWRM_FUNC_KEY_CTX_ALLOC); + if (rc) + return rc; + + if (ktls->partition_mode) { + u32 partition_id; + + num = ktls->ctxs_per_partition; + rc = bnxt_partition_alloc(kctx, &partition_id); + if (rc) + goto key_alloc_exit; + req->partition_start_xid = cpu_to_le32(partition_id * num); + } else { + key_buf = hwrm_req_dma_slice(bp, req, num * 4, &mapping); + if (!key_buf) { + rc = -ENOMEM; + goto key_alloc_exit; + } + req->dma_bufr_size_bytes = cpu_to_le32(num * 4); + req->host_dma_addr = cpu_to_le64(mapping); + } + resp = hwrm_req_hold(bp, req); + + req->key_ctx_type = 
kctx->type; + req->num_key_ctxs = cpu_to_le16(num); + + pending_count = atomic_inc_return(&kctx->alloc_pending); + rc = hwrm_req_send(bp, req); + atomic_dec(&kctx->alloc_pending); + if (rc) + goto key_alloc_exit_wake; + + num = le16_to_cpu(resp->num_key_ctxs_allocated); + contig = + resp->flags & FUNC_KEY_CTX_ALLOC_RESP_FLAGS_KEY_CTXS_CONTIGUOUS; + if (ktls->partition_mode) + key_buf = &resp->partition_start_xid; + rc = bnxt_key_ctx_store(bp, key_buf, num, contig, kctx, id); + +key_alloc_exit_wake: + if (pending_count >= BNXT_KCTX_ALLOC_PENDING_MAX) + wake_up_all(&kctx->alloc_pending_wq); +key_alloc_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_alloc_one_kctx(struct bnxt_kctx *kctx, u32 *id) +{ + struct bnxt_kid_info *kid; + int rc = -ENOMEM; + + rcu_read_lock(); + list_for_each_entry_rcu(kid, &kctx->list, list) { + u32 idx = 0; + + do { + idx = find_next_bit(kid->ids, kid->count, idx); + if (idx >= kid->count) + break; + if (test_and_clear_bit(idx, kid->ids)) { + *id = kid->start_id + idx; + rc = 0; + goto alloc_done; + } + } while (1); + } + +alloc_done: + rcu_read_unlock(); + return rc; +} + +static void bnxt_free_one_kctx(struct bnxt_kctx *kctx, u32 id) +{ + struct bnxt_kid_info *kid; + + rcu_read_lock(); + list_for_each_entry_rcu(kid, &kctx->list, list) { + if (id >= kid->start_id && id < kid->start_id + kid->count) { + set_bit(id - kid->start_id, kid->ids); + break; + } + } + rcu_read_unlock(); +} + +#define BNXT_KCTX_ALLOC_RETRY_MAX 3 + +static int bnxt_key_ctx_alloc_one(struct bnxt *bp, struct bnxt_kctx *kctx, + u32 *id) +{ + int rc, retry = 0; + + while (retry++ < BNXT_KCTX_ALLOC_RETRY_MAX) { + rc = bnxt_alloc_one_kctx(kctx, id); + if (!rc) + return 0; + + if ((kctx->total_alloc + BNXT_KID_BATCH_SIZE) > kctx->max_ctx) + return -ENOSPC; + + if (!BNXT_KCTX_ALLOC_OK(kctx)) { + wait_event(kctx->alloc_pending_wq, + BNXT_KCTX_ALLOC_OK(kctx)); + continue; + } + rc = bnxt_hwrm_key_ctx_alloc(bp, kctx, BNXT_KID_BATCH_SIZE, id); + if (!rc) + 
return 0; + } + return -EAGAIN; +} + +#define BNXT_TLS_FLTR_FLAGS \ + (CFA_TLS_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_KID | \ + CFA_TLS_FILTER_ALLOC_REQ_ENABLES_DST_ID) + +static int bnxt_hwrm_cfa_tls_filter_alloc(struct bnxt *bp, struct sock *sk, + u32 kid) +{ + struct hwrm_cfa_tls_filter_alloc_output *resp; + struct hwrm_cfa_tls_filter_alloc_input *req; + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct inet_sock *inet = inet_sk(sk); + struct bnxt_l2_filter *l2_fltr; + struct bnxt_kfltr_info *kfltr; + int rc; + + kfltr = kzalloc(sizeof(*kfltr), GFP_KERNEL); + if (!kfltr) + return -ENOMEM; + + rc = hwrm_req_init(bp, req, HWRM_CFA_TLS_FILTER_ALLOC); + if (rc) { + kfree(kfltr); + return rc; + } + + req->enables = cpu_to_le32(BNXT_TLS_FLTR_FLAGS); + + l2_fltr = bp->vnic_info[BNXT_VNIC_DEFAULT].l2_filters[0]; + req->l2_filter_id = l2_fltr->base.filter_id; + req->dst_id = cpu_to_le16(bp->vnic_info[BNXT_VNIC_DEFAULT].fw_vnic_id); + req->kid = cpu_to_le32(kid); + + req->ip_protocol = CFA_TLS_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP; + req->src_port = inet->inet_dport; + req->dst_port = inet->inet_sport; + + switch (sk->sk_family) { + case AF_INET: + default: + req->ethertype = htons(ETH_P_IP); + req->ip_addr_type = CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->src_ipaddr[0] = inet->inet_daddr; + req->dst_ipaddr[0] = inet->inet_saddr; + break; + case AF_INET6: { + struct ipv6_pinfo *inet6 = inet6_sk(sk); + + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = CFA_TLS_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + memcpy(req->src_ipaddr, &sk->sk_v6_daddr, sizeof(req->src_ipaddr)); + 
memcpy(req->dst_ipaddr, &inet6->saddr, sizeof(req->dst_ipaddr)); + break; + } + } + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + kfree(kfltr); + } else { + kfltr->kid = kid; + kfltr->filter_id = resp->tls_filter_id; + spin_lock(&ktls->filter_lock); + ktls->filter_count++; + hash_add_rcu(ktls->filter_tbl, &kfltr->hash, kid); + spin_unlock(&ktls->filter_lock); + } + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_cfa_tls_filter_free(struct bnxt *bp, u32 kid) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct hwrm_cfa_tls_filter_free_input *req; + struct bnxt_kfltr_info *kfltr; + bool found = false; + int rc; + + rcu_read_lock(); + hash_for_each_possible_rcu(ktls->filter_tbl, kfltr, hash, kid) { + if (kfltr->kid == kid) { + found = true; + break; + } + } + rcu_read_unlock(); + if (!found) + return -ENOENT; + + rc = hwrm_req_init(bp, req, HWRM_CFA_TLS_FILTER_FREE); + if (rc) + return rc; + + req->tls_filter_id = kfltr->filter_id; + rc = hwrm_req_send(bp, req); + + spin_lock(&ktls->filter_lock); + ktls->filter_count--; + hash_del_rcu(&kfltr->hash); + spin_unlock(&ktls->filter_lock); + kfree_rcu(kfltr, rcu); + return rc; +} + +static int bnxt_xmit_crypto_cmd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + void *cmd, uint len, uint tmo) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_crypto_cmd_ctx *ctx = NULL; + unsigned long tmo_left, handle = 0; + int rc; + + if (tmo) { + u32 kid = CE_CMD_KID(cmd); + + ctx = kmem_cache_alloc(ktls->mpc_cache, GFP_KERNEL); + if (!ctx) + return -ENOMEM; + init_completion(&ctx->cmp); + handle = (unsigned long)ctx; + ctx->ce_cmp.opaque = + BNXT_KMPC_OPAQUE(txr->tx_ring_struct.mpc_chnl_type, + kid); + might_sleep(); + } + spin_lock(&txr->tx_lock); + rc = bnxt_start_xmit_mpc(bp, txr, cmd, len, handle); + spin_unlock(&txr->tx_lock); + if (rc || !tmo) + goto xmit_done; + + tmo_left = wait_for_completion_timeout(&ctx->cmp, msecs_to_jiffies(tmo)); + if (!tmo_left) { + 
ctx->ce_cmp.opaque = BNXT_INV_KMPC_OPAQUE; + netdev_warn(bp->dev, "kTLS MP cmd %08x timed out\n", + *((u32 *)cmd)); + rc = -ETIMEDOUT; + goto xmit_done; + } + if (CE_CMPL_STATUS(&ctx->ce_cmp) == CE_CMPL_STATUS_OK) + rc = 0; + else + rc = -EIO; +xmit_done: + if (ctx) + kmem_cache_free(ktls->mpc_cache, ctx); + return rc; +} + +static void bnxt_copy_tls_mp_data(u8 *dst, u8 *src, int bytes) +{ + int i; + + for (i = 0; i < bytes; i++) + dst[-i] = src[i]; +} + +static int bnxt_crypto_add(struct bnxt *bp, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, u32 tcp_seq_no, + u32 kid) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct bnxt_tx_ring_info *txr; + struct ce_add_cmd cmd = {0}; + u32 data; + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + txr = &mpc->mpc_rings[BNXT_MPC_TCE_TYPE][0]; + cmd.ctx_kind = CE_ADD_CMD_CTX_KIND_CK_TX; + } else { + txr = &mpc->mpc_rings[BNXT_MPC_RCE_TYPE][0]; + cmd.ctx_kind = CE_ADD_CMD_CTX_KIND_CK_RX; + } + + data = CE_ADD_CMD_OPCODE_ADD | (kid << CE_ADD_CMD_KID_SFT); + switch (crypto_info->cipher_type) { + case TLS_CIPHER_AES_GCM_128: { + struct tls12_crypto_info_aes_gcm_128 *aes; + + aes = (void *)crypto_info; + data |= CE_ADD_CMD_ALGORITHM_AES_GCM_128; + if (crypto_info->version == TLS_1_3_VERSION) + data |= CE_ADD_CMD_VERSION_TLS1_3; + memcpy(&cmd.session_key, aes->key, sizeof(aes->key)); + memcpy(&cmd.salt, aes->salt, sizeof(aes->salt)); + memcpy(&cmd.addl_iv, aes->iv, sizeof(aes->iv)); + bnxt_copy_tls_mp_data(&cmd.record_seq_num_end, aes->rec_seq, + sizeof(aes->rec_seq)); + break; + } + case TLS_CIPHER_AES_GCM_256: { + struct tls12_crypto_info_aes_gcm_256 *aes; + + aes = (void *)crypto_info; + data |= CE_ADD_CMD_ALGORITHM_AES_GCM_256; + if (crypto_info->version == TLS_1_3_VERSION) + data |= CE_ADD_CMD_VERSION_TLS1_3; + memcpy(&cmd.session_key, aes->key, sizeof(aes->key)); + memcpy(&cmd.salt, aes->salt, sizeof(aes->salt)); + memcpy(&cmd.addl_iv, aes->iv, sizeof(aes->iv)); + 
bnxt_copy_tls_mp_data(&cmd.record_seq_num_end, aes->rec_seq, + sizeof(aes->rec_seq)); + break; + } + } + cmd.ver_algo_kid_opcode = cpu_to_le32(data); + cmd.pkt_tcp_seq_num = cpu_to_le32(tcp_seq_no); + cmd.tls_header_tcp_seq_num = cmd.pkt_tcp_seq_num; + return bnxt_xmit_crypto_cmd(bp, txr, &cmd, sizeof(cmd), + BNXT_MPC_TMO_MSECS); +} + +static int bnxt_crypto_del(struct bnxt *bp, + enum tls_offload_ctx_dir direction, u32 kid) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct bnxt_tx_ring_info *txr; + struct ce_delete_cmd cmd = {0}; + u32 data; + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + txr = &mpc->mpc_rings[BNXT_MPC_TCE_TYPE][0]; + data = CE_DELETE_CMD_CTX_KIND_CK_TX; + } else { + txr = &mpc->mpc_rings[BNXT_MPC_RCE_TYPE][0]; + data = CE_DELETE_CMD_CTX_KIND_CK_RX; + } + + data |= CE_DELETE_CMD_OPCODE_DEL | (kid << CE_DELETE_CMD_KID_SFT); + + cmd.ctx_kind_kid_opcode = cpu_to_le32(data); + return bnxt_xmit_crypto_cmd(bp, txr, &cmd, sizeof(cmd), + BNXT_MPC_TMO_MSECS); +} + +static bool bnxt_ktls_cipher_supported(struct bnxt *bp, + struct tls_crypto_info *crypto_info) +{ + u16 type = crypto_info->cipher_type; + u16 version = crypto_info->version; + + if ((type == TLS_CIPHER_AES_GCM_128 || + type == TLS_CIPHER_AES_GCM_256) && + (version == TLS_1_2_VERSION || + version == TLS_1_3_VERSION)) + return true; + return false; +} + +static void bnxt_set_ktls_ctx_rx(struct tls_context *tls_ctx, + struct bnxt_ktls_offload_ctx_rx *kctx_rx) +{ + struct bnxt_ktls_offload_ctx_rx **rx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + + *rx = kctx_rx; +} + +static struct bnxt_ktls_offload_ctx_rx * +bnxt_get_ktls_ctx_rx(struct tls_context *tls_ctx) +{ + struct bnxt_ktls_offload_ctx_rx **rx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + + return *rx; +} + +static int bnxt_ktls_dev_add(struct net_device *dev, struct sock *sk, + enum tls_offload_ctx_dir direction, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct 
bnxt_ktls_offload_ctx_rx *kctx_rx = NULL; + struct bnxt_ktls_offload_ctx_tx *kctx_tx; + struct bnxt *bp = netdev_priv(dev); + struct tls_context *tls_ctx; + struct bnxt_ktls_info *ktls; + struct bnxt_kctx *kctx; + u32 kid; + int rc; + + BUILD_BUG_ON(sizeof(struct bnxt_ktls_offload_ctx_tx) > + TLS_DRIVER_STATE_SIZE_TX); + BUILD_BUG_ON(sizeof(struct bnxt_ktls_offload_ctx_rx *) > + TLS_DRIVER_STATE_SIZE_RX); + + if (!bnxt_ktls_cipher_supported(bp, crypto_info)) + return -EOPNOTSUPP; + + ktls = bp->ktls_info; + atomic_inc(&ktls->pending); + /* Make sure bnxt_close_nic() sees pending before we check the + * BNXT_STATE_OPEN flag. + */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + rc = -ENODEV; + goto exit; + } + + tls_ctx = tls_get_ctx(sk); + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + kctx_tx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + kctx = &ktls->tck; + } else { + if (ktls->filter_count > BNXT_MAX_KTLS_FILTER) { + rc = -ENOSPC; + goto exit; + } + kctx_rx = kzalloc(sizeof(*kctx_rx), GFP_KERNEL); + if (!kctx_rx) { + rc = -ENOMEM; + goto exit; + } + + spin_lock_init(&kctx_rx->resync_lock); + bnxt_set_ktls_ctx_rx(tls_ctx, kctx_rx); + kctx = &ktls->rck; + } + rc = bnxt_key_ctx_alloc_one(bp, kctx, &kid); + if (rc) + goto free_ctx_rx; + rc = bnxt_crypto_add(bp, direction, crypto_info, start_offload_tcp_sn, + kid); + if (rc) + goto free_kctx; + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + kctx_tx->kid = kid; + kctx_tx->tcp_seq_no = start_offload_tcp_sn; + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_ADD]); + } else { + kctx_rx->kid = kid; + rc = bnxt_hwrm_cfa_tls_filter_alloc(bp, sk, kid); + if (rc) { + int err = bnxt_crypto_del(bp, direction, kid); + + /* If unable to free, keep the KID */ + if (err) + goto free_ctx_rx; + goto free_kctx; + } + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_ADD]); + } +free_kctx: + if (rc) + bnxt_free_one_kctx(kctx, kid); +free_ctx_rx: + if (rc) + kfree(kctx_rx); +exit: + atomic_dec(&ktls->pending); + return 
rc; +} + +#if defined(BNXT_FPGA) +#define BNXT_RETRY_MAX 200 +#else +#define BNXT_RETRY_MAX 20 +#endif + +static void bnxt_ktls_dev_del(struct net_device *dev, + struct tls_context *tls_ctx, + enum tls_offload_ctx_dir direction) +{ + struct bnxt_ktls_offload_ctx_tx *kctx_tx; + struct bnxt_ktls_offload_ctx_rx *kctx_rx; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ktls_info *ktls; + struct bnxt_kctx *kctx; + int retry_cnt = 0; + u32 kid; + int rc; + + ktls = bp->ktls_info; +retry: + atomic_inc(&ktls->pending); + /* Make sure bnxt_close_nic() sees pending before we check the + * BNXT_STATE_OPEN flag. + */ + smp_mb__after_atomic(); + while (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + atomic_dec(&ktls->pending); + if (!netif_running(dev)) + return; + if (retry_cnt > BNXT_RETRY_MAX) { + netdev_warn(bp->dev, "%s retry max %d exceeded, state %lx\n", + __func__, retry_cnt, bp->state); + return; + } + retry_cnt++; + msleep(100); + goto retry; + } + + if (direction == TLS_OFFLOAD_CTX_DIR_TX) { + kctx_tx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + kid = kctx_tx->kid; + kctx = &ktls->tck; + } else { + kctx_rx = bnxt_get_ktls_ctx_rx(tls_ctx); + kid = kctx_rx->kid; + kctx = &ktls->rck; + bnxt_hwrm_cfa_tls_filter_free(bp, kid); + kfree(kctx_rx); + } + rc = bnxt_crypto_del(bp, direction, kid); + if (!rc) { + bnxt_free_one_kctx(kctx, kid); + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_DEL]); + else + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_DEL]); + } + atomic_dec(&ktls->pending); +} + +static int +bnxt_ktls_dev_resync(struct net_device *dev, struct sock *sk, u32 seq, + u8 *rcd_sn, enum tls_offload_ctx_dir direction) +{ + struct bnxt_ktls_offload_ctx_rx *kctx_rx; + struct ce_resync_resp_ack_cmd cmd = {0}; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_tx_ring_info *txr; + struct bnxt_ktls_info *ktls; + struct tls_context *tls_ctx; + struct bnxt_mpc_info *mpc; + u32 data; + int rc; + + if (direction == 
TLS_OFFLOAD_CTX_DIR_TX) + return -EOPNOTSUPP; + + ktls = bp->ktls_info; + atomic_inc(&ktls->pending); + /* Make sure bnxt_close_nic() sees pending before we check the + * BNXT_STATE_OPEN flag. + */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + atomic_dec(&ktls->pending); + return -ENODEV; + } + mpc = bp->mpc_info; + txr = &mpc->mpc_rings[BNXT_MPC_RCE_TYPE][0]; + tls_ctx = tls_get_ctx(sk); + kctx_rx = bnxt_get_ktls_ctx_rx(tls_ctx); + spin_lock_bh(&kctx_rx->resync_lock); + if (!kctx_rx->resync_pending || seq != kctx_rx->resync_tcp_seq_no) { + spin_unlock_bh(&kctx_rx->resync_lock); + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_RESYNC_DISCARD]); + atomic_dec(&ktls->pending); + return 0; + } + kctx_rx->resync_pending = false; + spin_unlock_bh(&kctx_rx->resync_lock); + data = CE_RESYNC_RESP_ACK_CMD_OPCODE_RESYNC | + (kctx_rx->kid << CE_RESYNC_RESP_ACK_CMD_KID_SFT); + cmd.resync_status_kid_opcode = cpu_to_le32(data); + cmd.resync_record_tcp_seq_num = cpu_to_le32(seq - TLS_HEADER_SIZE + 1); + bnxt_copy_tls_mp_data(&cmd.resync_record_seq_num_end, rcd_sn, + sizeof(cmd.resync_record_seq_num)); + rc = bnxt_xmit_crypto_cmd(bp, txr, &cmd, sizeof(cmd), 0); + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_RESYNC_ACK]); + atomic_dec(&ktls->pending); + return rc; +} + +static const struct tlsdev_ops bnxt_ktls_ops = { + .tls_dev_add = bnxt_ktls_dev_add, + .tls_dev_del = bnxt_ktls_dev_del, + .tls_dev_resync = bnxt_ktls_dev_resync, +}; + +static int bnxt_set_partition_mode(struct bnxt *bp) +{ + struct hwrm_func_cfg_input *req; + int rc; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + req->fid = cpu_to_le16(0xffff); + req->enables2 = cpu_to_le32(FUNC_CFG_REQ_ENABLES2_XID_PARTITION_CFG); + req->xid_partition_cfg = + cpu_to_le16(FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK | + FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK); + return hwrm_req_send(bp, req); +} + +int bnxt_ktls_init(struct bnxt *bp) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + 
struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct net_device *dev = bp->dev; + int rc; + + if (!ktls) + return 0; + + ktls->tck.max_ctx = hw_resc->resv_tx_key_ctxs; + ktls->rck.max_ctx = hw_resc->resv_rx_key_ctxs; + + if (!ktls->tck.max_ctx || !ktls->rck.max_ctx) + return 0; + + if (ktls->partition_mode) { + rc = bnxt_set_partition_mode(bp); + if (rc) + ktls->partition_mode = false; + } + + rc = bnxt_hwrm_key_ctx_alloc(bp, &ktls->tck, BNXT_KID_BATCH_SIZE, NULL); + if (rc) + return rc; + + rc = bnxt_hwrm_key_ctx_alloc(bp, &ktls->rck, BNXT_KID_BATCH_SIZE, NULL); + if (rc) + return rc; + + ktls->mpc_cache = kmem_cache_create("bnxt_ktls", + sizeof(struct bnxt_crypto_cmd_ctx), + 0, 0, NULL); + if (!ktls->mpc_cache) + return -ENOMEM; + + dev->tlsdev_ops = &bnxt_ktls_ops; + dev->hw_features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX; + dev->features |= NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX; + return 0; +} + +void bnxt_ktls_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle, + struct bnxt_cmpl_entry cmpl[], u32 entries) +{ + struct bnxt_crypto_cmd_ctx *ctx; + struct ce_cmpl *cmp; + u32 len, kid; + + cmp = cmpl[0].cmpl; + if (!handle || entries != 1) { + if (entries != 1) { + netdev_warn(bp->dev, "Invalid entries %d with handle %lx cmpl %08x in %s()\n", + entries, handle, *(u32 *)cmp, __func__); + } + return; + } + ctx = (void *)handle; + kid = CE_CMPL_KID(cmp); + if (ctx->ce_cmp.opaque != BNXT_KMPC_OPAQUE(client, kid)) { + netdev_warn(bp->dev, "Invalid CE cmpl software opaque %08x, cmpl %08x, kid %x\n", + ctx->ce_cmp.opaque, *(u32 *)cmp, kid); + return; + } + len = min_t(u32, cmpl[0].len, sizeof(ctx->ce_cmp)); + memcpy(&ctx->ce_cmp, cmpl[0].cmpl, len); + complete(&ctx->cmp); +} + +static void bnxt_ktls_pre_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + u32 kid, struct crypto_prefix_cmd *pre_cmd) +{ + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd_presync *psbd; + u32 bd_space, space; + u8 *pcmd; + u16 prod; + + prod = txr->tx_prod; + tx_buf = 
&txr->tx_buf_ring[RING_TX(bp, prod)]; + + psbd = (void *)&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + psbd->tx_bd_len_flags_type = CRYPTO_PRESYNC_BD_CMD; + psbd->tx_bd_kid = cpu_to_le32(kid); + psbd->tx_bd_opaque = + SET_TX_OPAQUE(bp, txr, prod, CRYPTO_PREFIX_CMD_BDS + 1); + + prod = NEXT_TX(prod); + pcmd = (void *)&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + bd_space = TX_DESC_CNT - TX_IDX(prod); + space = bd_space * sizeof(struct tx_bd); + if (space >= CRYPTO_PREFIX_CMD_SIZE) { + memcpy(pcmd, pre_cmd, CRYPTO_PREFIX_CMD_SIZE); + prod += CRYPTO_PREFIX_CMD_BDS; + } else { + memcpy(pcmd, pre_cmd, space); + prod += bd_space; + pcmd = (void *)&txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + memcpy(pcmd, (u8 *)pre_cmd + space, + CRYPTO_PREFIX_CMD_SIZE - space); + prod += CRYPTO_PREFIX_CMD_BDS - bd_space; + } + txr->tx_prod = prod; + tx_buf->is_push = 1; + tx_buf->inline_data_bds = CRYPTO_PREFIX_CMD_BDS - 1; +} + +static struct sk_buff * +bnxt_ktls_tx_replay(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, struct tls_record_info *record, + u32 replay_len) +{ + int headlen, headroom; + struct sk_buff *nskb; + struct ipv6hdr *ip6h; + struct tcphdr *th; + struct iphdr *iph; + int remaining, i; + + headlen = skb_headlen(skb); + headroom = skb_headroom(skb); + nskb = alloc_skb(headlen + headroom, GFP_ATOMIC); + if (!nskb) + return NULL; + + skb_reserve(nskb, headroom); + skb_put(nskb, headlen); + memcpy(nskb->data, skb->data, headlen); + skb_copy_header(nskb, skb); + skb_gso_reset(nskb); + th = tcp_hdr(nskb); + th->seq = htonl(tls_record_start_seq(record)); + if (skb->protocol == htons(ETH_P_IPV6)) { + ip6h = ipv6_hdr(nskb); + ip6h->payload_len = htons(replay_len + __tcp_hdrlen(th)); + } else { + iph = ip_hdr(nskb); + iph->tot_len = htons(replay_len + __tcp_hdrlen(th) + + ip_hdrlen(nskb)); + } + remaining = replay_len; + for (i = 0; remaining > 0 && i < record->num_frags; i++) { + skb_frag_t *frag = &skb_shinfo(nskb)->frags[i]; 
+ int len; + + len = skb_frag_size(&record->frags[i]) >= remaining ? + remaining : + skb_frag_size(&record->frags[i]); + + skb_frag_page_copy(frag, &record->frags[i]); + __skb_frag_ref(frag); + skb_frag_off_copy(frag, &record->frags[i]); + skb_frag_size_set(frag, len); + nskb->data_len += len; + nskb->len += len; + remaining -= len; + } + if (remaining) { + dev_kfree_skb_any(nskb); + return NULL; + } + skb_shinfo(nskb)->nr_frags = i; + return nskb; +} + +static int bnxt_ktls_tx_ooo(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, u32 payload_len, u32 seq, + struct tls_context *tls_ctx) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct tls_offload_context_tx *tx_tls_ctx; + struct bnxt_ktls_offload_ctx_tx *kctx_tx; + struct crypto_prefix_cmd *pcmd; + struct tls_record_info *record; + struct sk_buff *nskb = NULL; + unsigned long flags; + u32 hdr_tcp_seq; + u64 rec_sn; + u8 *hdr; + int rc; + + tx_tls_ctx = tls_offload_ctx_tx(tls_ctx); + kctx_tx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + if (before(seq, kctx_tx->tcp_seq_no)) { + u32 total_bds; + + spin_lock_irqsave(&tx_tls_ctx->lock, flags); + record = tls_get_record(tx_tls_ctx, seq, &rec_sn); + if (!record || !record->num_frags) { + rc = -EPROTO; + goto unlock_exit; + } + hdr_tcp_seq = tls_record_start_seq(record); + hdr = skb_frag_address_safe(&record->frags[0]); + + total_bds = CRYPTO_PRESYNC_BDS + skb_shinfo(skb)->nr_frags + 2; + if (bnxt_tx_avail(bp, txr) < total_bds) { + rc = -ENOSPC; + goto unlock_exit; + } + + pcmd = kzalloc(sizeof(*pcmd), GFP_ATOMIC); + if (!pcmd) { + rc = -ENOMEM; + goto unlock_exit; + } + + pcmd->header_tcp_seq_num = cpu_to_le32(hdr_tcp_seq); + pcmd->start_tcp_seq_num = cpu_to_le32(seq); + pcmd->end_tcp_seq_num = cpu_to_le32(seq + payload_len - 1); + if (tls_ctx->prot_info.version == TLS_1_2_VERSION) + memcpy(pcmd->explicit_nonce, hdr + 5, + tls_ctx->prot_info.iv_size); + memcpy(&pcmd->record_seq_num[0], &rec_sn, sizeof(rec_sn)); + + /* 
retransmission includes tag bytes */ + if (before(record->end_seq - tls_ctx->prot_info.tag_size, + seq + payload_len)) { + u32 replay_len = seq - hdr_tcp_seq; + + nskb = bnxt_ktls_tx_replay(bp, txr, skb, record, + replay_len); + if (!nskb) { + rc = -ENOMEM; + goto free_exit; + } + total_bds += skb_shinfo(nskb)->nr_frags + 2; + if (bnxt_tx_avail(bp, txr) < total_bds) { + dev_kfree_skb_any(nskb); + rc = -ENOSPC; + goto free_exit; + } + } + rc = 0; + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_RETRANS]); + bnxt_ktls_pre_xmit(bp, txr, kctx_tx->kid, pcmd); + + if (nskb) { + struct netdev_queue *txq; + u32 kid = kctx_tx->kid; + __le32 lflags; + int txq_map; + + txq_map = skb_get_queue_mapping(nskb); + txq = netdev_get_tx_queue(bp->dev, txq_map); + lflags = cpu_to_le32(TX_BD_FLAGS_CRYPTO_EN | + BNXT_TX_KID_LO(kid)); + __bnxt_start_xmit(bp, txq, txr, nskb, lflags, kid); + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_REPLAY]); + } +free_exit: + kfree(pcmd); + +unlock_exit: + spin_unlock_irqrestore(&tx_tls_ctx->lock, flags); + return rc; + } + return -EOPNOTSUPP; +} + +struct sk_buff *bnxt_ktls_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, __le32 *lflags, u32 *kid) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_ktls_offload_ctx_tx *kctx_tx; + struct tls_context *tls_ctx; + u32 seq; + + if (!IS_ENABLED(CONFIG_TLS_DEVICE) || !skb->sk || + !tls_is_skb_tx_device_offloaded(skb)) + return skb; + + seq = ntohl(tcp_hdr(skb)->seq); + tls_ctx = tls_get_ctx(skb->sk); + kctx_tx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + if (kctx_tx->tcp_seq_no == seq) { + kctx_tx->tcp_seq_no += skb->len - skb_tcp_all_headers(skb); + *kid = kctx_tx->kid; + *lflags |= cpu_to_le32(TX_BD_FLAGS_CRYPTO_EN | + BNXT_TX_KID_LO(*kid)); + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_HW_PKT]); + } else { + u32 payload_len; + int rc; + + payload_len = skb->len - skb_tcp_all_headers(skb); + if (!payload_len) + return skb; + + 
atomic64_inc(&ktls->counters[BNXT_KTLS_TX_OOO]); + + rc = bnxt_ktls_tx_ooo(bp, txr, skb, payload_len, seq, tls_ctx); + if (rc) { + atomic64_inc(&ktls->counters[BNXT_KTLS_TX_SW_PKT]); + return tls_encrypt_skb(skb); + } + *kid = kctx_tx->kid; + *lflags |= cpu_to_le32(TX_BD_FLAGS_CRYPTO_EN | + BNXT_TX_KID_LO(*kid)); + return skb; + } + return skb; +} + +static void bnxt_ktls_resync_nak(struct bnxt *bp, u32 kid, u32 seq) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct ce_resync_resp_ack_cmd cmd = {0}; + struct bnxt_tx_ring_info *txr; + u32 data; + + txr = &mpc->mpc_rings[BNXT_MPC_RCE_TYPE][0]; + data = CE_RESYNC_RESP_ACK_CMD_OPCODE_RESYNC | + (kid << CE_RESYNC_RESP_ACK_CMD_KID_SFT) | + CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS_NAK; + cmd.resync_status_kid_opcode = cpu_to_le32(data); + cmd.resync_record_tcp_seq_num = cpu_to_le32(seq - TLS_HEADER_SIZE + 1); + bnxt_xmit_crypto_cmd(bp, txr, &cmd, sizeof(cmd), 0); + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_RESYNC_NAK]); +} + +static void bnxt_ktls_rx_resync_exp(struct bnxt *bp, + struct bnxt_ktls_offload_ctx_rx *kctx_rx, + u32 bytes) +{ + u32 tcp_seq_no; + + spin_lock_bh(&kctx_rx->resync_lock); + if (!kctx_rx->resync_pending) + goto unlock; + kctx_rx->bytes_since_resync += bytes; + if (kctx_rx->bytes_since_resync > BNXT_KTLS_MAX_RESYNC_BYTES && + time_after(jiffies, kctx_rx->resync_timestamp + + BNXT_KTLS_RESYNC_TMO)) { + kctx_rx->resync_pending = false; + tcp_seq_no = kctx_rx->resync_tcp_seq_no; + spin_unlock_bh(&kctx_rx->resync_lock); + bnxt_ktls_resync_nak(bp, kctx_rx->kid, tcp_seq_no); + return; + } +unlock: + spin_unlock_bh(&kctx_rx->resync_lock); +} + +#define BNXT_METADATA_OFF(len) ALIGN(len, 32) + +void bnxt_ktls_rx(struct bnxt *bp, struct sk_buff *skb, u8 *data_ptr, + unsigned int len, struct rx_cmp *rxcmp, + struct rx_cmp_ext *rxcmp1) +{ + struct bnxt_ktls_info *ktls = bp->ktls_info; + unsigned int off = BNXT_METADATA_OFF(len); + struct bnxt_ktls_offload_ctx_rx 
*kctx_rx; + struct tls_metadata_base_msg *md; + struct tls_context *tls_ctx; + u32 md_data; + + md = (struct tls_metadata_base_msg *)(data_ptr + off); + md_data = le32_to_cpu(md->md_type_link_flags_kid_lo); + if (md_data & TLS_METADATA_BASE_MSG_FLAGS_DECRYPTED) { + skb->decrypted = true; + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_HW_PKT]); + } else { + u32 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); + struct tls_metadata_resync_msg *resync_msg; + u32 payload_off, tcp_seq, md_type; + struct net_device *dev = bp->dev; + struct net *net = dev_net(dev); + u8 agg_bufs, *l3_ptr; + struct tcphdr *th; + struct sock *sk; + + payload_off = RX_CMP_PAYLOAD_OFF(misc); + agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; + /* No payload */ + if (payload_off == len && !agg_bufs) + return; + + l3_ptr = data_ptr + RX_CMP_INNER_L3_OFF(rxcmp1); + if (RX_CMP_IS_IPV6(rxcmp1)) { + struct ipv6hdr *ip6h = (struct ipv6hdr *)l3_ptr; + u8 *nextp = (u8 *)(ip6h + 1); + u8 nexthdr = ip6h->nexthdr; + + while (ipv6_ext_hdr(nexthdr)) { + struct ipv6_opt_hdr *hp; + + hp = (struct ipv6_opt_hdr *)nextp; + if (nexthdr == NEXTHDR_AUTH) + nextp += ipv6_authlen(hp); + else + nextp += ipv6_optlen(hp); + nexthdr = hp->nexthdr; + } + th = (struct tcphdr *)nextp; + sk = __inet6_lookup_established(net, + net->ipv4.tcp_death_row.hashinfo, + &ip6h->saddr, th->source, &ip6h->daddr, + ntohs(th->dest), dev->ifindex, 0); + } else { + struct iphdr *iph = (struct iphdr *)l3_ptr; + + th = (struct tcphdr *)(l3_ptr + iph->ihl * 4); + sk = inet_lookup_established(net, + net->ipv4.tcp_death_row.hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, dev->ifindex); + } + if (!sk) + goto rx_done_no_sk; + + if (!tls_is_sk_rx_device_offloaded(sk)) + goto rx_done; + + tls_ctx = tls_get_ctx(sk); + kctx_rx = bnxt_get_ktls_ctx_rx(tls_ctx); + + md_type = md_data & TLS_METADATA_BASE_MSG_MD_TYPE_MASK; + if (md_type != TLS_METADATA_BASE_MSG_MD_TYPE_TLS_RESYNC) { + bnxt_ktls_rx_resync_exp(bp, kctx_rx, len - 
payload_off); + goto rx_done; + } + + resync_msg = (struct tls_metadata_resync_msg *)md; + tcp_seq = le32_to_cpu(resync_msg->resync_record_tcp_seq_num); + tcp_seq += TLS_HEADER_SIZE - 1; + + spin_lock_bh(&kctx_rx->resync_lock); + kctx_rx->resync_pending = true; + kctx_rx->resync_tcp_seq_no = tcp_seq; + kctx_rx->bytes_since_resync = 0; + kctx_rx->resync_timestamp = jiffies; + spin_unlock_bh(&kctx_rx->resync_lock); + + tls_offload_rx_resync_request(sk, htonl(tcp_seq)); + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_RESYNC_REQ]); +rx_done: + sock_gen_put(sk); +rx_done_no_sk: + atomic64_inc(&ktls->counters[BNXT_KTLS_RX_SW_PKT]); + } +} + +#else /* HAVE_KTLS */ + +void bnxt_alloc_ktls_info(struct bnxt *bp, struct hwrm_func_qcaps_output *resp) +{ +} + +void bnxt_clear_cfa_tls_filters_tbl(struct bnxt *bp) +{ +} + +void bnxt_free_ktls_info(struct bnxt *bp) +{ +} + +void bnxt_hwrm_reserve_pf_key_ctxs(struct bnxt *bp, + struct hwrm_func_cfg_input *req) +{ +} + +int bnxt_ktls_init(struct bnxt *bp) +{ + return 0; +} + +void bnxt_ktls_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle, + struct bnxt_cmpl_entry cmpl[], u32 entries) +{ +} + +struct sk_buff *bnxt_ktls_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, __le32 *lflags, u32 *kid) +{ + return skb; +} + +void bnxt_ktls_rx(struct bnxt *bp, struct sk_buff *skb, u8 *data_ptr, + unsigned int len, struct rx_cmp *rxcmp, + struct rx_cmp_ext *rxcmp1) +{ +} +#endif /* HAVE_KTLS */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.h new file mode 100644 index 000000000000..055895327214 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ktls.h @@ -0,0 +1,267 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_KTLS_H +#define BNXT_KTLS_H + +#include + +#define BNXT_MAX_TX_CRYPTO_KEYS 16384 +#define BNXT_MAX_RX_CRYPTO_KEYS 16384 + +#define BNXT_TX_CRYPTO_KEY_TYPE FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_TX +#define BNXT_RX_CRYPTO_KEY_TYPE FUNC_KEY_CTX_ALLOC_REQ_KEY_CTX_TYPE_RX + +#define BNXT_KID_BATCH_SIZE 128 + +struct bnxt_kid_info { + struct list_head list; + u32 start_id; + u32 count; + DECLARE_BITMAP(ids, BNXT_KID_BATCH_SIZE); +}; + +struct bnxt_kctx { + struct list_head list; + /* to serialize update to the linked list and total_alloc */ + spinlock_t lock; + u8 type; + u32 total_alloc; + u32 max_ctx; + atomic_t alloc_pending; +#define BNXT_KCTX_ALLOC_PENDING_MAX 8 + wait_queue_head_t alloc_pending_wq; + unsigned long *partition_bmap; + unsigned int next; +}; + +#define BNXT_KCTX_ALLOC_OK(kctx) \ + (atomic_read(&((kctx)->alloc_pending)) < BNXT_KCTX_ALLOC_PENDING_MAX) + +struct bnxt_kfltr_info { + u32 kid; + __le64 filter_id; + struct hlist_node hash; + struct rcu_head rcu; +}; + +#define BNXT_MAX_CRYPTO_KEY_TYPE (BNXT_RX_CRYPTO_KEY_TYPE + 1) + +struct bnxt_ktls_info { + u16 max_key_ctxs_alloc; + u16 ctxs_per_partition; + u8 partition_mode:1; + + struct bnxt_kctx kctx[BNXT_MAX_CRYPTO_KEY_TYPE]; + + struct kmem_cache *mpc_cache; + atomic_t pending; + + DECLARE_HASHTABLE(filter_tbl, 8); + /* to serialize adding to and deleting from the filter_tbl */ + spinlock_t filter_lock; + u32 filter_count; +#define BNXT_MAX_KTLS_FILTER 460 + +#define BNXT_KTLS_TX_ADD 0 +#define BNXT_KTLS_TX_DEL 1 +#define BNXT_KTLS_TX_HW_PKT 2 +#define BNXT_KTLS_TX_SW_PKT 3 +#define BNXT_KTLS_TX_OOO 4 +#define BNXT_KTLS_TX_RETRANS 5 +#define BNXT_KTLS_TX_REPLAY 6 + +#define BNXT_KTLS_RX_ADD 7 +#define BNXT_KTLS_RX_DEL 8 +#define BNXT_KTLS_RX_HW_PKT 9 +#define BNXT_KTLS_RX_SW_PKT 10 +#define 
BNXT_KTLS_RX_RESYNC_REQ 11 +#define BNXT_KTLS_RX_RESYNC_ACK 12 +#define BNXT_KTLS_RX_RESYNC_DISCARD 13 +#define BNXT_KTLS_RX_RESYNC_NAK 14 + +#define BNXT_KTLS_MAX_COUNTERS 15 + + atomic64_t counters[BNXT_KTLS_MAX_COUNTERS]; +}; + +#define tck kctx[BNXT_TX_CRYPTO_KEY_TYPE] +#define rck kctx[BNXT_RX_CRYPTO_KEY_TYPE] + +struct bnxt_ktls_offload_ctx_tx { + u32 tcp_seq_no; + u32 kid; +}; + +struct bnxt_ktls_offload_ctx_rx { + u32 kid; + /* to protect resync state */ + spinlock_t resync_lock; + u32 resync_tcp_seq_no; + u32 bytes_since_resync; + unsigned long resync_timestamp; + u8 resync_pending:1; +}; + +#define BNXT_KTLS_RESYNC_TMO msecs_to_jiffies(2500) +#define BNXT_KTLS_MAX_RESYNC_BYTES 32768 + +struct ce_add_cmd { + __le32 ver_algo_kid_opcode; + #define CE_ADD_CMD_OPCODE_MASK 0xfUL + #define CE_ADD_CMD_OPCODE_SFT 0 + #define CE_ADD_CMD_OPCODE_ADD 0x1UL + #define CE_ADD_CMD_KID_MASK 0xfffff0UL + #define CE_ADD_CMD_KID_SFT 4 + #define CE_ADD_CMD_ALGORITHM_MASK 0xf000000UL + #define CE_ADD_CMD_ALGORITHM_SFT 24 + #define CE_ADD_CMD_ALGORITHM_AES_GCM_128 0x1000000UL + #define CE_ADD_CMD_ALGORITHM_AES_GCM_256 0x2000000UL + #define CE_ADD_CMD_VERSION_MASK 0xf0000000UL + #define CE_ADD_CMD_VERSION_SFT 28 + #define CE_ADD_CMD_VERSION_TLS1_2 (0x0UL << 28) + #define CE_ADD_CMD_VERSION_TLS1_3 (0x1UL << 28) + u8 ctx_kind; + #define CE_ADD_CMD_CTX_KIND_MASK 0x1fUL + #define CE_ADD_CMD_CTX_KIND_SFT 0 + #define CE_ADD_CMD_CTX_KIND_CK_TX 0x11UL + #define CE_ADD_CMD_CTX_KIND_CK_RX 0x12UL + u8 unused0[3]; + u8 salt[4]; + u8 unused1[4]; + __le32 pkt_tcp_seq_num; + __le32 tls_header_tcp_seq_num; + u8 record_seq_num[8]; + u8 session_key[32]; + u8 addl_iv[8]; +}; + +#define record_seq_num_end record_seq_num[7] + +struct ce_delete_cmd { + __le32 ctx_kind_kid_opcode; + #define CE_DELETE_CMD_OPCODE_MASK 0xfUL + #define CE_DELETE_CMD_OPCODE_SFT 0 + #define CE_DELETE_CMD_OPCODE_DEL 0x2UL + #define CE_DELETE_CMD_KID_MASK 0xfffff0UL + #define CE_DELETE_CMD_KID_SFT 4 + #define 
CE_DELETE_CMD_CTX_KIND_MASK 0x1f000000UL + #define CE_DELETE_CMD_CTX_KIND_SFT 24 + #define CE_DELETE_CMD_CTX_KIND_CK_TX (0x11UL << 24) + #define CE_DELETE_CMD_CTX_KIND_CK_RX (0x12UL << 24) +}; + +struct ce_resync_resp_ack_cmd { + __le32 resync_status_kid_opcode; + #define CE_RESYNC_RESP_ACK_CMD_OPCODE_MASK 0xfUL + #define CE_RESYNC_RESP_ACK_CMD_OPCODE_SFT 0 + #define CE_RESYNC_RESP_ACK_CMD_OPCODE_RESYNC 0x3UL + #define CE_RESYNC_RESP_ACK_CMD_KID_MASK 0xfffff0UL + #define CE_RESYNC_RESP_ACK_CMD_KID_SFT 4 + #define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS 0x1000000UL + #define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS_ACK (0x0UL << 24) + #define CE_RESYNC_RESP_ACK_CMD_RESYNC_STATUS_NAK (0x1UL << 24) + __le32 resync_record_tcp_seq_num; + u8 resync_record_seq_num[8]; +}; + +#define resync_record_seq_num_end resync_record_seq_num[7] + +#define CE_CMD_KID_MASK 0xfffff0UL +#define CE_CMD_KID_SFT 4 + +#define CE_CMD_KID(cmd_p) \ + ((*(u32 *)(cmd_p) & CE_CMD_KID_MASK) >> CE_CMD_KID_SFT) + +#define BNXT_KMPC_OPAQUE(client, kid) \ + (((client) << 24) | (kid)) + +#define BNXT_INV_KMPC_OPAQUE 0xffffffff + +struct ce_cmpl { + __le16 client_subtype_type; + #define CE_CMPL_TYPE_MASK 0x3fUL + #define CE_CMPL_TYPE_SFT 0 + #define CE_CMPL_TYPE_MID_PATH_SHORT 0x1eUL + #define CE_CMPL_SUBTYPE_MASK 0xf00UL + #define CE_CMPL_SUBTYPE_SFT 8 + #define CE_CMPL_SUBTYPE_SOLICITED (0x0UL << 8) + #define CE_CMPL_SUBTYPE_ERR (0x1UL << 8) + #define CE_CMPL_SUBTYPE_RESYNC (0x2UL << 8) + #define CE_CMPL_MP_CLIENT_MASK 0xf000UL + #define CE_CMPL_MP_CLIENT_SFT 12 + #define CE_CMPL_MP_CLIENT_TCE (0x0UL << 12) + #define CE_CMPL_MP_CLIENT_RCE (0x1UL << 12) + __le16 status; + #define CE_CMPL_STATUS_MASK 0xfUL + #define CE_CMPL_STATUS_SFT 0 + #define CE_CMPL_STATUS_OK 0x0UL + #define CE_CMPL_STATUS_CTX_LD_ERR 0x1UL + #define CE_CMPL_STATUS_FID_CHK_ERR 0x2UL + #define CE_CMPL_STATUS_CTX_VER_ERR 0x3UL + #define CE_CMPL_STATUS_DST_ID_ERR 0x4UL + #define CE_CMPL_STATUS_MP_CMD_ERR 0x5UL + u32 opaque; + __le32 v; + 
#define CE_CMPL_V 0x1UL + __le32 kid; + #define CE_CMPL_KID_MASK 0xfffffUL + #define CE_CMPL_KID_SFT 0 +}; + +#define CE_CMPL_STATUS(ce_cmpl) \ + (le16_to_cpu((ce_cmpl)->status) & CE_CMPL_STATUS_MASK) + +#define CE_CMPL_KID(ce_cmpl) \ + (le32_to_cpu((ce_cmpl)->kid) & CE_CMPL_KID_MASK) + +struct crypto_prefix_cmd { + __le32 flags; + #define CRYPTO_PREFIX_CMD_FLAGS_UPDATE_IN_ORDER_VAR 0x1UL + #define CRYPTO_PREFIX_CMD_FLAGS_FULL_REPLAY_RETRAN 0x2UL + __le32 header_tcp_seq_num; + __le32 start_tcp_seq_num; + __le32 end_tcp_seq_num; + u8 explicit_nonce[8]; + u8 record_seq_num[8]; +}; + +#define CRYPTO_PREFIX_CMD_SIZE ((u32)sizeof(struct crypto_prefix_cmd)) +#define CRYPTO_PREFIX_CMD_BDS (CRYPTO_PREFIX_CMD_SIZE / sizeof(struct tx_bd)) +#define CRYPTO_PRESYNC_BDS (CRYPTO_PREFIX_CMD_BDS + 1) + +#define CRYPTO_PRESYNC_BD_CMD \ + (cpu_to_le32((CRYPTO_PREFIX_CMD_SIZE << TX_BD_LEN_SHIFT) | \ + (CRYPTO_PRESYNC_BDS << TX_BD_FLAGS_BD_CNT_SHIFT) | \ + TX_BD_TYPE_PRESYNC_TX_BD)) + +struct bnxt_crypto_cmd_ctx { + struct completion cmp; + struct ce_cmpl ce_cmp; +}; + +static inline bool bnxt_ktls_busy(struct bnxt *bp) +{ + return bp->ktls_info && atomic_read(&bp->ktls_info->pending) > 0; +} + +void bnxt_alloc_ktls_info(struct bnxt *bp, struct hwrm_func_qcaps_output *resp); +void bnxt_clear_cfa_tls_filters_tbl(struct bnxt *bp); +void bnxt_free_ktls_info(struct bnxt *bp); +void bnxt_hwrm_reserve_pf_key_ctxs(struct bnxt *bp, + struct hwrm_func_cfg_input *req); +int bnxt_ktls_init(struct bnxt *bp); +void bnxt_ktls_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle, + struct bnxt_cmpl_entry cmpl[], u32 entries); +struct sk_buff *bnxt_ktls_xmit(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + struct sk_buff *skb, __le32 *lflags, u32 *kid); +void bnxt_ktls_rx(struct bnxt *bp, struct sk_buff *skb, u8 *data_ptr, + unsigned int len, struct rx_cmp *rxcmp, + struct rx_cmp_ext *rxcmp1); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.c 
b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.c new file mode 100644 index 000000000000..bfd3ce1d46a0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.c @@ -0,0 +1,806 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ulp.h" +#include "bnxt_lfc.h" +#include "bnxt_lfc_ioctl.h" + +#ifdef CONFIG_BNXT_LFC + +#ifdef HAVE_MODULE_IMPORT_NS_DMA_BUF +MODULE_IMPORT_NS(DMA_BUF); +#endif + +#define MAX_LFC_CACHED_NET_DEVICES 32 +#define PRIME_1 29 +#define PRIME_2 31 + +static struct bnxt_gloabl_dev blfc_global_dev; +static struct bnxt_lfc_dev_array blfc_array[MAX_LFC_CACHED_NET_DEVICES]; + +static bool bnxt_lfc_inited; +static bool is_domain_available; +static int domain_no; + +static int lfc_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + u32 i; + struct bnxt_lfc_dev *blfc_dev; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_UNREGISTER: + for (i = 0; i < MAX_LFC_CACHED_NET_DEVICES; i++) { + blfc_dev = blfc_array[i].bnxt_lfc_dev; + if (blfc_dev && blfc_dev->ndev == dev) { + dev_put(blfc_dev->ndev); + kfree(blfc_dev); + blfc_array[i].bnxt_lfc_dev = NULL; + blfc_array[i].taken = 0; + break; + } + } + break; + default: + /* do nothing */ + break; + } + return 0; +} + +struct notifier_block lfc_device_notifier = { + .notifier_call = lfc_device_event +}; + +static u32 bnxt_lfc_get_hash_key(u32 bus, u32 devfn) +{ + return ((bus * PRIME_1 + devfn) * PRIME_2) % MAX_LFC_CACHED_NET_DEVICES; 
+} + +static int bnxt_lfc_send_hwrm(struct bnxt *bp, struct bnxt_fw_msg *fw_msg) +{ + struct output *resp; + struct input *req; + u32 resp_len; + int rc; + + if (bp->fw_reset_state) + return -EBUSY; + + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; + + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + resp_len = le16_to_cpu(resp->resp_len); + if (resp_len) { + if (fw_msg->resp_max_len < resp_len) + resp_len = fw_msg->resp_max_len; + + memcpy(fw_msg->resp, resp, resp_len); + } + hwrm_req_drop(bp, req); + return rc; +} + +static bool bnxt_lfc_is_valid_pdev(struct pci_dev *pdev) +{ + int32_t idx; + + if (!pdev) { + BNXT_LFC_ERR(NULL, "No such PCI device\n"); + return false; + } + + if (pdev->vendor != PCI_VENDOR_ID_BROADCOM) { + pci_dev_put(pdev); + BNXT_LFC_ERR(NULL, "Not a Broadcom PCI device\n"); + return false; + } + + if (strncmp(dev_driver_string(&pdev->dev), "bnxt_en", 7)) { + BNXT_LFC_DEBUG(&pdev->dev, + "This device is not owned by bnxt_en, instead owned by %s\n", + dev_driver_string(&pdev->dev)); + pci_dev_put(pdev); + return false; + } + + for (idx = 0; bnxt_pci_tbl[idx].device != 0; idx++) { + if (pdev->device == bnxt_pci_tbl[idx].device) { + BNXT_LFC_DEBUG(&pdev->dev, "Found valid PCI device\n"); + return true; + } + } + pci_dev_put(pdev); + BNXT_LFC_ERR(NULL, "PCI device not supported\n"); + return false; +} + +static void bnxt_lfc_init_req_hdr(struct input *req_hdr, u16 req_type, + u16 cpr_id, u16 tgt_id) +{ + req_hdr->req_type = cpu_to_le16(req_type); + req_hdr->cmpl_ring = cpu_to_le16(cpr_id); + req_hdr->target_id = cpu_to_le16(tgt_id); +} + +static void bnxt_lfc_prep_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg, + int32_t msg_len, void *resp, + int32_t resp_max_len, + int32_t timeout) +{ + fw_msg->msg = msg; + fw_msg->msg_len = msg_len; + fw_msg->resp = resp; + 
fw_msg->resp_max_len = resp_max_len; + fw_msg->timeout = timeout; +} + +static int32_t bnxt_lfc_process_nvm_flush(struct bnxt_lfc_dev *blfc_dev) +{ + struct bnxt *bp = blfc_dev->bp; + int32_t rc = 0; + + struct hwrm_nvm_flush_input req = {0}; + struct hwrm_nvm_flush_output resp = {0}; + struct bnxt_fw_msg fw_msg; + + bnxt_lfc_init_req_hdr((void *)&req, + HWRM_NVM_FLUSH, -1, -1); + bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), BNXT_NVM_FLUSH_TIMEOUT); + rc = bnxt_lfc_send_hwrm(bp, &fw_msg); + if (rc) + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to send NVM_FLUSH FW msg, rc = 0x%x", rc); + return rc; + +} +static int32_t bnxt_lfc_process_nvm_get_var_req(struct bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_nvm_get_var_req + *nvm_get_var_req) +{ + int32_t rc; + uint16_t len_in_bytes; + struct pci_dev *pdev = blfc_dev->pdev; + struct bnxt *bp = blfc_dev->bp; + + struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_output resp = {0}; + struct bnxt_fw_msg fw_msg; + void *dest_data_addr = NULL; + dma_addr_t dest_data_dma_addr; + + if (nvm_get_var_req->len_in_bits == 0) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n"); + return -ENOMEM; + } + + len_in_bytes = (nvm_get_var_req->len_in_bits + 7) / 8; + dest_data_addr = dma_alloc_coherent(&pdev->dev, + len_in_bytes, + &dest_data_dma_addr, + GFP_KERNEL); + + if (dest_data_addr == NULL) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to alloc mem for data\n"); + return -ENOMEM; + } + + bnxt_lfc_init_req_hdr((void *)&req, + HWRM_NVM_GET_VARIABLE, -1, -1); + req.dest_data_addr = cpu_to_le64(dest_data_dma_addr); + req.data_len = cpu_to_le16(nvm_get_var_req->len_in_bits); + req.option_num = cpu_to_le16(nvm_get_var_req->option_num); + req.dimensions = cpu_to_le16(nvm_get_var_req->dimensions); + req.index_0 = cpu_to_le16(nvm_get_var_req->index_0); + req.index_1 = cpu_to_le16(nvm_get_var_req->index_1); + req.index_2 = cpu_to_le16(nvm_get_var_req->index_2); + 
req.index_3 = cpu_to_le16(nvm_get_var_req->index_3);
+
+	bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+			     sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+
+	rc = bnxt_lfc_send_hwrm(bp, &fw_msg);
+	if (rc) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to send NVM_GET_VARIABLE FW msg, rc = 0x%x",
+			     rc);
+		goto done;
+	}
+
+	rc = copy_to_user(nvm_get_var_req->out_val, dest_data_addr,
+			  len_in_bytes);
+	if (rc != 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to send %d characters to the user\n", rc);
+		rc = -EFAULT;
+	}
+done:
+	dma_free_coherent(&pdev->dev, len_in_bytes, /* must match alloc size (bytes, not bits) */
+			  dest_data_addr,
+			  dest_data_dma_addr);
+	return rc;
+}
+
+static int32_t bnxt_lfc_process_nvm_set_var_req(struct bnxt_lfc_dev *blfc_dev,
+						struct bnxt_lfc_nvm_set_var_req
+						*nvm_set_var_req)
+{
+	int32_t rc;
+	uint16_t len_in_bytes;
+	struct pci_dev *pdev = blfc_dev->pdev;
+	struct bnxt *bp = blfc_dev->bp;
+
+	struct hwrm_nvm_set_variable_input req = {0};
+	struct hwrm_nvm_set_variable_output resp = {0};
+	struct bnxt_fw_msg fw_msg;
+	void *src_data_addr = NULL;
+	dma_addr_t src_data_dma_addr;
+
+	if (nvm_set_var_req->len_in_bits == 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev, "Invalid Length\n");
+		return -ENOMEM;
+	}
+
+	len_in_bytes = (nvm_set_var_req->len_in_bits + 7) / 8;
+	src_data_addr = dma_alloc_coherent(&pdev->dev,
+					   len_in_bytes,
+					   &src_data_dma_addr,
+					   GFP_KERNEL);
+
+	if (src_data_addr == NULL) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to alloc mem for data\n");
+		return -ENOMEM;
+	}
+
+	rc = copy_from_user(src_data_addr,
+			    nvm_set_var_req->in_val,
+			    len_in_bytes);
+
+	if (rc != 0) {
+		BNXT_LFC_ERR(&blfc_dev->pdev->dev,
+			     "Failed to send %d bytes from the user\n", rc);
+		rc = -EFAULT;
+		goto done;
+	}
+
+	bnxt_lfc_init_req_hdr((void *)&req,
+			      HWRM_NVM_SET_VARIABLE, -1, -1);
+	req.src_data_addr = cpu_to_le64(src_data_dma_addr);
+	req.data_len = cpu_to_le16(nvm_set_var_req->len_in_bits);
+	req.option_num = 
cpu_to_le16(nvm_set_var_req->option_num); + req.dimensions = cpu_to_le16(nvm_set_var_req->dimensions); + req.index_0 = cpu_to_le16(nvm_set_var_req->index_0); + req.index_1 = cpu_to_le16(nvm_set_var_req->index_1); + req.index_2 = cpu_to_le16(nvm_set_var_req->index_2); + req.index_3 = cpu_to_le16(nvm_set_var_req->index_3); + + bnxt_lfc_prep_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); + + rc = bnxt_lfc_send_hwrm(bp, &fw_msg); + if (rc) + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to send NVM_SET_VARIABLE FW msg, rc = 0x%x", rc); +done: + dma_free_coherent(&pdev->dev, len_in_bytes, + src_data_addr, + src_data_dma_addr); + return rc; +} + +static int32_t bnxt_lfc_fill_fw_msg(struct pci_dev *pdev, + struct bnxt_fw_msg *fw_msg, + struct blfc_fw_msg *msg) +{ + int32_t rc = 0; + + if (copy_from_user(fw_msg->msg, + (void __user *)((unsigned long)msg->usr_req), + msg->len_req)) { + BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n"); + return -EFAULT; + } + + fw_msg->msg_len = msg->len_req; + fw_msg->resp_max_len = msg->len_resp; + if (!msg->timeout) + fw_msg->timeout = DFLT_HWRM_CMD_TIMEOUT; + else + fw_msg->timeout = msg->timeout; + return rc; +} + +static int32_t bnxt_lfc_prepare_dma_operations(struct bnxt_lfc_dev *blfc_dev, + struct blfc_fw_msg *msg, + struct bnxt_fw_msg *fw_msg) +{ + int32_t rc = 0; + uint8_t i, num_allocated = 0; + void *dma_ptr; + + for (i = 0; i < msg->num_dma_indications; i++) { + if (msg->dma[i].length == 0 || + msg->dma[i].length > MAX_DMA_MEM_SIZE) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Invalid DMA memory length\n"); + rc = -EINVAL; + goto err; + } + blfc_dev->dma_virt_addr[i] = dma_alloc_coherent( + &blfc_dev->pdev->dev, + msg->dma[i].length, + &blfc_dev->dma_addr[i], + GFP_KERNEL); + if (!blfc_dev->dma_virt_addr[i]) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to allocate memory for data_addr[%d]\n", + i); + rc = -ENOMEM; + goto err; + } + num_allocated++; + if 
(!(msg->dma[i].read_or_write)) { + if (copy_from_user(blfc_dev->dma_virt_addr[i], + (void __user *)( + (unsigned long)(msg->dma[i].data)), + msg->dma[i].length)) { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Failed to copy data from user for data_addr[%d]\n", + i); + rc = -EFAULT; + goto err; + } + } + dma_ptr = fw_msg->msg + msg->dma[i].offset; + + if ((PTR_ALIGN(dma_ptr, 8) == dma_ptr) && + (msg->dma[i].offset < msg->len_req)) { + __le64 *dmap = dma_ptr; + + *dmap = cpu_to_le64(blfc_dev->dma_addr[i]); + } else { + BNXT_LFC_ERR(&blfc_dev->pdev->dev, + "Wrong input parameter\n"); + rc = -EINVAL; + goto err; + } + } + return rc; +err: + for (i = 0; i < num_allocated; i++) + dma_free_coherent(&blfc_dev->pdev->dev, + msg->dma[i].length, + blfc_dev->dma_virt_addr[i], + blfc_dev->dma_addr[i]); + return rc; +} + +static int32_t bnxt_lfc_process_hwrm(struct bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_req *lfc_req) +{ + int32_t rc = 0, i, hwrm_err = 0; + struct bnxt *bp = blfc_dev->bp; + struct pci_dev *pdev = blfc_dev->pdev; + struct bnxt_fw_msg fw_msg; + struct blfc_fw_msg msg, *msg2 = NULL; + + if (copy_from_user(&msg, + (void __user *)((unsigned long)lfc_req->req.hreq), + sizeof(msg))) { + BNXT_LFC_ERR(&pdev->dev, "Failed to copy data from user\n"); + return -EFAULT; + } + + if (msg.len_req > blfc_dev->bp->hwrm_max_ext_req_len || + msg.len_resp > BNXT_LFC_MAX_HWRM_RESP_LENGTH) { + BNXT_LFC_ERR(&pdev->dev, + "Invalid length\n"); + return -EINVAL; + } + + fw_msg.msg = kmalloc(msg.len_req, GFP_KERNEL); + if (!fw_msg.msg) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to allocate input req memory\n"); + return -ENOMEM; + } + + fw_msg.resp = kmalloc(msg.len_resp, GFP_KERNEL); + if (!fw_msg.resp) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to allocate resp memory\n"); + rc = -ENOMEM; + goto err; + } + + rc = bnxt_lfc_fill_fw_msg(pdev, &fw_msg, &msg); + if (rc) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to fill the FW data\n"); + goto err; + } + + if (msg.num_dma_indications) { + if 
(msg.num_dma_indications > MAX_NUM_DMA_INDICATIONS) { + BNXT_LFC_ERR(&pdev->dev, + "Invalid DMA indications\n"); + rc = -EINVAL; + goto err1; + } + msg2 = kmalloc((sizeof(struct blfc_fw_msg) + + (msg.num_dma_indications * sizeof(struct dma_info))), + GFP_KERNEL); + if (!msg2) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to allocate memory\n"); + rc = -ENOMEM; + goto err; + } + if (copy_from_user((void *)msg2, + (void __user *)((unsigned long)lfc_req->req.hreq), + (sizeof(struct blfc_fw_msg) + + (msg.num_dma_indications * + sizeof(struct dma_info))))) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to copy data from user\n"); + rc = -EFAULT; + goto err; + } + rc = bnxt_lfc_prepare_dma_operations(blfc_dev, msg2, &fw_msg); + if (rc) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to perform DMA operaions\n"); + goto err; + } + } + + hwrm_err = bnxt_lfc_send_hwrm(bp, &fw_msg); + if (hwrm_err) { + struct input *req = fw_msg.msg; + + BNXT_LFC_DEBUG(&pdev->dev, + "Failed to send FW msg type = 0x%x, error = 0x%x", + req->req_type, hwrm_err); + goto err; + } + + for (i = 0; i < msg.num_dma_indications; i++) { + if (msg2->dma[i].read_or_write) { + if (copy_to_user((void __user *) + ((unsigned long)msg2->dma[i].data), + blfc_dev->dma_virt_addr[i], + msg2->dma[i].length)) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to copy data to user\n"); + rc = -EFAULT; + goto err; + } + } + } +err: + for (i = 0; i < msg.num_dma_indications; i++) + dma_free_coherent(&pdev->dev, msg2->dma[i].length, + blfc_dev->dma_virt_addr[i], + blfc_dev->dma_addr[i]); + + if (hwrm_err != -EBUSY && hwrm_err != -E2BIG) { + if (copy_to_user((void __user *)((unsigned long)msg.usr_resp), + fw_msg.resp, + msg.len_resp)) { + BNXT_LFC_ERR(&pdev->dev, + "Failed to copy data to user\n"); + rc = -EFAULT; + } + } +err1: + kfree(msg2); + kfree(fw_msg.msg); + kfree(fw_msg.resp); + + /* If HWRM command fails, return the response error code */ + if (hwrm_err) + return hwrm_err; + return rc; +} + +static int32_t bnxt_lfc_process_req(struct 
bnxt_lfc_dev *blfc_dev, + struct bnxt_lfc_req *lfc_req) +{ + int32_t rc; + + switch (lfc_req->hdr.req_type) { + case BNXT_LFC_NVM_GET_VAR_REQ: + rc = bnxt_lfc_process_nvm_get_var_req(blfc_dev, + &lfc_req->req.nvm_get_var_req); + break; + case BNXT_LFC_NVM_SET_VAR_REQ: + rc = bnxt_lfc_process_nvm_set_var_req(blfc_dev, + &lfc_req->req.nvm_set_var_req); + break; + case BNXT_LFC_NVM_FLUSH_REQ: + rc = bnxt_lfc_process_nvm_flush(blfc_dev); + break; + case BNXT_LFC_GENERIC_HWRM_REQ: + rc = bnxt_lfc_process_hwrm(blfc_dev, lfc_req); + break; + default: + BNXT_LFC_DEBUG(&blfc_dev->pdev->dev, + "No valid request found\n"); + return -EINVAL; + } + return rc; +} + +static int32_t bnxt_lfc_open(struct inode *inode, struct file *flip) +{ + BNXT_LFC_DEBUG(NULL, "open is called"); + return 0; +} + +static ssize_t bnxt_lfc_read(struct file *filp, char __user *buff, + size_t length, loff_t *offset) +{ + return -EINVAL; +} + +static ssize_t bnxt_lfc_write(struct file *filp, const char __user *ubuff, + size_t len, loff_t *offset) +{ + struct bnxt_lfc_generic_msg kbuff; + + if (len != sizeof(kbuff)) { + BNXT_LFC_ERR(NULL, "Invalid length provided (%zu)\n", len); + return -EINVAL; + } + + if (copy_from_user(&kbuff, (void __user *)ubuff, len)) { + BNXT_LFC_ERR(NULL, "Failed to copy data from user application\n"); + return -EFAULT; + } + + switch (kbuff.key) { + case BNXT_LFC_KEY_DOMAIN_NO: + is_domain_available = true; + domain_no = kbuff.value; + break; + default: + BNXT_LFC_ERR(NULL, "Invalid Key provided (%u)\n", kbuff.key); + return -EINVAL; + } + return len; +} + +static loff_t bnxt_lfc_seek(struct file *filp, loff_t offset, int32_t whence) +{ + return -EINVAL; +} + +static long bnxt_lfc_ioctl(struct file *flip, unsigned int cmd, + unsigned long args) +{ + int32_t rc; + struct bnxt_lfc_req lfc_req; + u32 index; + struct bnxt_lfc_dev *blfc_dev = NULL; + + rc = copy_from_user(&lfc_req, (void __user *)args, sizeof(lfc_req)); + if (rc) { + BNXT_LFC_ERR(NULL, + "Failed to send %d bytes 
from the user\n", rc); + return -EINVAL; + } + + switch (cmd) { + case BNXT_LFC_REQ: + BNXT_LFC_DEBUG(NULL, "BNXT_LFC_REQ called"); + mutex_lock(&blfc_global_dev.bnxt_lfc_lock); + index = bnxt_lfc_get_hash_key(lfc_req.hdr.bus, lfc_req.hdr.devfn); + if (blfc_array[index].taken) { + if (lfc_req.hdr.devfn != blfc_array[index].bnxt_lfc_dev->devfn || + lfc_req.hdr.bus != blfc_array[index].bnxt_lfc_dev->bus || + domain_no != blfc_array[index].bnxt_lfc_dev->domain) { + /* we have a false hit. Free the older blfc device + store the new one */ + rtnl_lock(); + dev_put(blfc_array[index].bnxt_lfc_dev->ndev); + kfree(blfc_array[index].bnxt_lfc_dev); + blfc_array[index].bnxt_lfc_dev = NULL; + blfc_array[index].taken = 0; + rtnl_unlock(); + goto not_taken; + } + blfc_dev = blfc_array[index].bnxt_lfc_dev; + } + else { +not_taken: + blfc_dev = kzalloc(sizeof(struct bnxt_lfc_dev), GFP_KERNEL); + if (!blfc_dev) { + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + return -EINVAL; + } + blfc_dev->pdev = + pci_get_domain_bus_and_slot( + ((is_domain_available == true) ? 
+ domain_no : 0), lfc_req.hdr.bus, + lfc_req.hdr.devfn); + + if (bnxt_lfc_is_valid_pdev(blfc_dev->pdev) != true) { + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + kfree(blfc_dev); + return -EINVAL; + } + + rtnl_lock(); + blfc_dev->ndev = pci_get_drvdata(blfc_dev->pdev); + if (!blfc_dev->ndev) { + printk("Driver with provided BDF doesn't exist\n"); + pci_dev_put(blfc_dev->pdev); + rtnl_unlock(); + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + kfree(blfc_dev); + return -EINVAL; + } + + dev_hold(blfc_dev->ndev); + rtnl_unlock(); + if (try_module_get(blfc_dev->pdev->driver->driver.owner)) { + blfc_dev->bp = netdev_priv(blfc_dev->ndev); + if (!blfc_dev->bp) + rc = -EINVAL; + module_put(blfc_dev->pdev->driver->driver.owner); + } else { + rc = -EINVAL; + } + pci_dev_put(blfc_dev->pdev); + + if (rc) { + dev_put(blfc_dev->ndev); + kfree(blfc_dev); + is_domain_available = false; + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + return -EINVAL; + } + + blfc_dev->bus = lfc_req.hdr.bus; + blfc_dev->devfn = lfc_req.hdr.devfn; + blfc_dev->domain = domain_no; + rtnl_lock(); + blfc_array[index].bnxt_lfc_dev = blfc_dev; + blfc_array[index].taken = 1; + rtnl_unlock(); + } + + rc = bnxt_lfc_process_req(blfc_dev, &lfc_req); + mutex_unlock(&blfc_global_dev.bnxt_lfc_lock); + break; + + default: + BNXT_LFC_ERR(NULL, "No Valid IOCTL found\n"); + return -EINVAL; + +} + return rc; +} + +static int32_t bnxt_lfc_release(struct inode *inode, struct file *filp) +{ + BNXT_LFC_DEBUG(NULL, "release is called"); + return 0; +} + +int32_t __init bnxt_lfc_init(void) +{ + int32_t rc; + + rc = alloc_chrdev_region(&blfc_global_dev.d_dev, 0, 1, BNXT_LFC_DEV_NAME); + if (rc < 0) { + BNXT_LFC_ERR(NULL, "Allocation of char dev region is failed\n"); + return rc; + } + + blfc_global_dev.d_class = class_create(THIS_MODULE, BNXT_LFC_DEV_NAME); + if (IS_ERR(blfc_global_dev.d_class)) { + BNXT_LFC_ERR(NULL, "Class creation is failed\n"); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + + 
if (IS_ERR(device_create(blfc_global_dev.d_class, NULL, blfc_global_dev.d_dev, NULL, + BNXT_LFC_DEV_NAME))) { + BNXT_LFC_ERR(NULL, "Device creation is failed\n"); + class_destroy(blfc_global_dev.d_class); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + + blfc_global_dev.fops.owner = THIS_MODULE; + blfc_global_dev.fops.open = bnxt_lfc_open; + blfc_global_dev.fops.read = bnxt_lfc_read; + blfc_global_dev.fops.write = bnxt_lfc_write; + blfc_global_dev.fops.llseek = bnxt_lfc_seek; + blfc_global_dev.fops.unlocked_ioctl = bnxt_lfc_ioctl; + blfc_global_dev.fops.release = bnxt_lfc_release; + + cdev_init(&blfc_global_dev.c_dev, &blfc_global_dev.fops); + if (cdev_add(&blfc_global_dev.c_dev, blfc_global_dev.d_dev, 1) == -1) { + BNXT_LFC_ERR(NULL, "Char device addition is failed\n"); + device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev); + class_destroy(blfc_global_dev.d_class); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); + return -1; + } + mutex_init(&blfc_global_dev.bnxt_lfc_lock); + bnxt_lfc_inited = true; + + memset(blfc_array, 0, sizeof(struct bnxt_lfc_dev_array) + * MAX_LFC_CACHED_NET_DEVICES); + + rc = bnxt_en_register_netdevice_notifier(&lfc_device_notifier); + if (rc) { + BNXT_LFC_ERR(NULL, "Error on register NETDEV event notifier\n"); + return -1; + } + return 0; +} + +void bnxt_lfc_exit(void) +{ + struct bnxt_lfc_dev *blfc_dev; + u32 i; + if (!bnxt_lfc_inited) + return; + + rtnl_lock(); + for (i = 0; i < MAX_LFC_CACHED_NET_DEVICES; i++) { + blfc_dev = blfc_array[i].bnxt_lfc_dev; + if (blfc_dev) { + blfc_array[i].bnxt_lfc_dev = NULL; + blfc_array[i].taken = 0; + dev_put(blfc_dev->ndev); + kfree(blfc_dev); + } + } + rtnl_unlock(); + + bnxt_en_unregister_netdevice_notifier(&lfc_device_notifier); + cdev_del(&blfc_global_dev.c_dev); + device_destroy(blfc_global_dev.d_class, blfc_global_dev.d_dev); + class_destroy(blfc_global_dev.d_class); + unregister_chrdev_region(blfc_global_dev.d_dev, 1); +} +#endif diff --git 
a/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.h new file mode 100644 index 000000000000..a83c62399639 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc.h @@ -0,0 +1,98 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2020 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_LFC_H +#define BNXT_LFC_H + +#ifdef CONFIG_BNXT_LFC + +/* Assuming that no HWRM command requires more than 10 DMA address + * as input requests. + */ +#define MAX_NUM_DMA_INDICATIONS 10 +#define MAX_DMA_MEM_SIZE 0x10000 /*64K*/ + +/* To prevent mismatch between bnxtnvm user application and bnxt_lfc + * keeping the max. size as 512. + */ +#define BNXT_LFC_MAX_HWRM_REQ_LENGTH HWRM_MAX_REQ_LEN +#define BNXT_LFC_MAX_HWRM_RESP_LENGTH (512) + +#define BNXT_NVM_FLUSH_TIMEOUT ((DFLT_HWRM_CMD_TIMEOUT) * 100) +#define BNXT_LFC_DEV_NAME "bnxt_lfc" +#define DRV_NAME BNXT_LFC_DEV_NAME + +#define BNXT_LFC_ERR(dev, fmt, arg...) \ + dev_err(dev, "%s: %s:%d: "fmt "\n", \ + DRV_NAME, __func__, \ + __LINE__, ##arg) \ + +#define BNXT_LFC_WARN(dev, fmt, arg...) \ + dev_warn(dev, "%s: %s:%d: "fmt "\n", \ + DRV_NAME, __func__, \ + __LINE__, ##arg) \ + +#define BNXT_LFC_INFO(dev, fmt, arg...) \ + dev_info(dev, "%s: %s:%d: "fmt "\n", \ + DRV_NAME, __func__, \ + __LINE__, ##arg) \ + +#define BNXT_LFC_DEBUG(dev, fmt, arg...) \ + dev_dbg(dev, "%s: %s:%d: "fmt "\n", \ + DRV_NAME, __func__, \ + __LINE__, ##arg) \ + +struct bnxt_lfc_dev_array { + u32 taken; + struct bnxt_lfc_dev *bnxt_lfc_dev; +}; + +struct bnxt_lfc_dev { + struct pci_dev *pdev; + struct net_device *ndev; + + struct bnxt *bp; + + int domain; + u32 bus; + u32 devfn; + + /* dma_virt_addr to hold the virtual address + * of the DMA memory. 
+ */ + void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS]; + /* dma_addr to hold the DMA addresses*/ + dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS]; +}; + +struct bnxt_gloabl_dev { + dev_t d_dev; + struct class *d_class; + struct cdev c_dev; + + struct file_operations fops; + + struct mutex bnxt_lfc_lock; +}; + +int32_t bnxt_lfc_init(void); +void bnxt_lfc_exit(void); + +#else + +static inline int32_t bnxt_lfc_init() +{ +} + +static inline void bnxt_lfc_exit() +{ +} +#endif +#endif /*BNXT_LFC_H*/ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc_ioctl.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc_ioctl.h new file mode 100644 index 000000000000..396cf55984de --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_lfc_ioctl.h @@ -0,0 +1,111 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_LFC_IOCTL_H +#define BNXT_LFC_IOCTL_H + +#define BNXT_LFC_IOCTL_MAGIC 0x98 +#define BNXT_LFC_VER 1 + +enum bnxt_lfc_req_type { + BNXT_LFC_NVM_GET_VAR_REQ = 1, + BNXT_LFC_NVM_SET_VAR_REQ, + BNXT_LFC_NVM_FLUSH_REQ, + BNXT_LFC_GENERIC_HWRM_REQ, +}; + +struct bnxt_lfc_req_hdr { + uint32_t ver; + uint32_t bus; + uint32_t devfn; + enum bnxt_lfc_req_type req_type; +}; + +struct bnxt_lfc_nvm_get_var_req { + uint16_t option_num; + uint16_t dimensions; + uint16_t index_0; + uint16_t index_1; + uint16_t index_2; + uint16_t index_3; + uint16_t len_in_bits; + uint8_t __user *out_val; +}; + +struct bnxt_lfc_nvm_set_var_req { + uint16_t option_num; + uint16_t dimensions; + uint16_t index_0; + uint16_t index_1; + uint16_t index_2; + uint16_t index_3; + uint16_t len_in_bits; + uint8_t __user *in_val; +}; + +struct dma_info { + __u64 data; + /* Based on read_or_write parameter + * LFC will either fill or read the + * data to or from the user memory + */ + __u32 length; + /* Length of the data for read/write */ + __u16 offset; + /* Offset at which HWRM input structure needs DMA address*/ + __u8 read_or_write; + /* It should be 0 for write and 1 for read */ + __u8 unused; +}; + +struct blfc_fw_msg { + __u64 usr_req; + /* HWRM input structure */ + __u64 usr_resp; + /* HWRM output structure */ + __u32 len_req; + /* HWRM input structure length*/ + __u32 len_resp; + /* HWRM output structure length*/ + __u32 timeout; + /* HWRM command timeout. 
If 0 then + * LFC will provide default timeout + */ + __u32 num_dma_indications; + /* Number of DMA addresses used in HWRM command */ +#ifdef DECLARE_FLEX_ARRAY + DECLARE_FLEX_ARRAY(struct dma_info, dma); +#else + struct dma_info dma[0]; +#endif + /* User should allocate it with + * (sizeof(struct dma_info) * num_dma_indications) + */ +}; + + +struct bnxt_lfc_generic_msg { + __u8 key; + #define BNXT_LFC_KEY_DOMAIN_NO 1 + __u8 reserved[3]; + __u32 value; +}; + +struct bnxt_lfc_req { + struct bnxt_lfc_req_hdr hdr; + union { + struct bnxt_lfc_nvm_get_var_req nvm_get_var_req; + struct bnxt_lfc_nvm_set_var_req nvm_set_var_req; + __u64 hreq; /* Pointer to "struct blfc_fw_msg" */ + } req; +}; + +#define BNXT_LFC_REQ _IOW(BNXT_LFC_IOCTL_MAGIC, 1, struct bnxt_lfc_req) +#endif /*BNXT_LFC_IOCTL_H*/ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_log.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_log.c new file mode 100644 index 000000000000..0428807cd1b8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_log.c @@ -0,0 +1,570 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include + +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_log.h" +#include "bnxt_coredump.h" + +#define BNXT_LOG_MSG_SIZE 256 +#define BNXT_LOG_NUM_BUFFERS(x) ((x) / BNXT_LOG_MSG_SIZE) + +int l2_ring_contents_seg_list[] = { + BNXT_SEGMENT_L2_RING_CONTENT +}; + +/* Below list of segment creation will be + * attempted for L2 logger + */ +int l2_seg_list[] = { + BNXT_SEGMENT_L2 +}; + +/* Below list of segment creation will be + * attempted for L2 CTX MEM logger + */ +int l2_ctx_mem_seg_list[] = { + BNXT_SEGMENT_CTX_MEM_QP, + BNXT_SEGMENT_CTX_MEM_SRQ, + BNXT_SEGMENT_CTX_MEM_CQ, + BNXT_SEGMENT_CTX_MEM_VNIC, + BNXT_SEGMENT_CTX_MEM_STAT, + BNXT_SEGMENT_CTX_MEM_SP_TQM_RING, + BNXT_SEGMENT_CTX_MEM_FP_TQM_RING, + BNXT_SEGMENT_CTX_MEM_MRAV, + BNXT_SEGMENT_CTX_MEM_TIM, + BNXT_SEGMENT_CTX_MEM_TX_CK, + BNXT_SEGMENT_CTX_MEM_RX_CK, + BNXT_SEGMENT_CTX_MEM_MP_TQM_RING, + BNXT_SEGMENT_CTX_MEM_SQ_DB_SHADOW, + BNXT_SEGMENT_CTX_MEM_RQ_DB_SHADOW, + BNXT_SEGMENT_CTX_MEM_SRQ_DB_SHADOW, + BNXT_SEGMENT_CTX_MEM_CQ_DB_SHADOW +}; + +/* Below list of segment creation will be + * attempted for RoCE logger + */ +int roce_seg_list[] = { + BNXT_SEGMENT_QP_CTX, + BNXT_SEGMENT_CQ_CTX, + BNXT_SEGMENT_MR_CTX, + BNXT_SEGMENT_SRQ_CTX, + /* Try to fit fixed sized segment first.*/ + BNXT_SEGMENT_ROCE +}; + +struct bnxt_logger { + struct list_head list; + u16 logger_id; + u32 buffer_size; + u16 head; + u16 tail; + bool valid; + void *msgs; + u32 live_max_size; + void *live_msgs; + u32 max_live_buff_size; + u32 live_msgs_len; + void (*log_live_op)(void *dev, u32 seg_id); + u32 total_segs; + int *seg_list; +}; + +int bnxt_register_logger(struct bnxt *bp, u16 logger_id, u32 num_buffs, + void (*log_live)(void *, u32), u32 live_max_size) +{ + struct bnxt_logger *logger; + void *data; + + if (logger_id == BNXT_LOGGER_L2_CTX_MEM || + logger_id == BNXT_LOGGER_L2_RING_CONTENTS) + goto register_logger; + + if (!log_live || !live_max_size) + return -EINVAL; + + if 
(!is_power_of_2(num_buffs)) + return -EINVAL; + +register_logger: + logger = kzalloc(sizeof(*logger), GFP_KERNEL); + if (!logger) + return -ENOMEM; + + logger->logger_id = logger_id; + logger->buffer_size = num_buffs * BNXT_LOG_MSG_SIZE; + logger->log_live_op = log_live; + logger->max_live_buff_size = live_max_size; + + switch (logger_id) { + case BNXT_LOGGER_L2: + logger->total_segs = sizeof(l2_seg_list) / sizeof(int); + logger->seg_list = &l2_seg_list[0]; + break; + case BNXT_LOGGER_ROCE: + logger->total_segs = sizeof(roce_seg_list) / sizeof(int); + logger->seg_list = &roce_seg_list[0]; + break; + case BNXT_LOGGER_L2_CTX_MEM: + logger->total_segs = sizeof(l2_ctx_mem_seg_list) / sizeof(int); + logger->seg_list = &l2_ctx_mem_seg_list[0]; + break; + case BNXT_LOGGER_L2_RING_CONTENTS: + logger->total_segs = sizeof(l2_ring_contents_seg_list) / sizeof(int); + logger->seg_list = &l2_ring_contents_seg_list[0]; + break; + default: + logger->total_segs = 1; + break; + } + + if (logger->buffer_size) { + data = vmalloc(logger->buffer_size); + if (!data) { + kfree(logger); + return -ENOMEM; + } + logger->msgs = data; + } + + INIT_LIST_HEAD(&logger->list); + mutex_lock(&bp->log_lock); + list_add_tail(&logger->list, &bp->loggers_list); + mutex_unlock(&bp->log_lock); + return 0; +} + +void bnxt_unregister_logger(struct bnxt *bp, u16 logger_id) +{ + struct bnxt_logger *l = NULL, *tmp; + + mutex_lock(&bp->log_lock); + list_for_each_entry_safe(l, tmp, &bp->loggers_list, list) { + if (l->logger_id == logger_id) { + list_del(&l->list); + break; + } + } + mutex_unlock(&bp->log_lock); + + if (!l || l->logger_id != logger_id) { + netdev_err(bp->dev, "logger id %d not registered\n", logger_id); + return; + } + + vfree(l->msgs); + kfree(l); +} + +int bnxt_log_ring_contents(struct bnxt *bp) +{ + struct list_head *list_head, *pos, *lg; + struct bnxt_logger *logger = NULL; + size_t size = 0; + u32 offset = 0; + u8 *data; + int i, len; + + mutex_lock(&bp->log_lock); + list_head = 
&bp->loggers_list; + list_for_each_safe(pos, lg, list_head) { + logger = list_entry(pos, struct bnxt_logger, list); + if (logger->logger_id == BNXT_LOGGER_L2_RING_CONTENTS) + break; + } + + if (!logger || logger->logger_id != BNXT_LOGGER_L2_RING_CONTENTS) { + mutex_unlock(&bp->log_lock); + return -EINVAL; + } + + /* Include 2 extra u16 size bytes to store ring's producer & consumer index */ + size = bp->tx_nr_rings * (2 * sizeof(u16) + (bp->tx_nr_pages * HW_TXBD_RING_SIZE)); + + if (!logger->msgs || logger->buffer_size < size) { + if (logger->msgs) + vfree(logger->msgs); + + logger->msgs = vmalloc(size); + if (!logger->msgs) { + mutex_unlock(&bp->log_lock); + return -ENOMEM; + } + + logger->buffer_size = size; + } + + data = logger->msgs; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + u16 prod_id = RING_TX(bp, txr->tx_prod); + u16 cons_id = RING_TX(bp, txr->tx_cons); + struct bnxt_ring_struct *ring; + + ring = &txr->tx_ring_struct; + + data[offset++] = prod_id && 0xff; + data[offset++] = (prod_id && 0xff00) >> 8; + + data[offset++] = cons_id && 0xff; + data[offset++] = (cons_id && 0xff00) >> 8; + + len = bnxt_copy_ring(bp, &ring->ring_mem, data, offset); + offset += len; + } + mutex_unlock(&bp->log_lock); + return 0; +} + +static int bnxt_log_info(char *buf, size_t max_len, const char *format, va_list args) +{ + static char textbuf[BNXT_LOG_MSG_SIZE]; + char *text = textbuf; + size_t text_len; + char *next; + + text_len = vscnprintf(text, sizeof(textbuf), format, args); + + next = memchr(text, '\n', text_len); + if (next) + text_len = next - text; + else if (text[text_len] == '\0') + text[text_len] = '\n'; + + if (text_len > max_len) { + /* Truncate */ + text_len = max_len; + text[text_len] = '\n'; + } + + memcpy(buf, text, text_len + 1); + + return text_len + 1; +} + +void bnxt_log_add_msg(struct bnxt *bp, u16 logger_id, const char *format, ...) 
+{ + struct list_head *list_head, *pos, *lg; + struct bnxt_logger *logger = NULL; + u16 start, tail; + va_list args; + void *buf; + u32 mask; + + mutex_lock(&bp->log_lock); + list_head = &bp->loggers_list; + list_for_each_safe(pos, lg, list_head) { + logger = list_entry(pos, struct bnxt_logger, list); + if (logger->logger_id == logger_id) + break; + } + + if (!logger) { + mutex_unlock(&bp->log_lock); + return; + } + + mask = BNXT_LOG_NUM_BUFFERS(logger->buffer_size) - 1; + tail = logger->tail; + start = logger->head; + + if (logger->valid && start == tail) + logger->head = ++start & mask; + + buf = logger->msgs + BNXT_LOG_MSG_SIZE * logger->tail; + logger->tail = ++tail & mask; + + if (!logger->valid) + logger->valid = true; + + va_start(args, format); + bnxt_log_info(buf, BNXT_LOG_MSG_SIZE, format, args); + va_end(args); + mutex_unlock(&bp->log_lock); +} + +void bnxt_log_raw(struct bnxt *bp, u16 logger_id, void *data, int len) +{ + struct list_head *head, *pos, *lg; + struct bnxt_logger *logger = NULL; + bool match_found = false; + + head = &bp->loggers_list; + list_for_each_safe(pos, lg, head) { + logger = list_entry(pos, struct bnxt_logger, list); + if ((logger->logger_id == logger_id) && logger->live_msgs) { + match_found = true; + break; + } + } + + if (!match_found) + return; + + if ((logger->max_live_buff_size - logger->live_msgs_len) >= len) { + memcpy(logger->live_msgs, data, len); + logger->live_msgs_len += len; + logger->live_msgs += len; + } +} + +void bnxt_log_live(struct bnxt *bp, u16 logger_id, const char *format, ...) 
+{ + struct list_head *head, *pos, *lg; + struct bnxt_logger *logger = NULL; + va_list args; + int len; + + head = &bp->loggers_list; + list_for_each_safe(pos, lg, head) { + logger = list_entry(pos, struct bnxt_logger, list); + if (logger->logger_id == logger_id) + break; + } + + if (!logger || !logger->live_msgs || (logger->live_msgs_len >= logger->max_live_buff_size)) + return; + + va_start(args, format); + len = bnxt_log_info(logger->live_msgs + logger->live_msgs_len, + logger->max_live_buff_size - logger->live_msgs_len, + format, args); + va_end(args); + + logger->live_msgs_len += len; +} + +static size_t bnxt_get_data_len(char *buf) +{ + size_t count = 0; + + while (*buf++ != '\n') + count++; + return count + 1; +} + +static size_t bnxt_collect_logs_buffer(struct bnxt_logger *logger, char *dest) +{ + u32 mask = BNXT_LOG_NUM_BUFFERS(logger->buffer_size) - 1; + u16 head = logger->head; + u16 tail = logger->tail; + size_t total_len = 0; + int count; + + if (!logger->valid) + return 0; + + count = (tail > head) ? 
(tail - head) : (tail - head + mask + 1); + while (count--) { + void *src = logger->msgs + BNXT_LOG_MSG_SIZE * (head & mask); + size_t len; + + len = bnxt_get_data_len(src); + memcpy(dest + total_len, src, len); + total_len += len; + head++; + } + + return total_len; +} + +static int bnxt_get_ctx_mem_length(struct bnxt *bp, u32 total_segments) +{ + u32 seg_hdr_len = sizeof(struct bnxt_coredump_segment_hdr); + struct bnxt_ctx_mem_info *ctx = bp->ctx; + size_t seg_len; + size_t length = 0; + int i; + + if (!ctx) + return 0; + + for (i = 0; i < total_segments; i++) { + int type = l2_ctx_mem_seg_list[i] - BNXT_LOG_CTX_MEM_SEG_ID_START; + struct bnxt_ctx_mem_type *ctxm; + + ctxm = &ctx->ctx_arr[type]; + if (!ctxm) + continue; + + seg_len = bnxt_copy_ctx_mem(bp, ctxm, NULL, 0); + length += (seg_hdr_len + seg_len); + } + return length; +} + +size_t bnxt_get_loggers_coredump_size(struct bnxt *bp, u16 dump_type) +{ + struct list_head *head, *pos, *lg; + struct bnxt_logger *logger; + size_t len = 0; + + mutex_lock(&bp->log_lock); + head = &bp->loggers_list; + list_for_each_safe(pos, lg, head) { + logger = list_entry(pos, struct bnxt_logger, list); + if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) { + if (dump_type != BNXT_DUMP_DRIVER_WITH_CTX_MEM) + continue; + len += bnxt_get_ctx_mem_length(bp, logger->total_segs); + continue; + } + len += sizeof(struct bnxt_coredump_segment_hdr) + + logger->max_live_buff_size + logger->buffer_size; + } + mutex_unlock(&bp->log_lock); + return len; +} + +void bnxt_start_logging_coredump(struct bnxt *bp, char *dest_buf, u32 *dump_len, u16 dump_type) +{ + u32 null_seg_len, requested_buf_len, total_segs_per_logger; + u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output); + u32 offset, seg_hdr_len, total_seg_count; + struct bnxt_coredump_segment_hdr seg_hdr; + u32 prev_live_msgs_len, seg_id_in_hdr; + struct list_head *head, *pos, *lg; + struct bnxt_time start_time; + struct bnxt_logger *logger; + void *seg_hdr_dest; + s16 start_utc; + 
size_t seg_len; + int i; + + seg_hdr_len = sizeof(seg_hdr); + total_seg_count = 0; + offset = 0; + + requested_buf_len = *dump_len; + start_time = bnxt_get_current_time(bp); + start_utc = sys_tz.tz_minuteswest; + + mutex_lock(&bp->log_lock); + + /* First segment should be hwrm_ver_get response. + * For hwrm_ver_get response Component id = 2 and Segment id = 0 + */ + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len, + 0, 0, 0, 2, 0); + memcpy(dest_buf + offset, &seg_hdr, seg_hdr_len); + offset += seg_hdr_len; + memcpy(dest_buf + offset, &bp->ver_resp, ver_get_resp_len); + offset += ver_get_resp_len; + *dump_len = seg_hdr_len + ver_get_resp_len; + + head = &bp->loggers_list; + list_for_each_safe(pos, lg, head) { + seg_hdr_dest = NULL; + seg_len = 0; + + logger = list_entry(pos, struct bnxt_logger, list); + total_segs_per_logger = logger->total_segs; + logger->live_msgs_len = 0; + prev_live_msgs_len = 0; + + if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) { + if (dump_type != BNXT_DUMP_DRIVER_WITH_CTX_MEM || !bp->ctx) + continue; + } + + netdev_dbg(bp->dev, "logger id %d -> total seg %d\n", + logger->logger_id, total_segs_per_logger); + for (i = 0; i < total_segs_per_logger; i++) { + seg_hdr_dest = dest_buf + offset; + offset += seg_hdr_len; + seg_len = 0; + + if (logger->logger_id == BNXT_LOGGER_L2_CTX_MEM) { + struct bnxt_ctx_mem_info *ctx = bp->ctx; + struct bnxt_ctx_mem_type *ctxm; + u16 type; + + type = l2_ctx_mem_seg_list[i] - BNXT_LOG_CTX_MEM_SEG_ID_START; + ctxm = &ctx->ctx_arr[type]; + seg_len = bnxt_copy_ctx_mem(bp, ctxm, dest_buf, offset); + offset += seg_len; + seg_id_in_hdr = logger->seg_list ? + logger->seg_list[i] : total_seg_count; + } else if (logger->logger_id == BNXT_LOGGER_L2_RING_CONTENTS) { + if (logger->msgs) { + memcpy(dest_buf + offset, logger->msgs, + logger->buffer_size); + seg_len = logger->buffer_size; + offset += seg_len; + } + seg_id_in_hdr = logger->seg_list ? 
+ logger->seg_list[i] : total_seg_count; + } else { + /* First collect logs from buffer */ + seg_len = bnxt_collect_logs_buffer(logger, dest_buf + offset); + offset += seg_len; + + /* Let logger to collect live messages */ + logger->live_msgs = dest_buf + offset; + + prev_live_msgs_len = logger->live_msgs_len; + seg_id_in_hdr = logger->seg_list ? + logger->seg_list[i] : total_seg_count; + logger->log_live_op(bp, logger->seg_list ? + logger->seg_list[i] : total_seg_count); + seg_len += (logger->live_msgs_len - prev_live_msgs_len); + offset += seg_len; + } + + bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, seg_len, + 0, 0, 0, 13, 0); + seg_hdr.segment_id = cpu_to_le32(seg_id_in_hdr); + memcpy(seg_hdr_dest, &seg_hdr, sizeof(seg_hdr)); + total_seg_count++; + *dump_len += (seg_hdr_len + seg_len); + netdev_dbg(bp->dev, "seg 0x%x seg_len (%d + %d) offset %d len %d\n", + seg_id_in_hdr, seg_hdr_len, (unsigned int)seg_len, + offset, *dump_len); + } + } + + null_seg_len = BNXT_COREDUMP_BUF_LEN(requested_buf_len) - *dump_len; + offset = *dump_len; + bnxt_fill_empty_seg(bp, dest_buf + offset, null_seg_len); + + /* Fix the coredump record at last 1024 bytes */ + offset = requested_buf_len - sizeof(struct bnxt_coredump_record); + netdev_dbg(bp->dev, "From %s %d offset %d buf len %d\n", + __func__, __LINE__, offset, requested_buf_len); + bnxt_fill_coredump_record(bp, (void *)dest_buf + offset, + start_time, start_utc, + total_seg_count + 2, 0); + + *dump_len = *dump_len + null_seg_len + + sizeof(struct bnxt_coredump_record) + + sizeof(struct bnxt_coredump_segment_hdr); + + mutex_unlock(&bp->log_lock); +} + +void bnxt_reset_loggers(struct bnxt *bp) +{ + struct list_head *head, *pos, *lg; + struct bnxt_logger *logger; + + mutex_lock(&bp->log_lock); + head = &bp->loggers_list; + list_for_each_safe(pos, lg, head) { + logger = list_entry(pos, struct bnxt_logger, list); + logger->head = 0; + logger->tail = 0; + logger->valid = false; + } + mutex_unlock(&bp->log_lock); +} diff --git 
a/drivers/thirdparty/release-drivers/bnxt/bnxt_log.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_log.h new file mode 100644 index 000000000000..78ee536a6499 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_log.h @@ -0,0 +1,55 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_LOG_H +#define BNXT_LOG_H + +#define BNXT_LOGGER_L2 1 +#define BNXT_LOGGER_ROCE 2 +#define BNXT_LOGGER_L2_CTX_MEM 3 +#define BNXT_LOGGER_L2_RING_CONTENTS 4 + +#define BNXT_SEGMENT_L2 0 +#define BNXT_SEGMENT_ROCE 255 +#define BNXT_SEGMENT_QP_CTX 256 +#define BNXT_SEGMENT_SRQ_CTX 257 +#define BNXT_SEGMENT_CQ_CTX 258 +#define BNXT_SEGMENT_MR_CTX 270 + +#define BNXT_LOG_CTX_MEM_SEG_ID_START 0x100 +#define BNXT_SEGMENT_L2_RING_CONTENT 0x200 + +#define BNXT_SEGMENT_CTX_MEM_QP (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_QP) +#define BNXT_SEGMENT_CTX_MEM_SRQ (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQ) +#define BNXT_SEGMENT_CTX_MEM_CQ (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_CQ) +#define BNXT_SEGMENT_CTX_MEM_VNIC (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_VNIC) +#define BNXT_SEGMENT_CTX_MEM_STAT (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_STAT) +#define BNXT_SEGMENT_CTX_MEM_SP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_STQM) +#define BNXT_SEGMENT_CTX_MEM_FP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_FTQM) +#define BNXT_SEGMENT_CTX_MEM_MRAV (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_MRAV) +#define BNXT_SEGMENT_CTX_MEM_TIM (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_TIM) +#define BNXT_SEGMENT_CTX_MEM_TX_CK (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_TCK) +#define BNXT_SEGMENT_CTX_MEM_RX_CK (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_RCK) +#define BNXT_SEGMENT_CTX_MEM_MP_TQM_RING (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_MTQM) +#define 
BNXT_SEGMENT_CTX_MEM_SQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SQDBS) +#define BNXT_SEGMENT_CTX_MEM_RQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_RQDBS) +#define BNXT_SEGMENT_CTX_MEM_SRQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_SRQDBS) +#define BNXT_SEGMENT_CTX_MEM_CQ_DB_SHADOW (BNXT_LOG_CTX_MEM_SEG_ID_START + BNXT_CTX_CQDBS) + +int bnxt_register_logger(struct bnxt *bp, u16 logger_id, u32 num_buffers, + void (*log_live)(void *, u32), u32 live_size); +void bnxt_unregister_logger(struct bnxt *bp, u16 logger_id); +void bnxt_log_add_msg(struct bnxt *bp, u16 logger_id, const char *format, ...); +void bnxt_log_live(struct bnxt *bp, u16 logger_id, const char *format, ...); +void bnxt_log_raw(struct bnxt *bp, u16 logger_id, void *data, int len); +void bnxt_reset_loggers(struct bnxt *bp); +size_t bnxt_get_loggers_coredump_size(struct bnxt *bp, u16 dump_type); +void bnxt_start_logging_coredump(struct bnxt *bp, char *dest_buf, u32 *dump_len, u16 dump_type); +int bnxt_log_ring_contents(struct bnxt *bp); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.c new file mode 100644 index 000000000000..35c854361240 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.c @@ -0,0 +1,84 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_coredump.h" +#include "bnxt_log.h" +#include "bnxt_log_data.h" + +static void bnxt_log_drv_version(struct bnxt *bp) +{ + bnxt_log_live(bp, BNXT_LOGGER_L2, "\n"); + + bnxt_log_live(bp, BNXT_LOGGER_L2, "Interface: %s driver version: %s\n", + bp->dev->name, DRV_MODULE_VERSION); +} + +static void bnxt_log_tx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_tx_ring_info *txr; + struct bnxt *bp = bnapi->bp; + int i = bnapi->index, j; + + bnxt_for_each_napi_tx(j, bnapi, txr) + bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d.%d]: tx{fw_ring: %d prod: %x cons: %x}\n", + i, j, txr->tx_ring_struct.fw_ring_id, txr->tx_prod, + txr->tx_cons); +} + +static void bnxt_log_rx_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct bnxt *bp = bnapi->bp; + int i = bnapi->index; + + if (!rxr) + return; + + bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n", + i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod, + rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod, + rxr->rx_sw_agg_prod); +} + +static void bnxt_log_cp_sw_state(struct bnxt_napi *bnapi) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring, *cpr2; + struct bnxt *bp = bnapi->bp; + int i = bnapi->index, j; + + bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons); + for (j = 0; j < cpr->cp_ring_count; j++) { + cpr2 = &cpr->cp_ring_arr[j]; + if (!cpr2->bnapi) + continue; + bnxt_log_live(bp, BNXT_LOGGER_L2, "[%d.%d]: cp{fw_ring: %d raw_cons: %x}\n", + i, j, cpr2->cp_ring_struct.fw_ring_id, cpr2->cp_raw_cons); + } +} + +void bnxt_log_ring_states(struct bnxt *bp) +{ + struct bnxt_napi *bnapi; + int i; + + bnxt_log_drv_version(bp); + + if (!netif_running(bp->dev)) + return; + + for (i = 0; i < bp->cp_nr_rings; i++) { + bnapi = bp->bnapi[i]; + bnxt_log_tx_sw_state(bnapi); + 
bnxt_log_rx_sw_state(bnapi); + bnxt_log_cp_sw_state(bnapi); + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.h new file mode 100644 index 000000000000..366bd54ad95e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_log_data.h @@ -0,0 +1,17 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_LOG_DATA_H +#define BNXT_LOG_DATA_H + +#define BNXT_L2_MAX_LOG_BUFFERS 1024 +#define BNXT_L2_MAX_LIVE_LOG_SIZE (4 << 20) + +void bnxt_log_ring_states(struct bnxt *bp); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.c new file mode 100644 index 000000000000..459d862a6e8c --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.c @@ -0,0 +1,543 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_mpc.h" +#include "bnxt_ktls.h" +#include "bnxt_tfc.h" + +void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap) +{ + if (mpc_chnls_cap) { + if (!bp->mpc_info) + bp->mpc_info = kzalloc(sizeof(*bp->mpc_info), + GFP_KERNEL); + } else { + bnxt_free_mpc_info(bp); + } + if (bp->mpc_info) + bp->mpc_info->mpc_chnls_cap = mpc_chnls_cap; +} + +void bnxt_free_mpc_info(struct bnxt *bp) +{ + kfree(bp->mpc_info); + bp->mpc_info = NULL; +} + +int bnxt_mpc_tx_rings_in_use(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, mpc_tx = 0; + + if (!mpc) + return 0; + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) + mpc_tx += mpc->mpc_ring_count[i]; + return mpc_tx; +} + +int bnxt_mpc_cp_rings_in_use(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + + if (!mpc) + return 0; + return mpc->mpc_cp_rings; +} + +bool bnxt_napi_has_mpc(struct bnxt *bp, int i) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_tx_ring_info *txr; + + if (!mpc) + return false; + + txr = bnapi->tx_ring[0]; + if (txr && !(bnapi->flags & BNXT_NAPI_FLAG_XDP)) + return txr->txq_index < mpc->mpc_cp_rings; + return false; +} + +void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + struct bnxt_napi *bnapi; + int i, j; + + bnapi = bp->bnapi[bnapi_idx]; + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + + for (j = 0; j < num; j++) { + struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j]; + + if (txr->bnapi == bnapi) { + txr->tx_cpr = cpr; + txr->tx_napi_idx = i; + bnapi->tx_mpc_ring[i] = txr; + break; + } + } + } + cpr->cp_ring_type = BNXT_NQ_HDL_TYPE_MP; +} + +void bnxt_trim_mpc_rings(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int max = bp->tx_nr_rings_per_tc; + u8 
max_cp = 0; + int i; + + if (!mpc) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + mpc->mpc_ring_count[i] = min_t(u8, mpc->mpc_ring_count[i], max); + max_cp = max(max_cp, mpc->mpc_ring_count[i]); + } + mpc->mpc_cp_rings = max_cp; +} + +enum bnxt_mpc_type { + BNXT_MPC_CRYPTO, + BNXT_MPC_CFA, +}; + +static void __bnxt_set_dflt_mpc_rings(struct bnxt *bp, enum bnxt_mpc_type type, + int *avail, int avail_cp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int dflt1, dflt2; + int idx1, idx2; + int min1, min2; + int val1, val2; + + if (type == BNXT_MPC_CRYPTO) { + min1 = BNXT_MIN_MPC_TCE; + min2 = BNXT_MIN_MPC_RCE; + dflt1 = BNXT_DFLT_MPC_TCE; + dflt2 = BNXT_DFLT_MPC_RCE; + idx1 = BNXT_MPC_TCE_TYPE; + idx2 = BNXT_MPC_RCE_TYPE; + } else { + min1 = BNXT_MIN_MPC_TE_CFA; + min2 = BNXT_MIN_MPC_RE_CFA; + dflt1 = BNXT_DFLT_MPC_TE_CFA; + dflt2 = BNXT_DFLT_MPC_RE_CFA; + idx1 = BNXT_MPC_TE_CFA_TYPE; + idx2 = BNXT_MPC_RE_CFA_TYPE; + } + if (*avail < (min1 + min2)) + return; + + val1 = min_t(int, *avail / 2, bp->tx_nr_rings_per_tc); + val2 = val1; + + val1 = min_t(int, val1, dflt1); + val2 = min_t(int, val2, dflt2); + + if (avail_cp < min1 || avail_cp < min2) + return; + + val1 = min(val1, avail_cp); + val2 = min(val2, avail_cp); + + mpc->mpc_ring_count[idx1] = val1; + mpc->mpc_ring_count[idx2] = val2; + + *avail = *avail - val1 - val2; +} + +void bnxt_set_dflt_mpc_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct bnxt_mpc_info *mpc = bp->mpc_info; + int avail, mpc_cp, i; + int avail_cp; + + if (!mpc) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) + mpc->mpc_ring_count[i] = 0; + mpc->mpc_cp_rings = 0; + + avail = hw_resc->max_tx_rings - bp->tx_nr_rings; + + avail_cp = hw_resc->max_cp_rings - bp->tx_nr_rings - + bp->rx_nr_rings; + + if (BNXT_MPC_CRYPTO_CAPABLE(bp)) + __bnxt_set_dflt_mpc_rings(bp, BNXT_MPC_CRYPTO, &avail, avail_cp); + + if (BNXT_MPC_CFA_CAPABLE(bp)) + __bnxt_set_dflt_mpc_rings(bp, BNXT_MPC_CFA, &avail, avail_cp); + + 
for (i = 0, mpc_cp = 0; i < BNXT_MPC_TYPE_MAX; i++) { + if (mpc_cp < mpc->mpc_ring_count[i]) + mpc_cp = mpc->mpc_ring_count[i]; + } + mpc->mpc_cp_rings = mpc_cp; +} + +void bnxt_init_mpc_ring_struct(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j; + + if (!BNXT_MPC_CRYPTO_CAPABLE(bp) && !BNXT_MPC_CFA_CAPABLE(bp)) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + struct bnxt_tx_ring_info *txr; + + txr = mpc->mpc_rings[i]; + if (!txr) + continue; + for (j = 0; j < num; j++) { + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + + txr = &mpc->mpc_rings[i][j]; + + txr->tx_ring_struct.ring_mem.flags = + BNXT_RMEM_RING_PTE_FLAG; + txr->bnapi = bp->tx_ring[bp->tx_ring_map[j]].bnapi; + + ring = &txr->tx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->tx_nr_pages; + rmem->page_size = HW_TXBD_RING_SIZE; + rmem->pg_arr = (void **)txr->tx_desc_ring; + rmem->dma_arr = txr->tx_desc_mapping; + rmem->vmem_size = SW_MPC_TXBD_RING_SIZE * + bp->tx_nr_pages; + rmem->vmem = (void **)&txr->tx_buf_ring; + } + } +} + +int bnxt_alloc_mpcs(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i; + + if (!BNXT_MPC_CRYPTO_CAPABLE(bp) && !BNXT_MPC_CFA_CAPABLE(bp)) + return 0; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + struct bnxt_tx_ring_info *txr; + + if (!num) + continue; + txr = kcalloc(num, sizeof(*txr), GFP_KERNEL); + if (!txr) + return -ENOMEM; + mpc->mpc_rings[i] = txr; + } + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + if (!bnxt_napi_has_mpc(bp, i)) + continue; + bnapi->tx_mpc_ring = kcalloc(BNXT_MPC_TYPE_MAX, + sizeof(*bnapi->tx_mpc_ring), + GFP_KERNEL); + if (!bnapi->tx_mpc_ring) + return -ENOMEM; + } + return 0; +} + +void bnxt_free_mpcs(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i; + + if (!mpc) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + 
kfree(mpc->mpc_rings[i]); + mpc->mpc_rings[i] = NULL; + } + if (!bp->bnapi) + return; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + kfree(bnapi->tx_mpc_ring); + bnapi->tx_mpc_ring = NULL; + } +} + +int bnxt_alloc_mpc_rings(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j; + + if (!mpc) + return 0; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i], rc; + + for (j = 0; j < num; j++) { + struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j]; + struct bnxt_ring_struct *ring; + + ring = &txr->tx_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + return rc; + ring->queue_id = BNXT_MPC_QUEUE_ID; + ring->mpc_chnl_type = i; + /* for stats context */ + ring->grp_idx = txr->bnapi->index; + spin_lock_init(&txr->tx_lock); + } + } + return 0; +} + +void bnxt_free_mpc_rings(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j; + + if (!mpc) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + + if (!mpc->mpc_rings[i]) + continue; + for (j = 0; j < num; j++) { + struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + + bnxt_free_ring(bp, &ring->ring_mem); + } + } +} + +void bnxt_init_mpc_rings(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j; + + if (!mpc) + return; + + mpc->mpc_tx_start_idx = bp->tx_nr_rings; + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + + for (j = 0; j < num; j++) { + struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j]; + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + + txr->tx_prod = 0; + txr->tx_cons = 0; + txr->tx_hw_cons = 0; + ring->fw_ring_id = INVALID_HW_RING_ID; + } + } +} + +int bnxt_hwrm_mpc_ring_alloc(struct bnxt *bp) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j, rc; + u32 tx_idx; + + if (!mpc) + return 0; + + tx_idx = 
mpc->mpc_tx_start_idx; + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + + for (j = 0; j < num; j++) { + struct bnxt_tx_ring_info *txr = &mpc->mpc_rings[i][j]; + struct bnxt_cp_ring_info *cpr = txr->tx_cpr; + struct bnxt_ring_struct *ring; + + ring = &cpr->cp_ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) { + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, cpr); + if (rc) + return rc; + } + rc = bnxt_hwrm_tx_ring_alloc(bp, txr, tx_idx++); + if (rc) + return rc; + } + } + return 0; +} + +void bnxt_hwrm_mpc_ring_free(struct bnxt *bp, bool close_path) +{ + struct bnxt_mpc_info *mpc = bp->mpc_info; + int i, j; + + if (!mpc) + return; + + for (i = 0; i < BNXT_MPC_TYPE_MAX; i++) { + int num = mpc->mpc_ring_count[i]; + + if (!mpc->mpc_rings[i]) + continue; + for (j = 0; j < num; j++) + bnxt_hwrm_tx_ring_free(bp, &mpc->mpc_rings[i][j], + close_path); + } +} + +int bnxt_start_xmit_mpc(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + void *data, uint len, unsigned long handle) +{ + u32 bds, total_bds, bd_space, free_size; + struct bnxt_sw_mpc_tx_bd *tx_buf; + struct tx_bd *txbd; + u16 prod; + + bds = DIV_ROUND_UP(len, sizeof(*txbd)); + total_bds = bds + 1; + free_size = bnxt_tx_avail(bp, txr); + if (free_size < total_bds) + return -EBUSY; + + prod = txr->tx_prod; + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + tx_buf = &txr->tx_mpc_buf_ring[RING_TX(bp, prod)]; + tx_buf->handle = handle; + tx_buf->inline_bds = total_bds; + + txbd->tx_bd_len_flags_type = + cpu_to_le32((len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_MPC_TX_BD | + (total_bds << TX_BD_FLAGS_BD_CNT_SHIFT)); + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, total_bds); + + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + bd_space = TX_DESC_CNT - TX_IDX(prod); + if (bd_space < bds) { + uint len0 = bd_space * sizeof(*txbd); + + memcpy(txbd, data, len0); + prod += bd_space; + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + bds 
-= bd_space; + len -= len0; + data += len0; + } + memcpy(txbd, data, len); + prod += bds; + txr->tx_prod = prod; + + /* Sync BD data before updating doorbell */ + wmb(); + bnxt_db_write(bp, &txr->tx_db, prod); + + return 0; +} + +static bool bnxt_mpc_unsolicit(struct mpc_cmp *mpcmp) +{ + u32 client = MPC_CMP_CLIENT_TYPE(mpcmp); + + if (client != MPC_CMP_CLIENT_TCE && client != MPC_CMP_CLIENT_RCE && + client != MPC_CMP_CLIENT_TE_CFA && client != MPC_CMP_CLIENT_RE_CFA) + return false; + return MPC_CMP_UNSOLICIT_SUBTYPE(mpcmp); +} + +int bnxt_mpc_cmp(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons) +{ + struct bnxt_cmpl_entry cmpl_entry_arr[2]; + struct bnxt_napi *bnapi = cpr->bnapi; + u16 cons = RING_CMP(*raw_cons); + struct mpc_cmp *mpcmp, *mpcmp1; + u32 tmp_raw_cons = *raw_cons; + unsigned long handle = 0; + u32 client, cmpl_num; + u8 type; + + mpcmp = (struct mpc_cmp *) + &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + type = MPC_CMP_CMP_TYPE(mpcmp); + cmpl_entry_arr[0].cmpl = mpcmp; + cmpl_entry_arr[0].len = sizeof(*mpcmp); + cmpl_num = 1; + if (type == MPC_CMP_TYPE_MID_PATH_LONG) { + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cons = RING_CMP(tmp_raw_cons); + mpcmp1 = (struct mpc_cmp *) + &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!MPC_CMP_VALID(bp, mpcmp1, tmp_raw_cons)) + return -EBUSY; + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + if (mpcmp1 == mpcmp + 1) { + cmpl_entry_arr[cmpl_num - 1].len += sizeof(*mpcmp1); + } else { + cmpl_entry_arr[cmpl_num].cmpl = mpcmp1; + cmpl_entry_arr[cmpl_num].len = sizeof(*mpcmp1); + cmpl_num++; + } + } + client = MPC_CMP_CLIENT_TYPE(mpcmp) >> MPC_CMP_CLIENT_SFT; + + if (!bnxt_mpc_unsolicit(mpcmp)) { + struct bnxt_sw_mpc_tx_bd *mpc_buf; + struct bnxt_tx_ring_info *txr; + u16 tx_cons; + u32 opaque; + + opaque = mpcmp->mpc_cmp_opaque; + txr = bnapi->tx_mpc_ring[client]; + tx_cons = txr->tx_cons; + if (TX_OPAQUE_RING(opaque) != txr->tx_napi_idx) + netdev_warn(bp->dev, "Wrong opaque %x, expected ring %x, idx %x\n", + opaque, txr->tx_napi_idx, txr->tx_cons); + mpc_buf = &txr->tx_mpc_buf_ring[RING_TX(bp, tx_cons)]; + handle = mpc_buf->handle; + tx_cons += mpc_buf->inline_bds; + txr->tx_cons = tx_cons; + txr->tx_hw_cons = RING_TX(bp, tx_cons); + } + if (client == BNXT_MPC_TCE_TYPE || client == BNXT_MPC_RCE_TYPE) + bnxt_ktls_mpc_cmp(bp, client, handle, cmpl_entry_arr, cmpl_num); + else if (client == BNXT_MPC_TE_CFA_TYPE || client == BNXT_MPC_RE_CFA_TYPE) + bnxt_tfc_mpc_cmp(bp, client, handle, cmpl_entry_arr, cmpl_num); + *raw_cons = tmp_raw_cons; + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.h new file mode 100644 index 000000000000..e32d1d7b0de6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_mpc.h @@ -0,0 +1,143 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_MPC_H +#define BNXT_MPC_H + +#define BNXT_MPC_TCE_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE +#define BNXT_MPC_RCE_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE +#define BNXT_MPC_TE_CFA_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA +#define BNXT_MPC_RE_CFA_TYPE RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA +#define BNXT_MPC_TYPE_MAX (BNXT_MPC_RE_CFA_TYPE + 1) + +#define BNXT_MAX_MPC 8 + +#define BNXT_MIN_MPC_TCE 1 +#define BNXT_MIN_MPC_RCE 1 +#define BNXT_DFLT_MPC_TCE BNXT_MAX_MPC +#define BNXT_DFLT_MPC_RCE BNXT_MAX_MPC + +#define BNXT_MIN_MPC_TE_CFA 1 +#define BNXT_MIN_MPC_RE_CFA 1 +#define BNXT_DFLT_MPC_TE_CFA BNXT_MAX_MPC +#define BNXT_DFLT_MPC_RE_CFA BNXT_MAX_MPC + +/* Defines the number of msgs there are in an MPC msg completion event. + * Used to pass an opaque value into the MPC msg xmit function. The + * completion processing uses this value to ring the doorbell correctly to + * signal "completion event processing complete" to the hardware. + */ + +#define BNXT_MPC_COMP_MSG_COUNT 1 + +#define BNXT_MPC_TMO_MSECS 1000 + +struct bnxt_mpc_info { + u8 mpc_chnls_cap; + u8 mpc_cp_rings; + u8 mpc_ring_count[BNXT_MPC_TYPE_MAX]; + u16 mpc_tx_start_idx; + struct bnxt_tx_ring_info *mpc_rings[BNXT_MPC_TYPE_MAX]; +}; + +enum bnxt_mpc_chnl { + BNXT_MPC_CHNL_TCE = 0, + BNXT_MPC_CHNL_RCE = 1, + BNXT_MPC_CHNL_TE_CFA = 2, + BNXT_MPC_CHNL_RE_CFA = 3, + BNXT_MPC_CHNL_PRIMATE = 4, + BNXT_MPC_CHNL_MAX = 5, +}; + +struct bnxt_sw_mpc_tx_bd { + u8 inline_bds; + unsigned long handle; +}; + +#define SW_MPC_TXBD_RING_SIZE (sizeof(struct bnxt_sw_mpc_tx_bd) * TX_DESC_CNT) + +struct bnxt_cmpl_entry { + void *cmpl; + u32 len; +}; + +struct mpc_cmp { + __le32 mpc_cmp_client_subtype_type; + #define MPC_CMP_TYPE (0x3f << 0) + #define MPC_CMP_TYPE_MID_PATH_SHORT 0x1e + #define MPC_CMP_TYPE_MID_PATH_LONG 0x1f + #define MPC_CMP_SUBTYPE 0xf00 + #define MPC_CMP_SUBTYPE_SFT 8 + #define MPC_CMP_SUBTYPE_SOLICITED (0x0 << 8) + #define MPC_CMP_SUBTYPE_ERR (0x1 << 8) + #define MPC_CMP_SUBTYPE_RESYNC (0x2 
<< 8) + #define MPC_CMP_CLIENT (0xf << 12) + #define MPC_CMP_CLIENT_SFT 12 + #define MPC_CMP_CLIENT_TCE (0x0 << 12) + #define MPC_CMP_CLIENT_RCE (0x1 << 12) + #define MPC_CMP_CLIENT_TE_CFA (0x2 << 12) + #define MPC_CMP_CLIENT_RE_CFA (0x3 << 12) + u32 mpc_cmp_opaque; + __le32 mpc_cmp_v; + #define MPC_CMP_V (1 << 0) + __le32 mpc_cmp_filler; +}; + +#define MPC_CMP_CMP_TYPE(mpcmp) \ + (le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & MPC_CMP_TYPE) + +#define MPC_CMP_CLIENT_TYPE(mpcmp) \ + (le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & MPC_CMP_CLIENT) + +#define MPC_CMP_UNSOLICIT_SUBTYPE(mpcmp) \ + ((le32_to_cpu((mpcmp)->mpc_cmp_client_subtype_type) & \ + MPC_CMP_SUBTYPE) == MPC_CMP_SUBTYPE_ERR) + +#define MPC_CMP_VALID(bp, mpcmp, raw_cons) \ + (!!((mpcmp)->mpc_cmp_v & cpu_to_le32(MPC_CMP_V)) == \ + !((raw_cons) & (bp)->cp_bit)) + +#define BNXT_MPC_CRYPTO_CAP \ + (FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE | FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE) + +#define BNXT_MPC_CRYPTO_CAPABLE(bp) \ + ((bp)->mpc_info ? \ + ((bp)->mpc_info->mpc_chnls_cap & BNXT_MPC_CRYPTO_CAP) == \ + BNXT_MPC_CRYPTO_CAP : false) + +#define BNXT_MPC_CFA_CAP \ + (FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA | FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA) + +#define BNXT_MPC_CFA_CAPABLE(bp) \ + ((bp)->mpc_info ? 
\ + ((bp)->mpc_info->mpc_chnls_cap & BNXT_MPC_CFA_CAP) == \ + BNXT_MPC_CFA_CAP : false) + +void bnxt_alloc_mpc_info(struct bnxt *bp, u8 mpc_chnls_cap); +void bnxt_free_mpc_info(struct bnxt *bp); +int bnxt_mpc_tx_rings_in_use(struct bnxt *bp); +int bnxt_mpc_cp_rings_in_use(struct bnxt *bp); +bool bnxt_napi_has_mpc(struct bnxt *bp, int i); +void bnxt_set_mpc_cp_ring(struct bnxt *bp, int bnapi_idx, + struct bnxt_cp_ring_info *cpr); +void bnxt_trim_mpc_rings(struct bnxt *bp); +void bnxt_set_dflt_mpc_rings(struct bnxt *bp); +void bnxt_init_mpc_ring_struct(struct bnxt *bp); +int bnxt_alloc_mpcs(struct bnxt *bp); +void bnxt_free_mpcs(struct bnxt *bp); +int bnxt_alloc_mpc_rings(struct bnxt *bp); +void bnxt_free_mpc_rings(struct bnxt *bp); +void bnxt_init_mpc_rings(struct bnxt *bp); +int bnxt_hwrm_mpc_ring_alloc(struct bnxt *bp); +void bnxt_hwrm_mpc_ring_free(struct bnxt *bp, bool close_path); +int bnxt_start_xmit_mpc(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + void *data, uint len, unsigned long handle); +int bnxt_mpc_cmp(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, u32 *raw_cons); + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_netmap_linux.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_netmap_linux.h new file mode 100644 index 000000000000..d160f00cc82b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_netmap_linux.h @@ -0,0 +1,985 @@ +/* + * netmap support for Broadcom bnxt Ethernet driver on Linux + * + * Copyright (C) 2015-2018 British Broadcasting Corporation. All rights reserved. + * + * Author: Stuart Grace, BBC Research & Development + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Some portions are: + * + * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Some portions are: + * + * Copyright (c) 2018-2023 Broadcom Inc. + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __BNXT_NETMAP_LINUX_H__ +#define __BNXT_NETMAP_LINUX_H__ + +#include +#include +#include + +#ifdef NETMAP_BNXT_MAIN + +#define NM_BNXT_ADAPTER bnxt + +/* No: of shadow AGG rings; for now stick to 1 ==> same size as normal ring */ +#define AGG_NM_RINGS 1 + +/* + * Register/unregister. We are already under netmap lock. 
+ * Only called on the first register or the last unregister. + */ +int bnxt_netmap_reg(struct netmap_adapter *na, int onoff) +{ + struct ifnet *ifp = na->ifp; + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + int err = 0; + + nm_prinf("bnxt switching %s native netmap mode", onoff ? "into" : "out of"); + + if (netif_running(ifp)) + bnxt_close_nic(bp, true, false); + /* enable or disable flags and callbacks in na and ifp */ + if (onoff) { + nm_set_native_flags(na); + if (!(bp->flags & BNXT_FLAG_JUMBO)) { + bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags |= BNXT_FLAG_NO_AGG_RINGS; + if (bp->flags & BNXT_FLAG_LRO) { + bp->dev->hw_features &= ~NETIF_F_LRO; + bp->dev->features &= ~NETIF_F_LRO; + netdev_update_features(bp->dev); + } + } + bp->flags |= BNXT_FLAG_DIM; + } else { + bp->flags |= BNXT_FLAG_AGG_RINGS; + bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; + if (bp->flags & BNXT_FLAG_LRO) { + bp->dev->hw_features |= NETIF_F_LRO; + bp->dev->features |= NETIF_F_LRO; + netdev_update_features(bp->dev); + } + bp->flags &= ~(BNXT_FLAG_DIM); + nm_clear_native_flags(na); + } + + if (netif_running(ifp)) + return bnxt_open_nic(bp, true, false); + return err; +} + +void bnxt_netmap_txflush(struct bnxt_tx_ring_info *txr) +{ + struct bnxt *bp = txr->bnapi->bp; + struct bnxt_cp_ring_info *cpr2; + struct bnxt_db_info *db; + u32 raw_cons, tgl = 0; + struct tx_cmp *txcmp; + u16 cons; + + cpr2 = txr->tx_cpr; + raw_cons = cpr2->cp_raw_cons; + + while (1) { + u8 cmp_type; + + cons = RING_CMP(raw_cons); + txcmp = &cpr2->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + cmp_type = TX_CMP_TYPE(txcmp); + if (cmp_type == CMP_TYPE_TX_L2_CMP || + cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { + u32 opaque = txcmp->tx_cmp_opaque; + + if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) + txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); + else + txr->tx_hw_cons = TX_OPAQUE_IDX(opaque); + raw_cons = NEXT_RAW_CMP(raw_cons); + } + } + + if (raw_cons != cpr2->cp_raw_cons) { + tgl = cpr2->toggle; + db = &cpr2->cp_db; + cpr2->cp_raw_cons = raw_cons; + /* barrier - before arming the cq */ + wmb(); + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) | + DB_RING_IDX(db, cpr2->cp_raw_cons), db->doorbell); + } +} + +/* + * Reconcile kernel and user view of the transmit ring. + * + * Userspace wants to send packets up to the one before ring->head, + * kernel knows kring->nr_hwcur is the first unsent packet. + * + * Here we push packets out (as many as possible), and possibly + * reclaim buffers from previously completed transmission. + * + * ring->tail is updated on return. + * ring->head is never used here. + * + * The caller (netmap) guarantees that there is only one instance + * running at any time. Any interference with other driver + * methods should be handled by the individual drivers. 
+ */ +int bnxt_netmap_txsync(struct netmap_kring *kring, int flags) +{ + u_int const lim = kring->nkr_num_slots - 1; + struct netmap_ring *ring = kring->ring; + struct netmap_adapter *na = kring->na; + struct bnxt_cp_ring_info *cpr2; + u_int const head = kring->rhead; + struct ifnet *ifp = na->ifp; + u_int nm_i; /* index into the netmap ring */ + u_int n; + /* + * interrupts on every tx packet are expensive so request + * them every half ring, or where NS_REPORT is set + */ + u_int tosync; + /* device-specific */ + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + u16 prod = 0, cons, hw_cons, nr_frags = 0; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd *txbd, *txbd0; + u32 raw_cons, tgl = 0; + struct tx_cmp *txcmp; + struct bnxt_db_info *db; + u16 prod0; + + if (!netif_carrier_ok(ifp) || !netif_running(ifp)) + return 0; + + txr = &bp->tx_ring[bp->tx_ring_map[kring->ring_id]]; + if (unlikely(!txr)) { + nm_prlim(1, "ring %s is missing (txr=%p)", kring->name, txr); + return -ENXIO; + } + + /* + * First part: process new packets to send. + * nm_i is the current index in the netmap ring, + * + * If we have packets to send (kring->nr_hwcur != kring->rhead) + * iterate over the netmap ring, fetch length and update + * the corresponding slot in the NIC ring. Some drivers also + * need to update the buffer's physical address in the NIC slot + * even NS_BUF_CHANGED is not set (PNMB computes the addresses). + * + * The netmap_reload_map() calls is especially expensive, + * even when (as in this case) the tag is 0, so do only + * when the buffer has actually changed. + * + * If possible do not set the report/intr bit on all slots, + * but only a few times per ring or when NS_REPORT is set. + * + * Finally, on 10G and faster drivers, it might be useful + * to prefetch the next slot and txr entry. 
+ */ + + nm_i = kring->nr_hwcur; + if (nm_i != head) { /* we have new packets to send */ + nm_prdis("new pkts to send nm_i: %d head: %d\n", nm_i, head); + __builtin_prefetch(&ring->slot[nm_i]); + + for (n = 0; nm_i != head; n++) { + struct netmap_slot *slot = &ring->slot[nm_i]; + u_int len = slot->len, bd0_len; + uint64_t paddr; + uint64_t offset = nm_get_offset(kring, slot); + + /* device-specific */ + if (bnxt_tx_avail(bp, txr) < 1) { + nm_prinf("NO TX AVAIL!\n"); + break; + } + prod = txr->tx_prod; /* producer index */ + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + /* prefetch for next round */ + __builtin_prefetch(&ring->slot[nm_i + 1]); + __builtin_prefetch(&txr->tx_desc_ring[TX_RING(bp, prod + 1)][TX_IDX(prod + 1)]); + + PNMB(na, slot, &paddr); + NM_CHECK_ADDR_LEN_OFF(na, len, offset); + + /* Fill the slot in the NIC ring. */ + txbd->tx_bd_haddr = cpu_to_le64(paddr + offset); + netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev, &paddr, len, NR_TX); + + flags = (len << TX_BD_LEN_SHIFT) | + ((nr_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + bnxt_lhint_arr[len >> 9]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd0 = txbd; + prod0 = prod; + bd0_len = len; + if (slot->flags & NS_MOREFRAG) { + nr_frags++; + for (;;) { + nm_i = nm_next(nm_i, lim); + /* remember that we have to ask for a + * report each time we move past half a + * ring + */ + if (nm_i == head) { + /* XXX should we accept incomplete packets? 
*/ + return -EINVAL; + } + slot = &ring->slot[nm_i]; + len = slot->len; + PNMB(na, slot, &paddr); + offset = nm_get_offset(kring, slot); + NM_CHECK_ADDR_LEN_OFF(na, len, offset); + prod = NEXT_TX(prod); + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + txbd->tx_bd_haddr = cpu_to_le64(paddr + offset); + flags = len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev, + &paddr, len, NR_TX); + if (!(slot->flags & NS_MOREFRAG)) + break; + nr_frags++; + } + tx_buf->nr_frags = nr_frags; + nr_frags = 0; + + flags = (bd0_len << TX_BD_LEN_SHIFT) | + ((tx_buf->nr_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + bnxt_lhint_arr[bd0_len >> 9]; + txbd0->tx_bd_len_flags_type = cpu_to_le32(flags); + } + slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED | NS_MOREFRAG); + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | + flags | TX_BD_FLAGS_PACKET_END); + prod = NEXT_TX(prod); + txbd0->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod0, + tx_buf->nr_frags); + txr->tx_prod = prod; + nm_i = nm_next(nm_i, lim); + } + kring->nr_hwcur = head; + + /* synchronize the NIC ring */ + nm_prdis("calling bnxt_txr_db_kick with prod:%d cons: %d nr_hwtail: %d\n", + prod, txr->tx_cons, kring->nr_hwtail); + bnxt_txr_db_kick(bp, txr, prod); + } + /* + * Second part: reclaim buffers for completed transmissions. + */ + cpr2 = txr->tx_cpr; + raw_cons = cpr2->cp_raw_cons; + + while (1) { + u8 cmp_type; + + cons = RING_CMP(raw_cons); + txcmp = &cpr2->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + /* The valid test of the entry must be done first before + * reading any further. 
+ */ + dma_rmb(); + cmp_type = TX_CMP_TYPE(txcmp); + if (cmp_type == CMP_TYPE_TX_L2_CMP || + cmp_type == CMP_TYPE_TX_L2_COAL_CMP) { + u32 opaque = txcmp->tx_cmp_opaque; + + if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP) + txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp); + else + txr->tx_hw_cons = TX_OPAQUE_IDX(opaque); + raw_cons = NEXT_RAW_CMP(raw_cons); + } + } + + if (raw_cons != cpr2->cp_raw_cons) { + tgl = cpr2->toggle; + db = &cpr2->cp_db; + cpr2->cp_raw_cons = raw_cons; + /* barrier - before arming the cq */ + wmb(); + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) | + DB_RING_IDX(db, cpr2->cp_raw_cons), + db->doorbell); + } + + tosync = nm_next(kring->nr_hwtail, lim); + hw_cons = txr->tx_hw_cons; + cons = txr->tx_cons; + n = 0; + + while (RING_TX(bp, cons) != hw_cons) { + /* some tx completed, increment avail */ + /* sync all buffers that we are returning to userspace */ + struct netmap_slot *slot = &ring->slot[tosync]; + struct bnxt_sw_tx_bd *tx_buf; + uint64_t paddr; + int j, last; + + (void)PNMB_O(kring, slot, &paddr); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, &paddr, slot->len, NR_TX); + tosync = nm_next(tosync, lim); + kring->nr_hwtail = nm_prev(tosync, lim); + + last = tx_buf->nr_frags; + + for (j = 0; j < last; j++) { + slot = &ring->slot[tosync]; + (void)PNMB_O(kring, slot, &paddr); + cons = NEXT_TX(cons); + netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, &paddr, slot->len, NR_TX); + tosync = nm_next(tosync, lim); + kring->nr_hwtail = nm_prev(tosync, lim); + } + + cons = NEXT_TX(cons); + + n++; + } + + if (n) { + nm_prdis("tx_completed [%d] kring->nr_hwtail: %d\n", n, kring->nr_hwtail); + txr->tx_cons = cons; + } + + return 0; +} + +int __bnxt_netmap_rxsync(struct netmap_kring *kring, int flags) +{ + u_int const lim = kring->nkr_num_slots - 1; + struct netmap_adapter *na = kring->na; + struct netmap_ring *ring = kring->ring; + u_int const head = kring->rhead; + u_int stop_i = 
nm_prev(head, lim); /* stop reclaiming here */ + u_int ring_nr = kring->ring_id; + struct ifnet *ifp = na->ifp; + uint16_t slot_flags = 0; + u_int nm_i = 0; /* index into the netmap ring */ + + /* device-specific */ + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + u32 cp_cons, tmp_raw_cons = 0, real_cons = 0; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + u32 lflags, work_done = 0; + struct rx_cmp_ext *rxcmp1; + struct bnxt_db_info *db; + struct rx_cmp *rxcmp; + u32 tgl = 0, len; + uint64_t paddr; + + rxr = &bp->rx_ring[kring->ring_id]; + cpr = rxr->rx_cpr; + + /* + * First part: reclaim buffers that userspace has released: + * (from kring->nr_hwcur to second last [*] slot before ring->head) + * and make the buffers available for reception. + * As usual nm_i is the index in the netmap ring. + * [*] IMPORTANT: we must leave one free slot in the ring + * to avoid ring empty/full confusion in userspace. + */ + + nm_i = kring->nr_hwcur; + stop_i = nm_prev(kring->rhead, lim); + + if (nm_i != stop_i) { + struct netmap_slot *slot; + u32 prod = rxr->rx_prod; + struct rx_bd *rxbd; + uint64_t offset; + void *addr; + + while (nm_i != stop_i) { + slot = &ring->slot[nm_i]; + offset = nm_get_offset(kring, slot); + addr = PNMB(na, slot, &paddr); /* find phys address */ + + if (unlikely(addr == NETMAP_BUF_BASE(na))) { /* bad buf */ + nm_prinf("Resetting RX ring %u\n", ring_nr); + goto ring_reset; + } + + if (slot->flags & NS_BUF_CHANGED) + slot->flags &= ~NS_BUF_CHANGED; + + rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + netmap_sync_map_dev(na, (bus_dma_tag_t)na->pdev, &paddr, + NETMAP_BUF_SIZE(na), NR_RX); + rxbd->rx_bd_haddr = cpu_to_le64(paddr + offset); + prod = NEXT_RX(prod); + nm_i = nm_next(nm_i, lim); + } + rxr->rx_prod = prod; + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + kring->nr_hwcur = nm_i; + } + /* + * Second part: import newly received packets. + * We are told about received packets by CQEs in the CQ. 
+ * + * nm_i is the index of the next free slot in the netmap ring: + */ + rmb(); + real_cons = cpr->cp_raw_cons; + cp_cons = RING_CMP(real_cons); + nm_i = kring->nr_hwtail; + stop_i = nm_prev(kring->nr_hwcur, lim); + + while (nm_i != stop_i) { + rxcmp = (struct rx_cmp *)&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + tmp_raw_cons = NEXT_RAW_CMP(real_cons); + cp_cons = RING_CMP(tmp_raw_cons); + + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + break; + + dma_rmb(); + lflags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); + len = lflags >> RX_CMP_LEN_SHIFT; + ring->slot[nm_i].len = len; + ring->slot[nm_i].flags = slot_flags; + PNMB_O(kring, &ring->slot[nm_i], &paddr); + netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, + &paddr, len, NR_RX); + + nm_i = nm_next(nm_i, lim); + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + real_cons = tmp_raw_cons; + work_done++; + } + + if (work_done) { + kring->nr_hwtail = nm_i; + cpr->cp_raw_cons = real_cons; + tgl = cpr->toggle; + db = &cpr->cp_db; + /* barrier - TBD revisit? 
*/ + wmb(); + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) | + DB_RING_IDX(db, cpr->cp_raw_cons), + db->doorbell); + kring->nr_kflags &= ~NKR_PENDINTR; + } + return 0; +ring_reset: + return netmap_ring_reinit(kring); +} + +#define SLOT_SWAP(s1, s2) do { \ + u32 tmp; \ + tmp = (s1)->buf_idx; \ + (s1)->buf_idx = (s2)->buf_idx; \ + (s2)->buf_idx = tmp; \ + (s1)->flags |= NS_BUF_CHANGED; \ + (s2)->flags |= NS_BUF_CHANGED; \ +} while (0) + +int bnxt_netmap_rxsync_jumbo(struct netmap_kring *kring, int flags) +{ + u_int const lim = kring->nkr_num_slots - 1; + struct netmap_adapter *na = kring->na; + struct netmap_ring *ring = kring->ring; + struct netmap_kring *base_kring; + struct netmap_ring *base_nmring; + struct netmap_kring *agg_kring; + struct netmap_ring *agg_nmring; + u_int const head = kring->rhead; + u_int stop_i = nm_prev(head, lim); /* stop reclaiming here */ + struct ifnet *ifp = na->ifp; + uint16_t slot_flags = 0; + uint32_t rx_ring_id = 0; + u_int nm_i = 0; /* index into the netmap ring */ + + /* device-specific */ + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + u32 cp_cons, tmp_raw_cons = 0, real_cons = 0; + struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; + u32 lflags, work_done = 0; + struct rx_cmp_ext *rxcmp1; + struct bnxt_db_info *db; + + /* jumbo specific */ + u32 tgl = 0, len, misc, total_frag_len = 0; + u16 rx_prod, rx_agg_prod, rx_sw_agg_prod; + struct rx_cmp *rxcmp; + struct rx_bd *rxbd; + uint64_t paddr; + u8 agg_bufs; + int i; + + /* 0,3,6,N... are the actual rings that will be used by app/userspace + * while [1,2, 4,5, N+1,N+2...] 
are the shadow rings that map to the base HW + * ring and AGG rings respectively + */ + if ((kring->ring_id % (2 + AGG_NM_RINGS)) != 0) + return 0; + + rx_ring_id = kring->ring_id / (2 + AGG_NM_RINGS); + rxr = &bp->rx_ring[rx_ring_id]; + cpr = rxr->rx_cpr; + + base_kring = na->rx_rings[kring->ring_id + 1]; + base_nmring = base_kring->ring; + + agg_kring = na->rx_rings[kring->ring_id + 2]; + agg_nmring = agg_kring->ring; + + if (unlikely(kring->nr_mode == NKR_NETMAP_OFF) || + base_kring->nr_mode == NKR_NETMAP_OFF || agg_kring->nr_mode == NKR_NETMAP_OFF) + return 0; + + /* + * First part: reclaim buffers that userspace has released: + * (from kring->nr_hwcur to second last [*] slot before ring->head) + * and make the buffers available for reception. + * For ring N+0 nothing to be done for the buffers that userspace has released. + * Those are not to be published to the hardware RX ring because the buffer refill + * has happened at slot swap time. So a simple kring->nr_hwcur = kring->rhead + * should be enough. Also, since tail, head and cur are frozen for rings N+1 and N+2, + * rxsync would be a NOP for those. + * In the end, all real work happens in the "import newly received packets" part of the + * rxsync for ring N+0. + */ + + kring->nr_hwcur = kring->rhead; + + /* + * Second part: import newly received packets. + * We are told about received packets by CQEs in the CQ. 
+ * + * nm_i is the index of the next free slot in the netmap ring: + */ + rmb(); + real_cons = cpr->cp_raw_cons; + cp_cons = RING_CMP(real_cons); + nm_i = kring->nr_hwtail; + stop_i = nm_prev(kring->nr_hwcur, lim); + + while (nm_i != stop_i) { + rx_agg_prod = rxr->rx_agg_prod; + rx_sw_agg_prod = rxr->rx_sw_agg_prod; + + rx_prod = rxr->rx_prod; + + rxcmp = (struct rx_cmp *)&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + tmp_raw_cons = NEXT_RAW_CMP(real_cons); + cp_cons = RING_CMP(tmp_raw_cons); + + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + break; + + dma_rmb(); + + lflags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type); + len = lflags >> RX_CMP_LEN_SHIFT; + misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1); + agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT; + + if (agg_bufs) { + int space = stop_i - nm_i; + + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + break; + + if (space < 0) + space += kring->nkr_num_slots; + if (space < agg_bufs) { + nm_prinf(" Not enough space!! 
space_rem: %d agg_bufs: %d\n", + space, agg_bufs); + break; + } + slot_flags |= NS_MOREFRAG; + } + + BUG_ON(rxcmp->rx_cmp_opaque > lim); + SLOT_SWAP(&ring->slot[nm_i], &base_nmring->slot[rxcmp->rx_cmp_opaque]); + /* Now that the SLOT SWAP is done, refill the base HW ring BD + * with the new address got from the application ring + */ + rxbd = &rxr->rx_desc_ring[RX_RING(bp, rx_prod)][RX_IDX(rx_prod)]; + PNMB_O(base_kring, &base_nmring->slot[rxcmp->rx_cmp_opaque], &paddr); + rxbd->rx_bd_haddr = cpu_to_le64(paddr); + rxbd->rx_bd_opaque = RING_RX(bp, rx_prod); + + ring->slot[nm_i].len = len; + ring->slot[nm_i].flags = slot_flags; + PNMB_O(kring, &ring->slot[nm_i], &paddr); + netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, + &paddr, len, NR_RX); + nm_prdis("BEG kring->nr_hwtail: %d slot[%d].len: %d flags: %d agg_bufs: %d rx_cmp_opaque: %d\n", + kring->nr_hwtail, nm_i, len, ring->slot[nm_i].flags, agg_bufs, rxcmp->rx_cmp_opaque); + nm_i = nm_next(nm_i, lim); + if (agg_bufs) { + cp_cons = NEXT_CMP(cp_cons); + for (i = 0; i < agg_bufs; i++) { + u16 cons, frag_len; + struct rx_agg_cmp *agg; + + agg = bnxt_get_agg(bp, cpr, cp_cons, i); + cons = agg->rx_agg_cmp_opaque; + frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) & + RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; + agg_nmring = agg_kring->ring; + BUG_ON(cons > lim); + SLOT_SWAP(&ring->slot[nm_i], &agg_nmring->slot[cons]); + /* Now that the SLOT SWAP is done, refill the AGG HW ring BD + * with the new address got from the application ring + */ + rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, rx_agg_prod)][RX_IDX(rx_agg_prod)]; + PNMB_O(agg_kring, &agg_nmring->slot[cons], &paddr); + rxbd->rx_bd_haddr = cpu_to_le64(paddr); + rxbd->rx_bd_opaque = rx_sw_agg_prod; + + slot_flags = (i < (agg_bufs - 1)) ? 
NS_MOREFRAG : 0; + ring->slot[nm_i].len = frag_len; + ring->slot[nm_i].flags = slot_flags; + PNMB_O(kring, &ring->slot[nm_i], &paddr); + netmap_sync_map_cpu(na, (bus_dma_tag_t)na->pdev, + &paddr, len, NR_RX); + total_frag_len += frag_len; + nm_prdis("slot[%d].len: %d flags: %d agg_ring_cons: %d bd_opaque: %d rx_agg_prod: %d\n", + nm_i, ring->slot[nm_i].len, ring->slot[nm_i].flags, cons, rxbd->rx_bd_opaque, rx_agg_prod); + nm_i = nm_next(nm_i, lim); + rx_agg_prod = NEXT_RX_AGG(rx_agg_prod); + rx_sw_agg_prod = RING_RX_AGG(bp, NEXT_RX_AGG(rx_sw_agg_prod)); + } + rxr->rx_agg_prod = rx_agg_prod; + rxr->rx_sw_agg_prod = rx_sw_agg_prod; + } + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + real_cons = tmp_raw_cons; + rxr->rx_prod = NEXT_RX(rx_prod); + work_done++; + } + + if (work_done) { + kring->nr_hwtail = nm_i; + cpr->cp_raw_cons = real_cons; + tgl = cpr->toggle; + db = &cpr->cp_db; + /* barrier - TBD revisit? */ + wmb(); + bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL | DB_TOGGLE(tgl) | + DB_RING_IDX(db, cpr->cp_raw_cons), db->doorbell); + kring->nr_kflags &= ~NKR_PENDINTR; + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + nm_prdis("END cp_raw_cons: %d kring->nr_hwtail : %d rx_prod: %d rx_agg_prod: %d\n", + cpr->cp_raw_cons, kring->nr_hwtail, rxr->rx_prod, rxr->rx_agg_prod); + } + return 0; +} + +/* + * Reconcile kernel and user view of the receive ring. + * Same as for the txsync, this routine must be efficient. + * The caller guarantees a single invocations, but races against + * the rest of the driver should be handled here. + * + * When called, userspace has released buffers up to ring->head + * (last one excluded). + * + * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective + * of whether or not we received an interrupt. 
+ */ +int bnxt_netmap_rxsync(struct netmap_kring *kring, int flags) +{ + u_int const lim = kring->nkr_num_slots - 1; + struct netmap_adapter *na = kring->na; + u_int const head = kring->rhead; + struct ifnet *ifp = na->ifp; + + /* device-specific */ + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + + if (!netif_carrier_ok(ifp) || !netif_running(ifp)) + return 0; + + if (unlikely(head > lim)) + return netmap_ring_reinit(kring); + + if (!(bp->flags & BNXT_FLAG_JUMBO)) + return __bnxt_netmap_rxsync(kring, flags); + + return bnxt_netmap_rxsync_jumbo(kring, flags); +} + +/* + * if in netmap mode, attach the netmap buffers to the ring and return true. + * Otherwise return false. + */ +int bnxt_netmap_configure_tx_ring(struct NM_BNXT_ADAPTER *adapter, + int ring_nr) +{ + struct netmap_adapter *na = NA(adapter->dev); + struct bnxt_tx_ring_info *txr; + struct netmap_slot *slot; + + slot = netmap_reset(na, NR_TX, ring_nr, 0); + if (!slot) + return 0; /* not in native netmap mode */ + + txr = &adapter->tx_ring[adapter->tx_ring_map[ring_nr]]; + txr->tx_cpr->netmapped = 1; + txr->bnapi->cp_ring.netmapped = 1; + /* + * On some cards we would set up the slot addresses now. + * But on bnxt, the address will be written to the WQ when + * each packet arrives in bnxt_netmap_txsync + */ + + return 1; +} + +int bnxt_netmap_configure_rx_ring(struct NM_BNXT_ADAPTER *adapter, struct bnxt_rx_ring_info *rxr) +{ + /* + * In netmap mode, we must preserve the buffers made + * available to userspace before the if_init() + * (this is true by default on the TX side, because + * init makes all buffers available to userspace). 
+ */ + struct netmap_adapter *na = NA(adapter->dev); + struct netmap_slot *slot; + int count = 0, i; + int lim, ring_nr = rxr->netmap_idx; + struct rx_bd *rxbd; + u32 prod; + struct ifnet *ifp = na->ifp; + struct NM_BNXT_ADAPTER *bp = netdev_priv(ifp); + + slot = netmap_reset(na, NR_RX, ring_nr, 0); + if (!slot) + return 0; /* not in native netmap mode */ + + lim = na->num_rx_desc - 1 - nm_kr_rxspace(na->rx_rings[ring_nr]); + rxr->rx_prod = 0; + prod = rxr->rx_prod; + + /* Add this so that even if the NM ring reset fails + * the netmapped flag is set and we will not timeout ring_free + * during teardown + */ + rxr->rx_cpr->netmapped = 1; + if (bp->flags & BNXT_FLAG_JUMBO) { + slot = netmap_reset(na, NR_RX, ring_nr + 1, 0); + if (!slot) + return 0; /* not in native netmap mode */ + + while (count < lim) { + uint64_t paddr; + + rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + PNMB_O(na->rx_rings[ring_nr + 1], &slot[count], &paddr); + rxbd->rx_bd_haddr = cpu_to_le64(paddr); + rxbd->rx_bd_opaque = prod; + prod = NEXT_RX(prod); + count++; + } + nm_prdis("populated %d Rx bufs in ring %d rxr: %p lim = %d", + count, ring_nr + 1, rxr, lim); + rxr->rx_prod = prod; + rxr->rx_next_cons = 0; + + rxr->rx_agg_prod = 0; + prod = rxr->rx_agg_prod; + for (i = 0; i < AGG_NM_RINGS; i++) { + slot = netmap_reset(na, NR_RX, ring_nr + 2 + i, 0); + if (!slot) + return 0; /* not in native netmap mode */ + + count = 0; + while (count < lim) { + uint64_t paddr; + + rxbd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bp, prod)][RX_IDX(prod)]; + PNMB_O(na->rx_rings[ring_nr + 2 + i], &slot[count], &paddr); + rxbd->rx_bd_haddr = cpu_to_le64(paddr); + rxbd->rx_bd_opaque = prod; + prod = NEXT_RX_AGG(prod); + count++; + } + nm_prdis("populated %d Rx AGG bufs in ring %d prod = %d", + count, ring_nr + 2 + i, prod); + } + rxr->rx_agg_prod = prod; + rxr->rx_sw_agg_prod = prod; + } else { + while (count < lim) { + uint64_t paddr; + + rxbd = &rxr->rx_desc_ring[RX_RING(bp, prod)][RX_IDX(prod)]; + 
PNMB_O(na->rx_rings[ring_nr], slot + count, &paddr); + rxbd->rx_bd_haddr = cpu_to_le64(paddr); + rxbd->rx_bd_opaque = prod; + prod = NEXT_RX(prod); + count++; + } + nm_prdis("populated %d Rx bufs in ring %d lim = %d", count, ring_nr, lim); + } + + /* ensure wqes are visible to device before updating doorbell record */ + wmb(); + if (bp->flags & BNXT_FLAG_JUMBO) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + + return 1; +} + +int bnxt_netmap_config(struct netmap_adapter *na, struct nm_config_info *info) +{ + struct ifnet *ifp = na->ifp; + struct NM_BNXT_ADAPTER *bp; + + bp = netdev_priv(ifp); + info->num_tx_rings = bp->tx_nr_rings_per_tc; + info->num_rx_rings = bp->rx_nr_rings; + if (bp->dev->mtu > NETMAP_BUF_SIZE(na) || bp->flags & BNXT_FLAG_JUMBO) { + info->num_rx_rings = 2 * info->num_rx_rings + info->num_rx_rings * AGG_NM_RINGS; + info->rx_buf_maxsize = BNXT_RX_PAGE_SIZE; + } else { + info->rx_buf_maxsize = NETMAP_BUF_SIZE(na); + } + info->num_tx_descs = bp->tx_ring_size + 1; + info->num_rx_descs = bp->rx_ring_size + 1; + + return 0; +} + +/* + * The attach routine, called at the end of bnxt_create_netdev(), + * fills the parameters for netmap_attach() and calls it. + * It cannot fail, in the worst case (such as no memory) + * netmap mode will be disabled and the driver will only + * operate in standard mode. 
+ */ +void bnxt_netmap_attach(struct NM_BNXT_ADAPTER *adapter) +{ + struct netmap_adapter na; + + bzero(&na, sizeof(na)); + + na.ifp = adapter->dev; + na.pdev = &adapter->pdev->dev; + na.na_flags = NAF_MOREFRAG; + na.num_tx_desc = adapter->tx_ring_size + 1; + na.num_rx_desc = adapter->rx_ring_size + 1; + na.nm_txsync = bnxt_netmap_txsync; + na.nm_rxsync = bnxt_netmap_rxsync; + na.nm_register = bnxt_netmap_reg; + na.nm_config = bnxt_netmap_config; + + /* each channel has 1 rx ring and a tx for each tc */ + na.num_tx_rings = adapter->tx_nr_rings_per_tc; + na.num_rx_rings = adapter->rx_nr_rings; + na.rx_buf_maxsize = 1500; /* will be overwritten by nm_config */ + netmap_attach(&na); +} + +#endif /* NETMAP_BNXT_MAIN */ + +#endif /* __BNXT_NETMAP_LINUX_H__ */ + +/* end of file */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.c new file mode 100644 index 000000000000..7b2f7036ce7b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2024 Broadcom + * All rights reserved. 
+ */ +#include +#include +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ulp_flow.h" +#include "bnxt_nic_flow.h" +#include "ulp_nic_flow.h" +#include "bnxt_vfr.h" +#include "tfc.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +/* Max number of filters per PF */ +#define NIC_FLOW_FILTER_MAX 2 + +/* Per L2 filter RoCE flow data */ +struct nic_flow_roce { + __le64 l2_filter_id; + u8 mac_addr[ETH_ALEN]; + u32 l2_ctxt_id; + u32 prof_func; + u32 flow_id; + u64 flow_cnt_hndl; + u32 cnp_flow_id; + u64 cnp_flow_cnt_hndl; + bool in_use; +}; + +#define NIC_FLOW_SUPPORTED(bp) \ + (BNXT_PF(bp) && BNXT_TF_RX_NIC_FLOW_CAP(bp) && BNXT_UDCC_CAP(bp)) + +/* NIC flow database */ +struct nic_flow_db { + struct nic_flow_roce roce[NIC_FLOW_FILTER_MAX]; +}; + +static int bnxt_hwrm_l2_filter_cfg(struct bnxt *bp, __le64 l2_filter_id, + u32 l2_ctxt_id, u32 prof_func) +{ + struct hwrm_cfa_l2_filter_cfg_input *req; + u32 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_CFG); + if (rc) + return rc; + + req->target_id = cpu_to_le16(0xffff); + flags = CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX | + CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP; + + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID | + CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC); + + req->l2_filter_id = l2_filter_id; + req->l2_context_id = l2_ctxt_id; + req->prof_func = prof_func; + + return hwrm_req_send(bp, req); +} + +/* This function initializes the NIC Flow feature which allows + * TF to insert NIC flows into the CFA. 
+ */ +int bnxt_nic_flows_init(struct bnxt *bp) +{ + struct nic_flow_db *nfdb; + u16 sid = 0; + int rc = 0; + + if (!NIC_FLOW_SUPPORTED(bp)) + return 0; + + nfdb = kzalloc(sizeof(*nfdb), GFP_ATOMIC); + if (!nfdb) + return -ENOMEM; + + bp->nic_flow_info = nfdb; + + /* Set the session id in TF core to the AFM session */ + rc = tfc_session_id_set(bp->tfp, sid); + return rc; +} + +void bnxt_nic_flows_deinit(struct bnxt *bp) +{ + if (!NIC_FLOW_SUPPORTED(bp)) + return; + kfree(bp->nic_flow_info); + bp->nic_flow_info = NULL; +} + +int bnxt_nic_flows_open(struct bnxt *bp) +{ + int rc = 0; + if (!NIC_FLOW_SUPPORTED(bp)) + return rc; + + rc = bnxt_tf_port_init(bp, BNXT_TF_FLAG_NICFLOW); + if (rc) + return rc; + rc = bnxt_nic_flows_roce_add(bp); + + return rc; +} + +void bnxt_nic_flows_close(struct bnxt *bp) +{ + if (!NIC_FLOW_SUPPORTED(bp)) + return; + bnxt_nic_flows_deinit(bp); + bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_NICFLOW); +} + +int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 l2_filter_id, const u8 *mac_addr) +{ + struct nic_flow_db *nfdb = bp->nic_flow_info; + struct nic_flow_roce *nfr; + int i; + + if (!NIC_FLOW_SUPPORTED(bp)) + return 0; + for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) { + nfr = &nfdb->roce[i]; + if (nfr->in_use) + continue; + nfr->l2_filter_id = l2_filter_id; + ether_addr_copy(nfr->mac_addr, mac_addr); + nfr->in_use = true; + netdev_dbg(bp->dev, "%s: filter_id(%llx) mac(%pM)\n", __func__, + l2_filter_id, mac_addr); + return 0; + } + netdev_dbg(bp->dev, "%s: no free NIC flow l2 filter entry\n", __func__); + return -EINVAL; +} + +int bnxt_nic_flows_roce_add(struct bnxt *bp) +{ + struct nic_flow_db *nfdb = bp->nic_flow_info; + struct nic_flow_roce *nfr; + int rc = 0; + u8 i; + + if (!NIC_FLOW_SUPPORTED(bp)) + return rc; + /* Return until init complete */ + if (!bp->nic_flow_info) { + netdev_dbg(bp->dev, "%s: Attempt to add RoCE but db not init\n", + __func__); + return -EINVAL; + } + + for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) { + nfr = &nfdb->roce[i]; 
+ if (!nfr->in_use) + continue; + + rc = bnxt_ulp_nic_flows_roce_add(bp, nfr->l2_filter_id, &nfr->l2_ctxt_id, + &nfr->prof_func, &nfr->flow_id, + &nfr->flow_cnt_hndl, &nfr->cnp_flow_id, + &nfr->cnp_flow_cnt_hndl); + if (rc) { + netdev_dbg(bp->dev, "%s: RoCE NIC flow creation failure(%d)\n", + __func__, rc); + goto error; + } + rc = bnxt_hwrm_l2_filter_cfg(bp, nfr->l2_filter_id, nfr->l2_ctxt_id, + nfr->prof_func); + if (rc) { + netdev_dbg(bp->dev, "%s: L2 filter cfg error(%d)\n", + __func__, rc); + goto error; + } + } + return rc; +error: + rc = bnxt_nic_flows_roce_rem(bp, nfr->l2_filter_id); + return rc; +} + +int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 l2_filter_id) +{ + struct nic_flow_db *nfdb = bp->nic_flow_info; + struct nic_flow_roce *nfr; + int rc = 0; + u8 i; + + if (!NIC_FLOW_SUPPORTED(bp)) + return 0; + + /* Return until init complete */ + if (!bp->nic_flow_info) + return 0; + + for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) { + nfr = &nfdb->roce[i]; + if ((nfr->in_use) && (nfr->l2_filter_id == l2_filter_id)) { + rc = bnxt_ulp_nic_flows_roce_del(bp, l2_filter_id, nfr->l2_ctxt_id, + nfr->prof_func, nfr->flow_id, + nfr->cnp_flow_id); + if (rc) + netdev_dbg(bp->dev, "%s: delete l2_filter_id(%llx) failed rc(%d)\n", + __func__, l2_filter_id, rc); + nfr->l2_filter_id = 0; + nfr->in_use = false; + } + } + return rc; +} + +int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 l2_filter_id, + u32 *l2_ctxt_id, u32 *prof_func) +{ + struct nic_flow_db *nfdb = bp->nic_flow_info; + struct nic_flow_roce *nfr; + u8 i; + + if (!NIC_FLOW_SUPPORTED(bp)) + return 0; + + if (!bp->nic_flow_info) + return -EINVAL; + + for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) { + nfr = &nfdb->roce[i]; + if ((nfr->in_use) && (nfr->l2_filter_id == l2_filter_id)) { + *l2_ctxt_id = nfr->l2_ctxt_id; + *prof_func = nfr->prof_func; + return 0; + } + } + netdev_dbg(bp->dev, "%s: l2_filter_id(%llx) not found\n", + __func__, l2_filter_id); + return -ENOENT; +} + +int 
bnxt_nic_flow_dmac_filter_get(struct bnxt *bp, u8 *dmac, __le64 *filter_id) +{ + struct nic_flow_db *nfdb = bp->nic_flow_info; + struct nic_flow_roce *nfr; + u8 i; + + if (!NIC_FLOW_SUPPORTED(bp)) + return 0; + + if (!bp->nic_flow_info) + return -EINVAL; + + for (i = 0; i < NIC_FLOW_FILTER_MAX; i++) { + nfr = &nfdb->roce[i]; + if (!nfr->in_use) + continue; + if (ether_addr_equal(nfr->mac_addr, dmac)) { + *filter_id = nfr->l2_filter_id; + netdev_dbg(bp->dev, "%s: %pM filter=%llx\n", __func__, dmac, + *filter_id); + return 0; + } + } + netdev_dbg(bp->dev, "%s: No matching filter for dmac%pM\n", __func__, dmac); + return -ENOENT; +} + +#else /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ +int bnxt_nic_flows_init(struct bnxt *bp) +{ + return 0; +} + +void bnxt_nic_flows_deinit(struct bnxt *bp) +{ +} + +int bnxt_nic_flows_open(struct bnxt *bp) +{ + return 0; +} + +void bnxt_nic_flows_close(struct bnxt *bp) +{ +} + +int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 filter_id, const u8 *mac_addr) +{ + return 0; +} + +int bnxt_nic_flows_roce_add(struct bnxt *bp) +{ + return 0; +} + +int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 filter_id) +{ + return 0; +} + +int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 filter_id, + u32 *l2_ctxt_id, u32 *prof_func) +{ + return 0; +} + +#endif /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.h new file mode 100644 index 000000000000..115a6f06777f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_nic_flow.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Broadcom + * All rights reserved. 
+ */ +#ifndef BNXT_NIC_FLOW_H +#define BNXT_NIC_FLOW_H + +int bnxt_nic_flows_init(struct bnxt *bp); +void bnxt_nic_flows_deinit(struct bnxt *bp); +int bnxt_nic_flows_open(struct bnxt *bp); +void bnxt_nic_flows_close(struct bnxt *bp); +int bnxt_nic_flows_filter_add(struct bnxt *bp, __le64 filter_id, const u8 *mac_addr); +int bnxt_nic_flows_roce_add(struct bnxt *bp); +int bnxt_nic_flows_roce_rem(struct bnxt *bp, __le64 filter_id); +int bnxt_nic_flows_filter_info_get(struct bnxt *bp, __le64 filter_id, + u32 *l2_ctxt_id, u32 *prof_func); +int bnxt_nic_flow_dmac_filter_get(struct bnxt *bp, u8 *dmac, __le64 *filter_id); +#endif /* BNXT_NIC_FLOW_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_nvm_defs.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_nvm_defs.h new file mode 100644 index 000000000000..eb7917e14276 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_nvm_defs.h @@ -0,0 +1,73 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef _BNXT_NVM_DEFS_H_ +#define _BNXT_NVM_DEFS_H_ + +enum bnxt_nvm_directory_type { + BNX_DIR_TYPE_UNUSED = 0, + BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_UPDATE = 2, + BNX_DIR_TYPE_CHIMP_PATCH = 3, + BNX_DIR_TYPE_BOOTCODE = 4, + BNX_DIR_TYPE_VPD = 5, + BNX_DIR_TYPE_EXP_ROM_MBA = 6, + BNX_DIR_TYPE_AVS = 7, + BNX_DIR_TYPE_PCIE = 8, + BNX_DIR_TYPE_PORT_MACRO = 9, + BNX_DIR_TYPE_APE_FW = 10, + BNX_DIR_TYPE_APE_PATCH = 11, + BNX_DIR_TYPE_KONG_FW = 12, + BNX_DIR_TYPE_KONG_PATCH = 13, + BNX_DIR_TYPE_BONO_FW = 14, + BNX_DIR_TYPE_BONO_PATCH = 15, + BNX_DIR_TYPE_TANG_FW = 16, + BNX_DIR_TYPE_TANG_PATCH = 17, + BNX_DIR_TYPE_BOOTCODE_2 = 18, + BNX_DIR_TYPE_CCM = 19, + BNX_DIR_TYPE_PCI_CFG = 20, + BNX_DIR_TYPE_TSCF_UCODE = 21, + BNX_DIR_TYPE_ISCSI_BOOT = 22, + BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24, + BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25, + BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26, + BNX_DIR_TYPE_EXT_PHY = 27, + BNX_DIR_TYPE_SHARED_CFG = 40, + BNX_DIR_TYPE_PORT_CFG = 41, + BNX_DIR_TYPE_FUNC_CFG = 42, + BNX_DIR_TYPE_MGMT_CFG = 48, + BNX_DIR_TYPE_MGMT_DATA = 49, + BNX_DIR_TYPE_MGMT_WEB_DATA = 50, + BNX_DIR_TYPE_MGMT_WEB_META = 51, + BNX_DIR_TYPE_MGMT_EVENT_LOG = 52, + BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53 +}; + +#define BNX_DIR_ORDINAL_FIRST 0 + +#define BNX_DIR_EXT_NONE 0 +#define BNX_DIR_EXT_INACTIVE (1 << 0) +#define BNX_DIR_EXT_UPDATE (1 << 1) + +#define BNX_DIR_ATTR_NONE 0 +#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) +#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) + +enum bnxnvm_pkglog_field_index { + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, + BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, + BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2, + BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3, + BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6 +}; + +#endif /* Don't add anything after this line */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.c new file mode 100644 
index 000000000000..cf9a5e643609 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.c @@ -0,0 +1,1524 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#ifdef HAVE_IEEE1588_SUPPORT +#include +#include +#include +#include +#include +#include +#endif +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ptp.h" + +#ifdef HAVE_IEEE1588_SUPPORT +static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time) +{ + struct hwrm_func_ptp_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME); + req->ptp_set_time = time; + return hwrm_req_send(bp, req); +} + +int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off) +{ + unsigned int ptp_class; + struct ptp_header *hdr; + + ptp_class = ptp_classify_raw(skb); + + switch (ptp_class & PTP_CLASS_VMASK) { + case PTP_CLASS_V1: + case PTP_CLASS_V2: + hdr = ptp_parse_header(skb, ptp_class); + if (!hdr) + return -EINVAL; + + if (hdr_off) + *hdr_off = (u8 *)hdr - skb->data; + *seq_id = ntohs(hdr->sequence_id); + return 0; + default: + return -ERANGE; + } +} + +static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, + const struct timespec64 *ts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u64 ns = timespec64_to_ns(ts); + + if (BNXT_PTP_USE_RTC(ptp->bp)) + return bnxt_ptp_cfg_settime(ptp->bp, ns); + + spin_lock_bh(&ptp->ptp_lock); + timecounter_init(&ptp->tc, &ptp->cc, ns); + spin_unlock_bh(&ptp->ptp_lock); + return 0; +} + +/* Caller holds ptp_lock */ +static int 
bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, + u64 *ns) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 high_before, high_now, low; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return -EIO; + + high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]); + ptp_read_system_prets(sts); + low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); + ptp_read_system_postts(sts); + high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]); + if (high_now != high_before) { + ptp_read_system_prets(sts); + low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); + ptp_read_system_postts(sts); + } + *ns = (((u64)high_now) << 32) | ((u64)low); + + return 0; +} + +static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, + u32 txts_tmo, int slot) +{ + struct hwrm_port_ts_query_output *resp; + struct hwrm_port_ts_query_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); + if (flags == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { + u32 tmo_us = txts_tmo * 1000; + + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); + req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->txts_req[slot].tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->txts_req[slot].tx_hdr_off); + if (!tmo_us) + tmo_us = BNXT_PTP_QTS_TIMEOUT(bp); + tmo_us = min_t(u32, tmo_us, BNXT_PTP_QTS_MAX_TMO_US); + req->ts_req_timeout = cpu_to_le16(tmo_us); + } else if (flags == PORT_TS_QUERY_REQ_FLAGS_PATH_RX) { + req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->rx_seqid); + req->enables = cpu_to_le16(BNXT_PTP_QTS_RX_ENABLES); + } + + resp = hwrm_req_hold(bp, req); + + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + *ts = le64_to_cpu(resp->ptp_msg_ts); + hwrm_req_drop(bp, req); + return 0; +} + +static void bnxt_ptp_get_current_time(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return; + spin_lock_bh(&ptp->ptp_lock); + WRITE_ONCE(ptp->old_time, 
ptp->current_time); + bnxt_refclk_read(bp, NULL, &ptp->current_time); + spin_unlock_bh(&ptp->ptp_lock); +} + +void bnxt_ptp_get_skb_pre_xmit_ts(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return; + + spin_lock_bh(&ptp->ptp_lock); + bnxt_refclk_read(bp, NULL, &ptp->skb_pre_xmit_ts); + spin_unlock_bh(&ptp->ptp_lock); +} + +#ifdef HAVE_PTP_GETTIMEX64 +static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u64 ns, cycles; + int rc; + + spin_lock_bh(&ptp->ptp_lock); + rc = bnxt_refclk_read(ptp->bp, sts, &cycles); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } + ns = timecounter_cyc2time(&ptp->tc, cycles); + spin_unlock_bh(&ptp->ptp_lock); + *ts = ns_to_timespec64(ns); + + return 0; +} +#else +static int bnxt_ptp_gettime(struct ptp_clock_info *ptp_info, + struct timespec64 *ts) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + u64 ns; + + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->ptp_lock); + *ts = ns_to_timespec64(ns); + return 0; +} +#endif + +/* Caller holds ptp_lock */ +void bnxt_ptp_update_current_time(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + bnxt_refclk_read(bp, NULL, &ptp->current_time); + WRITE_ONCE(ptp->old_time, ptp->current_time); +} + +static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta) +{ + struct hwrm_port_mac_cfg_input *req; + struct bnxt *bp = ptp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE); + req->ptp_adj_phase = delta; + + rc = hwrm_req_send(bp, req); + if (rc) { + netdev_err(bp->dev, "ptp adjphc failed. 
rc = %x\n", rc); + } else { + spin_lock_bh(&ptp->ptp_lock); + bnxt_ptp_update_current_time(bp); + spin_unlock_bh(&ptp->ptp_lock); + } + + return rc; +} + +static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + + if (BNXT_PTP_USE_RTC(ptp->bp)) + return bnxt_ptp_adjphc(ptp, delta); + + spin_lock_bh(&ptp->ptp_lock); + timecounter_adjtime(&ptp->tc, delta); + spin_unlock_bh(&ptp->ptp_lock); + return 0; +} + +#ifdef HAVE_PTP_ADJPHASE +static int bnxt_ptp_adjphase(struct ptp_clock_info *ptp_info, s32 offset_ns) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + struct hwrm_port_mac_cfg_input *req; + struct bnxt *bp = ptp->bp; + int rc; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return -EOPNOTSUPP; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE); + req->ptp_adj_phase = cpu_to_le32(offset_ns); + + rc = hwrm_req_send(bp, req); + if (rc) + netdev_err(bp->dev, "ptp adjphase failed. rc = %x\n", rc); + + return rc; +} +#endif + +static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, s32 ppb) +{ + struct hwrm_port_mac_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(bp, req); + if (rc) + netdev_err(bp->dev, + "ptp adjfine failed. 
rc = %d\n", rc); + return rc; +} + +#ifdef HAVE_SCALED_PPM +static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) +#else +static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) +#endif +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + s32 period, period1, period2, dif, dif1, dif2; + s32 step, best_step = 0, best_period = 0; + s32 best_dif = BNXT_MAX_PHC_DRIFT; + struct bnxt *bp = ptp->bp; + u32 drift_sign = 1; +#ifdef HAVE_SCALED_PPM + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); +#endif + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS || + BNXT_CHIP_NUM_5745X(bp->chip_num)) { +#if !defined(HAVE_SCALED_PPM) + int neg_adj = 0; + u32 diff; + u64 adj; +#endif + + if (!BNXT_MH(bp)) + return bnxt_ptp_adjfine_rtc(bp, ppb); + +#if !defined(HAVE_SCALED_PPM) + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + adj = ptp->cmult; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); +#endif + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); +#ifdef HAVE_SCALED_PPM + ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); +#else + ptp->cc.mult = neg_adj ? 
ptp->cmult - diff : ptp->cmult + diff; +#endif + spin_unlock_bh(&ptp->ptp_lock); + return 0; + } + + /* Frequency adjustment requires programming 3 values: + * 1-bit direction + * 5-bit adjustment step in 1 ns unit + * 24-bit period in 1 us unit between adjustments + */ + if (ppb < 0) { + ppb = -ppb; + drift_sign = 0; + } + + if (ppb == 0) { + /* no adjustment */ + best_step = 0; + best_period = 0xFFFFFF; + } else if (ppb >= BNXT_MAX_PHC_DRIFT) { + /* max possible adjustment */ + best_step = 31; + best_period = 1; + } else { + /* Find the best possible adjustment step and period */ + for (step = 0; step <= 31; step++) { + period1 = step * 1000000 / ppb; + period2 = period1 + 1; + if (period1 != 0) + dif1 = ppb - (step * 1000000 / period1); + else + dif1 = BNXT_MAX_PHC_DRIFT; + if (dif1 < 0) + dif1 = -dif1; + dif2 = ppb - (step * 1000000 / period2); + if (dif2 < 0) + dif2 = -dif2; + dif = (dif1 < dif2) ? dif1 : dif2; + period = (dif1 < dif2) ? period1 : period2; + if (dif < best_dif) { + best_dif = dif; + best_step = step; + best_period = period; + } + } + } + writel((drift_sign << BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT) | + (best_step << BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT) | + (best_period & BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK), + bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME_ADJ); + + return 0; +} + +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_event event; + u64 ns, pps_ts; + + pps_ts = EVENT_PPS_TS(data2, data1); + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, pps_ts); + spin_unlock_bh(&ptp->ptp_lock); + + switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) { + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL: + event.pps_times.ts_real = ns_to_timespec64(ns); + event.type = PTP_CLOCK_PPSUSR; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL: + event.timestamp = ns; + event.type = 
PTP_CLOCK_EXTTS; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + } + + ptp_clock_event(ptp->ptp_clock, &event); +} + +static int bnxt_ptp_cfg_pin(struct bnxt *bp, int pin, u8 usage) +{ + struct hwrm_func_ptp_pin_cfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u8 state = usage != BNXT_PPS_PIN_NONE; + u8 *pin_state, *pin_usg; + u32 enables; + int rc; + + if (!TSIO_PIN_VALID(pin)) { + netdev_err(bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_CFG); + if (rc) + return rc; + + enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE | + FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2); + req->enables = cpu_to_le32(enables); + + pin_state = &req->pin0_state; + pin_usg = &req->pin0_usage; + + *(pin_state + (pin * 2)) = state; + *(pin_usg + (pin * 2)) = usage; + + rc = hwrm_req_send(bp, req); + if (rc) + return rc; + + ptp->pps_info.pins[pin].usage = usage; + ptp->pps_info.pins[pin].state = state; + + return 0; +} + +static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event) +{ + struct hwrm_func_ptp_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT); + req->ptp_pps_event = event; + return hwrm_req_send(bp, req); +} + +void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; + + if (!ptp || !ptp->tstamp_filters) + return; + + if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG)) + goto out; + + if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters & + (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) { + ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE | + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE); + netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp 
filter\n"); + } + + req->flags = cpu_to_le32(ptp->tstamp_filters); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + + if (!hwrm_req_send(bp, req)) { + bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters & + PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE); + return; + } + ptp->tstamp_filters = 0; +out: + bp->ptp_all_rx_tstamp = 0; + netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n"); +} + +void bnxt_ptp_reapply_pps(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pps *pps; + u32 pin = 0; + int rc; + + if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) || + !(ptp->ptp_info.pin_config)) + return; + pps = &ptp->pps_info; + for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) { + if (pps->pins[pin].state) { + rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage); + if (!rc && pps->pins[pin].event) + rc = bnxt_ptp_cfg_event(bp, + pps->pins[pin].event); + if (rc) + netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n", + pin); + } + } +} + +void bnxt_ptp_reapply_phc(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 current_ns; + + if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return; + + /* Account for the delta when stored to now */ + spin_lock_bh(&ptp->ptp_lock); + ptp->current_time += ktime_get_ns() - ptp->save_ts; + current_ns = ptp->current_time; + WRITE_ONCE(ptp->old_time, current_ns); + writel(lower_32_bits(current_ns), bp->bar0 + ptp->refclk_mapped_regs[0]); + writel(upper_32_bits(current_ns), bp->bar0 + ptp->refclk_mapped_regs[1]); + spin_unlock_bh(&ptp->ptp_lock); +} + +static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, + u64 *cycles_delta) +{ + u64 cycles_now, nsec_now, nsec_delta; + int rc; + + spin_lock_bh(&ptp->ptp_lock); + rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } + nsec_now = timecounter_cyc2time(&ptp->tc, 
cycles_now); + spin_unlock_bh(&ptp->ptp_lock); + + nsec_delta = target_ns - nsec_now; + *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult); + return 0; +} + +static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp, + struct ptp_clock_request *rq) +{ + struct hwrm_func_ptp_cfg_input *req; + struct bnxt *bp = ptp->bp; + struct timespec64 ts; + u64 target_ns, delta; + u16 enables; + int rc; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + target_ns = timespec64_to_ns(&ts); + + if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { + delta = target_ns; + goto skip_target_cycles; + } + + rc = bnxt_get_target_cycles(ptp, target_ns, &delta); + if (rc) + return rc; +skip_target_cycles: + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE; + req->enables = cpu_to_le16(enables); + req->ptp_pps_event = 0; + req->ptp_freq_adj_dll_source = 0; + req->ptp_freq_adj_dll_phase = 0; + req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC); + req->ptp_freq_adj_ext_up = 0; + req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(lower_32_bits(delta)); + req->ptp_freq_adj_ext_phase_upper = cpu_to_le32(upper_32_bits(delta)); + + return hwrm_req_send(bp, req); +} + +static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, + struct ptp_clock_request *rq, int on) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + struct bnxt *bp = ptp->bp; + int pin_id; + int rc; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Configure an External PPS IN */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; + if (!on) + break; + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL); + if (!rc) + 
ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL; + return rc; + case PTP_CLK_REQ_PEROUT: + /* Configure a Periodic PPS OUT */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (!TSIO_PIN_VALID(pin_id)) + return -EOPNOTSUPP; + if (!on) + break; + + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT); + if (!rc) + rc = bnxt_ptp_perout_cfg(ptp, rq); + + return rc; + case PTP_CLK_REQ_PPS: + /* Configure PHC PPS IN */ + rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL); + if (!rc) + ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL; + return rc; + default: + netdev_err(bp->dev, "Unrecognized PIN function\n"); + return -EOPNOTSUPP; + } + + return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE); +} + +static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 flags = 0; + int rc = 0; + + switch (ptp->rx_filter) { + case HWTSTAMP_FILTER_ALL: + flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE; + break; + case HWTSTAMP_FILTER_NONE: + flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE; + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) + flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; + break; + } + + if (ptp->tx_tstamp_en) + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; + else + flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; + + ptp->tstamp_filters = flags; + + if (netif_running(bp->dev)) { + if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } else { + bnxt_ptp_cfg_tstamp_filters(bp); + } + if (!rc && !ptp->tstamp_filters) + rc = -EIO; + } + + return rc; +} + +int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq 
*ifr) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + struct bnxt_ptp_cfg *ptp; + u16 old_rxctl; + int old_rx_filter, rc; + u8 old_tx_tstamp_en; + + ptp = bp->ptp_cfg; + if (!ptp) + return -EOPNOTSUPP; + + if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf))) + return -EFAULT; + +#ifndef HAVE_HWTSTAMP_FLAG_BONDED_PHC_INDEX + if (stmpconf.flags) + return -EINVAL; +#endif + + if (stmpconf.tx_type != HWTSTAMP_TX_ON && + stmpconf.tx_type != HWTSTAMP_TX_OFF) + return -ERANGE; + + old_rx_filter = ptp->rx_filter; + old_rxctl = ptp->rxctl; + old_tx_tstamp_en = ptp->tx_tstamp_en; + switch (stmpconf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + ptp->rxctl = 0; + ptp->rx_filter = HWTSTAMP_FILTER_NONE; + break; + case HWTSTAMP_FILTER_ALL: + if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) { + ptp->rx_filter = HWTSTAMP_FILTER_ALL; + break; + } + return -EOPNOTSUPP; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + ptp->rxctl = BNXT_PTP_MSG_EVENTS; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + break; + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + ptp->rxctl = BNXT_PTP_MSG_SYNC; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; + break; + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + ptp->rxctl = BNXT_PTP_MSG_DELAY_REQ; + ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; + break; + default: + return -ERANGE; + } + + if (stmpconf.tx_type == HWTSTAMP_TX_ON) + ptp->tx_tstamp_en = 1; + else + ptp->tx_tstamp_en = 0; + + rc = bnxt_hwrm_ptp_cfg(bp); + if (rc) + goto ts_set_err; + + stmpconf.rx_filter = ptp->rx_filter; + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? 
+ -EFAULT : 0; + +ts_set_err: + ptp->rx_filter = old_rx_filter; + ptp->rxctl = old_rxctl; + ptp->tx_tstamp_en = old_tx_tstamp_en; + return rc; +} + +int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwtstamp_config stmpconf; + struct bnxt_ptp_cfg *ptp; + + ptp = bp->ptp_cfg; + if (!ptp) + return -EOPNOTSUPP; + + stmpconf.flags = 0; + stmpconf.tx_type = ptp->tx_tstamp_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + stmpconf.rx_filter = ptp->rx_filter; + return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ? + -EFAULT : 0; +} + +static int bnxt_map_regs(struct bnxt *bp, u32 *reg_arr, int count, int reg_win) +{ + u32 reg_base = *reg_arr & BNXT_GRC_BASE_MASK; + u32 win_off; + int i; + + for (i = 0; i < count; i++) { + if ((reg_arr[i] & BNXT_GRC_BASE_MASK) != reg_base) + return -ERANGE; + } + win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4; + writel(reg_base, bp->bar0 + win_off); + return 0; +} + +static int bnxt_map_ptp_regs(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 *reg_arr; + int rc, i; + + reg_arr = ptp->refclk_regs; + if (BNXT_CHIP_P5(bp)) { + u32 base = BNXT_PTP_GRC_WIN_BASE; + int win = BNXT_PTP_GRC_WIN; + + if (BNXT_VF(bp)) { + base = BNXT_PTP_GRC_WIN_BASE_VF; + win = BNXT_PTP_GRC_WIN_VF; + } + rc = bnxt_map_regs(bp, reg_arr, 2, win); + if (rc) + return rc; + for (i = 0; i < 2; i++) + ptp->refclk_mapped_regs[i] = base + + (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK); + return 0; + } + for (i = 0; i < 2; i++) { + if (reg_arr[i] & BNXT_GRC_BASE_MASK) + return -EINVAL; + ptp->refclk_mapped_regs[i] = ptp->refclk_regs[i]; + } + + return 0; +} + +static void bnxt_unmap_ptp_regs(struct bnxt *bp) +{ + writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + + (BNXT_PTP_GRC_WIN - 1) * 4); + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + writel(0, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16); +} + +static u64 bnxt_cc_read(const struct cyclecounter *cc) +{ + struct 
bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc); + u64 ns = 0; + + bnxt_refclk_read(ptp->bp, NULL, &ns); + return ns; +} + +int bnxt_get_rx_ts(struct bnxt *bp, struct bnxt_napi *bnapi, + u32 vlan, struct sk_buff *skb) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp->rx_skb) { + netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n"); + return -EBUSY; + } + + ptp->rx_skb = skb; + ptp->bnapi = bnapi; + ptp->vlan = vlan; +#if !defined HAVE_PTP_DO_AUX_WORK + schedule_work(&ptp->ptp_ts_task); +#else + ptp_schedule_worker(ptp->ptp_clock, 0); +#endif + return 0; +} + +static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct skb_shared_hwtstamps timestamp; + struct bnxt_ptp_tx_req *txts_req; + unsigned long now = jiffies; + u64 ts = 0, ns = 0; + u32 tmo = 0; + int rc; + + txts_req = &ptp->txts_req[slot]; + /* make sure bnxt_get_tx_ts() has finished updating */ + smp_rmb(); + if (!time_after_eq(now, txts_req->abs_txts_tmo)) + tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now); + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts, tmo, slot); + if (!rc) { + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) && + (ts < ptp->skb_pre_xmit_ts)) + goto retry_ts; + + memset(×tamp, 0, sizeof(timestamp)); + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + timestamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(txts_req->tx_skb, ×tamp); + } else { +retry_ts: + if (!time_after_eq(jiffies, txts_req->abs_txts_tmo)) + return -EAGAIN; + + netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", + rc); + } + + dev_kfree_skb_any(txts_req->tx_skb); + txts_req->tx_skb = NULL; + BNXT_PTP_INC_TX_AVAIL(ptp); + + return 0; +} + +static void bnxt_stamp_rx_skb(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 ts = 0, ns = 0; + int rc; + + __skb_push(ptp->rx_skb, ETH_HLEN); + /* On BCM57414 chips, hdr_offset is not 
supported, only seqid */ + bnxt_ptp_parse(ptp->rx_skb, &ptp->rx_seqid, NULL); + __skb_pull(ptp->rx_skb, ETH_HLEN); + + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_RX, &ts, + 0, 0); + + if (!rc) { + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + memset(skb_hwtstamps(ptp->rx_skb), 0, sizeof(*skb_hwtstamps(ptp->rx_skb))); + skb_hwtstamps(ptp->rx_skb)->hwtstamp = ns_to_ktime(ns); + } else { + netdev_err(bp->dev, "TS query for RX timer failed rc = %x\n", rc); + } + bnxt_deliver_skb(bp, ptp->bnapi, ptp->vlan, ptp->rx_skb); + ptp->rx_skb = NULL; +} + +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) +{ + spin_lock_bh(&ptp->ptp_tx_lock); + if (ptp->tx_avail) { + *prod = ptp->txts_prod; + ptp->txts_prod = NEXT_TXTS(*prod); + ptp->tx_avail--; + spin_unlock_bh(&ptp->ptp_tx_lock); + return 0; + } + spin_unlock_bh(&ptp->ptp_tx_lock); + return -ENOSPC; +} + +#if defined HAVE_PTP_DO_AUX_WORK +static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + unsigned long now = jiffies; + struct bnxt *bp = ptp->bp; + u16 cons = ptp->txts_cons; + int rc = 0; + + while (READ_ONCE(ptp->tx_avail) != BNXT_MAX_TX_TS) { + if (!ptp->txts_req[cons].tx_skb) + break; + rc = bnxt_stamp_tx_skb(bp, cons); + if (rc == -EAGAIN) + break; + cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; + + if (ptp->rx_skb) + bnxt_stamp_rx_skb(bp); + + if (!time_after_eq(now, ptp->next_period)) { + if (rc == -EAGAIN) + return 0; + return ptp->next_period - now; + } + + bnxt_ptp_get_current_time(bp); + ptp->next_period = now + HZ; + if (time_after_eq(now, ptp->next_overflow_check)) { + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->ptp_lock); + ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; + } + if (rc == -EAGAIN) + return 0; + return HZ; +} +#else +void bnxt_ptp_timer(struct 
bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp) + return; + + bnxt_ptp_get_current_time(bp); + if (time_after_eq(jiffies, ptp->next_overflow_check)) { + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + spin_unlock_bh(&ptp->ptp_lock); + ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD; + } +} + +static void bnxt_ptp_ts_task(struct work_struct *work) +{ + struct bnxt_ptp_cfg *ptp = container_of(work, struct bnxt_ptp_cfg, + ptp_ts_task); + struct bnxt *bp = ptp->bp; + u16 cons = ptp->txts_cons; + int rc = 0; + + while (READ_ONCE(ptp->tx_avail) != BNXT_MAX_TX_TS) { + if (!ptp->txts_req[cons].tx_skb) + break; + rc = bnxt_stamp_tx_skb(bp, cons); + if (rc == -EAGAIN) + break; + cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; + if (ptp->rx_skb) + bnxt_stamp_rx_skb(bp); + if (rc == -EAGAIN && ptp->ptp_clock) + schedule_work(&ptp->ptp_ts_task); +} +#endif +int bnxt_get_tx_ts(struct bnxt *bp, struct sk_buff *skb, u16 prod) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_ptp_tx_req *txts_req; + + txts_req = &ptp->txts_req[prod]; + txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo); + /* make sure bnxt_stamp_tx_skb() is in sync */ + smp_wmb(); + txts_req->tx_skb = skb; +#if !defined HAVE_PTP_DO_AUX_WORK + schedule_work(&ptp->ptp_ts_task); +#else + ptp_schedule_worker(ptp->ptp_clock, 0); +#endif + return 0; +} + +int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u64 time; + + BNXT_READ_TIME64(ptp, time, ptp->old_time); + *ts = (time & BNXT_HI_TIMER_MASK) | pkt_ts; + if (pkt_ts < (time & BNXT_LO_TIMER_MASK)) + *ts += BNXT_LO_TIMER_MASK + 1; + + return 0; +} + +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct skb_shared_hwtstamps timestamp; + u32 opaque = tscmp->tx_ts_cmp_opaque; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + u64 ts, 
ns; + u16 cons; + + txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; + ts = BNXT_GET_TX_TS_48B_NS(tscmp); + cons = TX_OPAQUE_IDX(opaque); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + if (tx_buf->is_ts_pkt) { + if (BNXT_TX_TS_ERR(tscmp)) { + netdev_err(bp->dev, + "timestamp completion error 0x%x 0x%x\n", + le32_to_cpu(tscmp->tx_ts_cmp_flags_type), + le32_to_cpu(tscmp->tx_ts_cmp_errors_v)); + } else { + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + memset(×tamp, 0, sizeof(timestamp)); + timestamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(tx_buf->skb, ×tamp); + } + tx_buf->is_ts_pkt = 0; + } +} + +#ifdef HAVE_ARTNS_TO_TSC +static int bnxt_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct bnxt_ptp_cfg *ptp = (struct bnxt_ptp_cfg *)ctx; + struct hwrm_func_ptp_ts_query_output *resp; + struct hwrm_func_ptp_ts_query_input *req; + struct bnxt *bp = ptp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_TS_QUERY); + if (rc) + return rc; + req->flags = cpu_to_le32(FUNC_PTP_TS_QUERY_REQ_FLAGS_PTM_TIME); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + spin_lock_bh(&ptp->ptp_lock); + *device = ns_to_ktime(timecounter_cyc2time(&ptp->tc, le64_to_cpu(resp->ptm_local_ts))); + spin_unlock_bh(&ptp->ptp_lock); + *system = convert_art_ns_to_tsc(le64_to_cpu(resp->ptm_system_ts)); + hwrm_req_drop(bp, req); + + return 0; +} + +static int bnxt_ptp_getcrosststamp(struct ptp_clock_info *ptp_info, + struct system_device_crosststamp *xtstamp) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + + if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_PTM)) + return -EOPNOTSUPP; + return get_device_system_crosststamp(bnxt_phc_get_syncdevicetime, + ptp, NULL, xtstamp); +} +#endif + +static const struct ptp_clock_info bnxt_ptp_caps = { + .owner = THIS_MODULE, + .name = "bnxt 
clock", + .max_adj = BNXT_MAX_PHC_DRIFT, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, +#ifdef HAVE_SCALED_PPM + .adjfine = bnxt_ptp_adjfine, +#else + .adjfreq = bnxt_ptp_adjfreq, +#endif /* HAVE_SCALED_PPM */ +#ifdef HAVE_PTP_ADJPHASE + .adjphase = bnxt_ptp_adjphase, +#endif + .adjtime = bnxt_ptp_adjtime, +#ifdef HAVE_PTP_DO_AUX_WORK + .do_aux_work = bnxt_ptp_ts_aux_work, +#endif +#ifdef HAVE_PTP_GETTIMEX64 + .gettimex64 = bnxt_ptp_gettimex, +#else + .gettime64 = bnxt_ptp_gettime, +#endif + .settime64 = bnxt_ptp_settime, + .enable = bnxt_ptp_enable, +#ifdef HAVE_ARTNS_TO_TSC + .getcrosststamp = bnxt_ptp_getcrosststamp, +#endif +}; + +static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + /* Allow only PPS pin function configuration */ + if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT && + func != PTP_PF_PHYSYNC) + return 0; + else + return -EOPNOTSUPP; +} + +static int bnxt_ptp_pps_init(struct bnxt *bp) +{ + struct hwrm_func_ptp_pin_qcfg_output *resp; + struct hwrm_func_ptp_pin_qcfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_info *ptp_info; + struct bnxt_pps *pps_info; + u8 *pin_usg; + u32 i, rc; + + /* Query current/default PIN CFG */ + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc || !resp->num_pins) { + hwrm_req_drop(bp, req); + return -EOPNOTSUPP; + } + + ptp_info = &ptp->ptp_info; + pps_info = &ptp->pps_info; + pps_info->num_pins = resp->num_pins; + ptp_info->n_pins = pps_info->num_pins; + ptp_info->pin_config = kcalloc(ptp_info->n_pins, + sizeof(*ptp_info->pin_config), + GFP_KERNEL); + if (!ptp_info->pin_config) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + /* Report the TSIO capability to kernel */ + pin_usg = 
&resp->pin0_usage; + for (i = 0; i < pps_info->num_pins; i++, pin_usg++) { + snprintf(ptp_info->pin_config[i].name, + sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i); + ptp_info->pin_config[i].index = i; + ptp_info->pin_config[i].chan = i; + if (*pin_usg == BNXT_PPS_PIN_PPS_IN) + ptp_info->pin_config[i].func = PTP_PF_EXTTS; + else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT) + ptp_info->pin_config[i].func = PTP_PF_PEROUT; + else + ptp_info->pin_config[i].func = PTP_PF_NONE; + + pps_info->pins[i].usage = *pin_usg; + } + hwrm_req_drop(bp, req); + + /* Only 1 each of ext_ts and per_out pins is available in HW */ + ptp_info->n_ext_ts = 1; + ptp_info->n_per_out = 1; + ptp_info->pps = 1; + ptp_info->verify = bnxt_ptp_verify; + + return 0; +} + +static bool bnxt_pps_config_ok(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config; +} + +static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (!ptp->ptp_clock) { + memset(&ptp->cc, 0, sizeof(ptp->cc)); + ptp->cc.read = bnxt_cc_read; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + ptp->cc.mask = CYCLECOUNTER_MASK(48); + else + ptp->cc.mask = CYCLECOUNTER_MASK(64); + if (BNXT_MH(bp)) { + /* Use timecounter based non-real time mode */ + ptp->cc.shift = BNXT_CYCLES_SHIFT; + ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift); + ptp->cmult = ptp->cc.mult; + } else { + ptp->cc.shift = 0; + ptp->cc.mult = 1; + } + ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD; + } + if (init_tc) + timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); +} + +/* Caller holds ptp_lock */ +void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns) +{ + timecounter_init(&ptp->tc, &ptp->cc, ns); + /* For RTC, cycle_last must be in sync with the timecounter value. 
*/ + ptp->tc.cycle_last = ns & ptp->cc.mask; +} + +int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) +{ + struct timespec64 tsp; + u64 ns; + int rc; + + if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp)) + return -ENODEV; + + if (!phc_cfg) { + ktime_get_real_ts64(&tsp); + ns = timespec64_to_ns(&tsp); + rc = bnxt_ptp_cfg_settime(bp, ns); + if (rc) + return rc; + } else { + rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, + &ns, 0, 0); + if (rc) + return rc; + } + spin_lock_bh(&bp->ptp_cfg->ptp_lock); + bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns); + spin_unlock_bh(&bp->ptp_cfg->ptp_lock); + + return 0; +} + +static void bnxt_ptp_free(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp->ptp_clock) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + } +} + +int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int rc; + + if (!ptp) + return 0; + + rc = bnxt_map_ptp_regs(bp); + if (rc) + return rc; + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) { + /* Initialize freq adj GRC space to 0 so that stratus + * can ignore GRC and use external TS block + */ + writel(0, bp->bar0 + BNXT_GRCPF_REG_SYNC_TIME_ADJ); + } + + if (ptp->ptp_clock && bnxt_pps_config_ok(bp)) + return 0; + + bnxt_ptp_free(bp); + + WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS); + spin_lock_init(&ptp->ptp_lock); + spin_lock_init(&ptp->ptp_tx_lock); + + if (BNXT_PTP_USE_RTC(bp)) { + bnxt_ptp_timecounter_init(bp, false); + rc = bnxt_ptp_init_rtc(bp, phc_cfg); + if (rc) + goto out; + } else { + bnxt_ptp_timecounter_init(bp, true); + if (BNXT_MH(bp)) + bnxt_ptp_adjfine_rtc(bp, 0); + } + + ptp->ptp_info = bnxt_ptp_caps; + if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { + if (bnxt_ptp_pps_init(bp)) + netdev_warn(bp->dev, "1pps not initialized, continuing without 1pps support\n"); + } + ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev); + if 
(IS_ERR(ptp->ptp_clock)) { + rc = PTR_ERR(ptp->ptp_clock); + ptp->ptp_clock = NULL; + goto out; + } + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); + +#if !defined HAVE_PTP_DO_AUX_WORK + INIT_WORK(&ptp->ptp_ts_task, bnxt_ptp_ts_task); +#endif + spin_lock_bh(&ptp->ptp_lock); + bnxt_refclk_read(bp, NULL, &ptp->current_time); + WRITE_ONCE(ptp->old_time, ptp->current_time); + spin_unlock_bh(&ptp->ptp_lock); +#ifdef HAVE_PTP_DO_AUX_WORK + ptp_schedule_worker(ptp->ptp_clock, 0); +#endif + ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO; + return 0; + +out: + bnxt_ptp_free(bp); + bnxt_unmap_ptp_regs(bp); + return rc; +} + +void bnxt_ptp_clear(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int i; + + if (!ptp) + return; + + if (ptp->ptp_clock) + ptp_clock_unregister(ptp->ptp_clock); + + ptp->ptp_clock = NULL; +#if !defined HAVE_PTP_DO_AUX_WORK + cancel_work_sync(&ptp->ptp_ts_task); +#endif + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + + for (i = 0; i < BNXT_MAX_TX_TS; i++) { + if (ptp->txts_req[i].tx_skb) { + dev_kfree_skb_any(ptp->txts_req[i].tx_skb); + ptp->txts_req[i].tx_skb = NULL; + } + } + if (ptp->rx_skb) { + dev_kfree_skb_any(ptp->rx_skb); + ptp->rx_skb = NULL; + } + + bnxt_unmap_ptp_regs(bp); +} + +void bnxt_save_pre_reset_ts(struct bnxt *bp) +{ + if (BNXT_CHIP_P5_PLUS(bp)) + return; + + bnxt_ptp_get_current_time(bp); + bp->ptp_cfg->save_ts = ktime_get_ns(); +} + +#else + +void bnxt_ptp_timer(struct bnxt *bp) +{ +} + +int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) +{ + return -EOPNOTSUPP; +} + +int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) +{ + return 0; +} + +void bnxt_ptp_clear(struct bnxt *bp) +{ +} + +void bnxt_ptp_reapply_pps(struct bnxt *bp) +{ +} + +void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp) +{ +} + +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) +{ +} + +int bnxt_ptp_init_rtc(struct bnxt 
*bp, bool phc_cfg) +{ + return 0; +} + +void bnxt_ptp_reapply_phc(struct bnxt *bp) +{ +} + +void bnxt_save_pre_reset_ts(struct bnxt *bp) +{ +} + +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp) +{ +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.h new file mode 100644 index 000000000000..57a7a27f3225 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ptp.h @@ -0,0 +1,201 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_PTP_H +#define BNXT_PTP_H + +#ifdef HAVE_IEEE1588_SUPPORT +#include +#include +#endif + +#define BNXT_PTP_GRC_WIN 6 +#define BNXT_PTP_GRC_WIN_BASE 0x6000 + +#define BNXT_PTP_GRC_WIN_VF 1 +#define BNXT_PTP_GRC_WIN_BASE_VF 0x1000 + +#define BNXT_MAX_PHC_DRIFT 31000000 +#define BNXT_CYCLES_SHIFT 23 +#define BNXT_DEVCLK_FREQ 1000000 +#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL +#define BNXT_HI_TIMER_MASK 0xffff00000000UL +#define BNXT_HI_TIMER_MASK64 0xffff000000000000UL + +#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */ +#define BNXT_PTP_QTS_TIMEOUT(bp) (((bp)->flags & BNXT_FLAG_CHIP_P5_PLUS) ? 
1000 : 62000) +#define BNXT_PTP_QTS_MAX_TMO_US 65535 +#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \ + PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \ + PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET) + +#define BNXT_PTP_QTS_RX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID) +struct pps_pin { + u8 event; + u8 usage; + u8 state; +}; + +#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS)) + +#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \ + ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE) + +#define EVENT_DATA2_PPS_PIN_NUM(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT) + +#define BNXT_DATA2_UPPER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK + +#define BNXT_DATA2_UPPER_SFT \ + (32 - \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT) + +#define BNXT_DATA1_LOWER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK + +#define BNXT_DATA1_LOWER_SFT \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT + +#define EVENT_PPS_TS(data2, data1) \ + (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\ + (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT)) + +#define BNXT_PPS_PIN_DISABLE 0 +#define BNXT_PPS_PIN_ENABLE 1 +#define BNXT_PPS_PIN_NONE 0 +#define BNXT_PPS_PIN_PPS_IN 1 +#define BNXT_PPS_PIN_PPS_OUT 2 +#define BNXT_PPS_PIN_SYNC_IN 3 +#define BNXT_PPS_PIN_SYNC_OUT 4 + +#define BNXT_PPS_EVENT_INTERNAL 1 +#define BNXT_PPS_EVENT_EXTERNAL 2 + +struct bnxt_pps { + u8 num_pins; +#define BNXT_MAX_TSIO_PINS 4 + struct pps_pin pins[BNXT_MAX_TSIO_PINS]; +}; + +#define BNXT_MAX_TX_TS 4 +#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1)) + +struct bnxt_ptp_tx_req { + struct sk_buff *tx_skb; + u16 tx_seqid; + u16 tx_hdr_off; + unsigned long abs_txts_tmo; +}; + +struct bnxt_ptp_cfg { +#ifdef HAVE_IEEE1588_SUPPORT 
+ struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + struct cyclecounter cc; + struct timecounter tc; + struct bnxt_pps pps_info; + /* serialize timecounter access */ + spinlock_t ptp_lock; + /* serialize ts tx request queuing */ + spinlock_t ptp_tx_lock; + struct sk_buff *rx_skb; + struct bnxt_napi *bnapi; + u32 vlan; + u64 current_time; + u64 old_time; + u64 skb_pre_xmit_ts; + u64 save_ts; +#if !defined HAVE_PTP_DO_AUX_WORK + struct work_struct ptp_ts_task; +#else + unsigned long next_period; +#endif + unsigned long next_overflow_check; + u32 cmult; + /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */ + #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ) + + struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS]; + u16 rx_seqid; +#endif + struct bnxt *bp; + u8 tx_avail; + u16 rxctl; +#define BNXT_PTP_MSG_SYNC (1 << 0) +#define BNXT_PTP_MSG_DELAY_REQ (1 << 1) +#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2) +#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3) +#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8) +#define BNXT_PTP_MSG_DELAY_RESP (1 << 9) +#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10) +#define BNXT_PTP_MSG_ANNOUNCE (1 << 11) +#define BNXT_PTP_MSG_SIGNALING (1 << 12) +#define BNXT_PTP_MSG_MANAGEMENT (1 << 13) +#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \ + BNXT_PTP_MSG_DELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_REQ | \ + BNXT_PTP_MSG_PDELAY_RESP) + u8 tx_tstamp_en:1; + int rx_filter; + u32 tstamp_filters; + + u32 refclk_regs[2]; + u32 refclk_mapped_regs[2]; + u32 txts_tmo; + u16 txts_prod; + u16 txts_cons; +}; + +#if BITS_PER_LONG == 32 +#define BNXT_READ_TIME64(ptp, dst, src) \ +do { \ + spin_lock_bh(&(ptp)->ptp_lock); \ + (dst) = (src); \ + spin_unlock_bh(&(ptp)->ptp_lock); \ +} while (0) +#else +#define BNXT_READ_TIME64(ptp, dst, src) \ + ((dst) = READ_ONCE(src)) +#endif + +#define BNXT_PTP_INC_TX_AVAIL(ptp) \ +do { \ + spin_lock_bh(&(ptp)->ptp_tx_lock); \ + (ptp)->tx_avail++; \ + spin_unlock_bh(&(ptp)->ptp_tx_lock); \ +} while (0) 
+ +int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); +void bnxt_ptp_reapply_pps(struct bnxt *bp); +void bnxt_ptp_reapply_phc(struct bnxt *bp); +#ifndef HAVE_PTP_DO_AUX_WORK +void bnxt_ptp_timer(struct bnxt *bp); +#endif +int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); +int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); +int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); +int bnxt_get_rx_ts(struct bnxt *bp, struct bnxt_napi *bnapi, u32 vlan, struct sk_buff *skb); +int bnxt_get_tx_ts(struct bnxt *bp, struct sk_buff *skb, u16 prod); +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp); +int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); +void bnxt_ptp_clear(struct bnxt *bp); +void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); +int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); +void bnxt_ptp_update_current_time(struct bnxt *bp); +void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); +void bnxt_ptp_get_skb_pre_xmit_ts(struct bnxt *bp); +void bnxt_save_pre_reset_ts(struct bnxt *bp); +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.c new file mode 100644 index 000000000000..edeb781a7dd0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.c @@ -0,0 +1,2125 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ulp.h" +#include "bnxt_sriov.h" +#include "bnxt_vfr.h" +#include "bnxt_ethtool.h" +#include "bnxt_tc.h" +#include "bnxt_devlink.h" +#include "bnxt_sriov_sysfs.h" +#include "tfc_vf2pf_msg.h" + +#ifdef CONFIG_BNXT_SRIOV +static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, + struct bnxt_vf_info *vf, + u16 event_id) +{ + struct hwrm_fwd_async_event_cmpl_input *req; + struct hwrm_async_event_cmpl *async_cmpl; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); + if (rc) + goto exit; + + if (vf) + req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + else + /* broadcast this async event to all VFs */ + req->encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = + (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl; + async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); + async_cmpl->event_id = cpu_to_le16(event_id); + + rc = hwrm_req_send(bp, req); +exit: + if (rc) + netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. 
rc:%d\n", + rc); + return rc; +} + +#ifdef HAVE_NDO_GET_VF_CONFIG +static struct bnxt_vf_info *bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) + __acquires(&bp->sriov_lock) +{ + struct bnxt_vf_info *vf; + + mutex_lock(&bp->sriov_lock); + if (!bp->pf.active_vfs) { + mutex_unlock(&bp->sriov_lock); + netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); + return ERR_PTR(-EINVAL); + } + if (vf_id >= bp->pf.active_vfs) { + mutex_unlock(&bp->sriov_lock); + netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); + return ERR_PTR(-EINVAL); + } + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + netdev_warn(bp->dev, "VF structure freed\n"); + return ERR_PTR(-ENODEV); + } + return &vf[vf_id]; +} + +static void bnxt_vf_ndo_end(struct bnxt *bp) + __releases(&bp->sriov_lock) +{ + mutex_unlock(&bp->sriov_lock); +} + +#ifdef HAVE_VF_SPOOFCHK +int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; + bool old_setting = false; + struct bnxt_vf_info *vf; + u32 func_flags; + int rc; + + if (bp->hwrm_spec_code < 0x10701) + return -ENOTSUPP; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); + + if (vf->flags & BNXT_VF_SPOOFCHK) + old_setting = true; + if (old_setting == setting) { + bnxt_vf_ndo_end(bp); + return 0; + } + + if (setting) + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE; + else + func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE; + /*TODO: if the driver supports VLAN filter on guest VLAN, + * the spoof check should also include vlan anti-spoofing + */ + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->flags = cpu_to_le32(func_flags); + rc = hwrm_req_send(bp, req); + if (!rc) { + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } + } + bnxt_vf_ndo_end(bp); + 
return rc; +} +#endif + +static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->func_qcfg_flags = cpu_to_le16(resp->flags); + hwrm_req_drop(bp, req); + return rc; +} + +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return !!(vf->flags & BNXT_VF_TRUST); + + if (!(bp->fw_cap & BNXT_FW_CAP_VF_CFG_FOR_PF)) + bnxt_hwrm_func_qcfg_flags(bp, vf); + return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF); +} + +#ifdef HAVE_NDO_SET_VF_TRUST +static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_func_cfg_input *req; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) + return 0; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); + if (vf->flags & BNXT_VF_TRUST) + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + else + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + return hwrm_req_send(bp, req); +} + +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return -EINVAL; + + if (trusted) + vf->flags |= BNXT_VF_TRUST; + else + vf->flags &= ~BNXT_VF_TRUST; + + bnxt_hwrm_set_trusted_vf(bp, vf); + bnxt_vf_ndo_end(bp); + return 0; +} +#endif + +#ifdef HAVE_NDO_SET_VF_QUEUES +static bool bnxt_param_ok(int new, u16 curr, u16 avail) +{ + int delta; + + if (new <= curr) + return true; + + delta = new - curr; + if (delta <= avail) + return true; + return false; +} + +static void 
bnxt_adjust_ring_resc(struct bnxt *bp, struct bnxt_vf_info *vf, + struct hwrm_func_vf_resource_cfg_input *req) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u16 cp = 0, grp = 0, stat = 0, vnic = 0; + u16 min_l2, max_l2, min_rss, max_rss; + u16 min_tx, max_tx, min_rx, max_rx; + + min_tx = le16_to_cpu(req->min_tx_rings); + max_tx = le16_to_cpu(req->max_tx_rings); + min_rx = le16_to_cpu(req->min_rx_rings); + max_rx = le16_to_cpu(req->max_rx_rings); + min_rss = le16_to_cpu(req->min_rsscos_ctx); + max_rss = le16_to_cpu(req->max_rsscos_ctx); + min_l2 = le16_to_cpu(req->min_l2_ctxs); + max_l2 = le16_to_cpu(req->max_l2_ctxs); + if (!min_tx && !max_tx && !min_rx && !max_rx) { + min_rss = 0; + max_rss = 0; + min_l2 = 0; + max_l2 = 0; + } else if (bp->pf.vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MAXIMAL) { + u16 avail_cp_rings, avail_stat_ctx; + u16 avail_vnics, avail_ring_grps; + + avail_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); + avail_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); + avail_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; + avail_vnics = hw_resc->max_vnics - bp->nr_vnics; + + cp = max_t(u16, 2 * min_tx, min_rx); + if (cp > vf->min_cp_rings) + cp = min_t(u16, cp, avail_cp_rings + vf->min_cp_rings); + grp = min_tx; + if (grp > vf->min_ring_grps) + grp = min_t(u16, avail_ring_grps + vf->min_ring_grps, + grp); + stat = min_rx; + if (stat > vf->min_stat_ctxs) + stat = min_t(u16, avail_stat_ctx + vf->min_stat_ctxs, + stat); + vnic = min_rx; + if (vnic > vf->min_vnics) + vnic = min_t(u16, vnic, avail_vnics + vf->min_vnics); + + } else { + return; + } + req->min_cmpl_rings = cpu_to_le16(cp); + req->max_cmpl_rings = cpu_to_le16(cp); + req->min_hw_ring_grps = cpu_to_le16(grp); + req->max_hw_ring_grps = cpu_to_le16(grp); + req->min_stat_ctx = cpu_to_le16(stat); + req->max_stat_ctx = cpu_to_le16(stat); + req->min_vnics = cpu_to_le16(vnic); + req->max_vnics = cpu_to_le16(vnic); + req->min_rsscos_ctx = cpu_to_le16(min_rss); + req->max_rsscos_ctx = 
cpu_to_le16(max_rss); + req->min_l2_ctxs = cpu_to_le16(min_l2); + req->max_l2_ctxs = cpu_to_le16(max_l2); +} + +static void bnxt_record_ring_resc(struct bnxt *bp, struct bnxt_vf_info *vf, + struct hwrm_func_vf_resource_cfg_input *req) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + hw_resc->max_tx_rings += vf->min_tx_rings; + hw_resc->max_rx_rings += vf->min_rx_rings; + vf->min_tx_rings = le16_to_cpu(req->min_tx_rings); + vf->max_tx_rings = le16_to_cpu(req->max_tx_rings); + vf->min_rx_rings = le16_to_cpu(req->min_rx_rings); + vf->max_rx_rings = le16_to_cpu(req->max_rx_rings); + hw_resc->max_tx_rings -= vf->min_tx_rings; + hw_resc->max_rx_rings -= vf->min_rx_rings; + if (bp->pf.vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MAXIMAL) { + hw_resc->max_cp_rings += vf->min_cp_rings; + hw_resc->max_hw_ring_grps += vf->min_ring_grps; + hw_resc->max_stat_ctxs += vf->min_stat_ctxs; + hw_resc->max_vnics += vf->min_vnics; + vf->min_cp_rings = le16_to_cpu(req->min_cmpl_rings); + vf->min_ring_grps = le16_to_cpu(req->min_hw_ring_grps); + vf->min_stat_ctxs = le16_to_cpu(req->min_stat_ctx); + vf->min_vnics = le16_to_cpu(req->min_vnics); + hw_resc->max_cp_rings -= vf->min_cp_rings; + hw_resc->max_hw_ring_grps -= vf->min_ring_grps; + hw_resc->max_stat_ctxs -= vf->min_stat_ctxs; + hw_resc->max_vnics -= vf->min_vnics; + } +} + +int bnxt_set_vf_queues(struct net_device *dev, int vf_id, int min_txq, + int max_txq, int min_rxq, int max_rxq) +{ + struct hwrm_func_vf_resource_cfg_input *req; + struct bnxt *bp = netdev_priv(dev); + u16 avail_tx_rings, avail_rx_rings; + struct bnxt_hw_resc *hw_resc; + struct bnxt_vf_info *vf; + int rc; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return -EINVAL; + + if (!BNXT_NEW_RM(bp) || + !(bp->fw_cap & BNXT_FW_CAP_VF_RES_MIN_GUARANTEED)) { + bnxt_vf_ndo_end(bp); + return -EOPNOTSUPP; + } + + hw_resc = &bp->hw_resc; + + avail_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + avail_rx_rings = 
hw_resc->max_rx_rings - bp->rx_nr_rings * 2; + else + avail_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; + + if (!bnxt_param_ok(min_txq, vf->min_tx_rings, avail_tx_rings) || + !bnxt_param_ok(min_rxq, vf->min_rx_rings, avail_rx_rings) || + !bnxt_param_ok(max_txq, vf->max_tx_rings, avail_tx_rings) || + !bnxt_param_ok(max_rxq, vf->max_rx_rings, avail_rx_rings)) { + bnxt_vf_ndo_end(bp); + return -ENOBUFS; + } + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) { + bnxt_vf_ndo_end(bp); + return rc; + } + + rc = hwrm_req_replace(bp, req, &bp->vf_resc_cfg_input, sizeof(*req)); + if (rc) { + bnxt_vf_ndo_end(bp); + return rc; + } + + req->vf_id = cpu_to_le16(vf->fw_fid); + req->min_tx_rings = cpu_to_le16(min_txq); + req->min_rx_rings = cpu_to_le16(min_rxq); + req->max_tx_rings = cpu_to_le16(max_txq); + req->max_rx_rings = cpu_to_le16(max_rxq); + req->flags = cpu_to_le16(FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED); + + bnxt_adjust_ring_resc(bp, vf, req); + + bnxt_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + bnxt_record_ring_resc(bp, vf, req); + bnxt_req_drop(bp, req); + bnxt_vf_ndo_end(bp); + return rc; +} +#endif + +int bnxt_get_vf_config(struct net_device *dev, int vf_id, + struct ifla_vf_info *ivi) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); + + ivi->vf = vf_id; + + if (is_valid_ether_addr(vf->mac_addr)) + memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN); + else + memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN); +#ifdef HAVE_IFLA_TX_RATE + ivi->max_tx_rate = vf->max_tx_rate; + ivi->min_tx_rate = vf->min_tx_rate; +#else + ivi->tx_rate = vf->max_tx_rate; +#endif + ivi->vlan = vf->vlan & VLAN_VID_MASK; + ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT; +#ifdef HAVE_VF_SPOOFCHK + ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST + ivi->trusted = bnxt_is_trusted_vf(bp, vf); +#endif +#ifdef 
HAVE_NDO_SET_VF_LINK_STATE + if (!(vf->flags & BNXT_VF_LINK_FORCED)) + ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; + else if (vf->flags & BNXT_VF_LINK_UP) + ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE; + else + ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE; +#endif +#ifdef HAVE_NDO_SET_VF_QUEUES + ivi->min_tx_queues = vf->min_tx_rings; + ivi->max_tx_queues = vf->max_tx_rings; + ivi->min_rx_queues = vf->min_rx_rings; + ivi->max_rx_queues = vf->max_rx_rings; +#endif + + bnxt_vf_ndo_end(bp); + return 0; +} + +int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + u16 fw_fid; + int rc; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); + /* reject bc or mc mac addr, zero mac addr means allow + * VF to use its own mac addr + */ + if (is_multicast_ether_addr(mac)) { + bnxt_vf_ndo_end(bp); + netdev_err(dev, "Invalid VF ethernet address\n"); + return -EINVAL; + } + + memcpy(vf->mac_addr, mac, ETH_ALEN); + fw_fid = vf->fw_fid; + bnxt_vf_ndo_end(bp); + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + return hwrm_req_send(bp, req); +} + +#ifdef NEW_NDO_SET_VF_VLAN +int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, + __be16 vlan_proto) +#else +int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) +#endif +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + u16 vlan_tag; + int rc; + + if (bp->hwrm_spec_code < 0x10201) + return -ENOTSUPP; + +#ifdef NEW_NDO_SET_VF_VLAN + if (vlan_proto != htons(ETH_P_8021Q) && + (vlan_proto != htons(ETH_P_8021AD) || !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP))) + return -EPROTONOSUPPORT; +#endif + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if 
(IS_ERR(vf)) + return PTR_ERR(vf); + + if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES || (!vlan_id && qos)) { + bnxt_vf_ndo_end(bp); + return -EINVAL; + } + + vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT; + if (vlan_tag == vf->vlan) { + bnxt_vf_ndo_end(bp); + return 0; + } + + if (!netif_running(bp->dev)) { + bnxt_vf_ndo_end(bp); + return -ENETDOWN; + } + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->dflt_vlan = cpu_to_le16(vlan_tag); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); +#ifdef NEW_NDO_SET_VF_VLAN + if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) { + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID); + req->tpid = vlan_proto; + } +#endif + rc = hwrm_req_send(bp, req); + if (!rc) + vf->vlan = vlan_tag; + } + bnxt_vf_ndo_end(bp); + return rc; +} + +#ifdef HAVE_IFLA_TX_RATE +int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, + int max_tx_rate) +#else +int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int max_tx_rate) +#endif +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + u32 pf_link_speed; + int rc; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); + + pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); + if (max_tx_rate > pf_link_speed) { + bnxt_vf_ndo_end(bp); + netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n", + max_tx_rate, vf_id); + return -EINVAL; + } + +#ifdef HAVE_IFLA_TX_RATE + if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) { + bnxt_vf_ndo_end(bp); + netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n", + min_tx_rate, vf_id); + return -EINVAL; + } + if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) { + bnxt_vf_ndo_end(bp); + return 0; + } +#else + if (max_tx_rate == vf->max_tx_rate) { + bnxt_vf_ndo_end(bp); + return 0; + } +#endif + rc = 
bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req->max_bw = cpu_to_le32(max_tx_rate); +#ifdef HAVE_IFLA_TX_RATE + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + req->min_bw = cpu_to_le32(min_tx_rate); +#endif + rc = hwrm_req_send(bp, req); + if (!rc) { +#ifdef HAVE_IFLA_TX_RATE + vf->min_tx_rate = min_tx_rate; +#endif + vf->max_tx_rate = max_tx_rate; + } + } + bnxt_vf_ndo_end(bp); + return rc; +} + +static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id) +{ + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) + return 0; + + vf = &bp->pf.vf[vf_id]; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); + switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) { + case BNXT_VF_LINK_FORCED: + req->options = + FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN; + break; + case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP): + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP; + break; + default: + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + break; + } + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + return hwrm_req_send(bp, req); +} + +#ifdef HAVE_NDO_SET_VF_LINK_STATE +int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + int rc; + + vf = bnxt_vf_ndo_prep(bp, vf_id); + if (IS_ERR(vf)) + return PTR_ERR(vf); + + vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED); + switch (link) { + case IFLA_VF_LINK_STATE_AUTO: + vf->flags |= BNXT_VF_LINK_UP; + break; + case IFLA_VF_LINK_STATE_DISABLE: + vf->flags |= BNXT_VF_LINK_FORCED; + break; + case IFLA_VF_LINK_STATE_ENABLE: + vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED; + break; + default: + netdev_err(bp->dev, "Invalid link 
option\n"); + bnxt_vf_ndo_end(bp); + return -EINVAL; + } + + if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)) + rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf, + ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE); + else + rc = bnxt_set_vf_link_admin_state(bp, vf_id); + + if (rc) + rc = -EIO; + bnxt_vf_ndo_end(bp); + return rc; +} +#endif +#endif + +static void bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) +{ + int i; + struct bnxt_vf_info *vf; + + for (i = 0; i < num_vfs; i++) { + vf = &bp->pf.vf[i]; + memset(vf, 0, sizeof(*vf)); + } +} + +static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_vf_resc_free_input *req; + struct bnxt_pf_info *pf = &bp->pf; + int i, rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); + if (rc) + return rc; + + hwrm_req_hold(bp, req); + for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { + req->vf_id = cpu_to_le16(i); + rc = hwrm_req_send(bp, req); + if (rc) + break; + } + hwrm_req_drop(bp, req); + return rc; +} + +void bnxt_free_vf_stats_mem(struct bnxt *bp) +{ + int num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_info *vf; + int i; + + mutex_lock(&bp->sriov_lock); + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + return; + } + + for (i = 0; i < num_vfs; i++) { + if (vf[i].stats.hw_stats) + bnxt_free_stats_mem(bp, &vf[i].stats); + } + mutex_unlock(&bp->sriov_lock); +} + +static void bnxt_free_vf_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + struct bnxt_vf_info *vf; + int i; + + mutex_lock(&bp->sriov_lock); + bp->pf.active_vfs = 0; + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + RCU_INIT_POINTER(bp->pf.vf, NULL); + synchronize_rcu(); + kfree(vf); + + kfree(bp->pf.vf_event_bmap); + bp->pf.vf_event_bmap = NULL; + + for (i = 0; i < BNXT_MAX_VF_CMD_FWD_PAGES; i++) { + if (bp->pf.hwrm_cmd_req_addr[i]) { + dma_free_coherent(&pdev->dev, 1 << 
bp->pf.vf_hwrm_cmd_req_page_shift, + bp->pf.hwrm_cmd_req_addr[i], + bp->pf.hwrm_cmd_req_dma_addr[i]); + bp->pf.hwrm_cmd_req_addr[i] = NULL; + } + } + mutex_unlock(&bp->sriov_lock); +} + +int bnxt_alloc_vf_stats_mem(struct bnxt *bp) +{ + int num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_info *vf; + int rc = 0; + int i; + + mutex_lock(&bp->sriov_lock); + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + return -EINVAL; + } + + for (i = 0; i < num_vfs; i++) { + bp->pf.vf[i].stats.len = sizeof(struct ctx_hw_stats); + if (bp->pf.vf[i].stats.hw_stats) + continue; + + rc = bnxt_alloc_stats_mem(bp, &bp->pf.vf[i].stats, !i); + if (rc) + break; + } + + /* Query function stat mask to the vf[0] + * stat structure for overflow processing. + */ + if (!rc) + bnxt_get_func_stats_ext_mask(bp, &bp->pf.vf[0].stats); + mutex_unlock(&bp->sriov_lock); + + if (rc) + bnxt_free_vf_stats_mem(bp); + return rc; +} + +static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) +{ + struct pci_dev *pdev = bp->pdev; + u32 nr_pages, size, i, j, k = 0; + u32 page_size, reqs_per_page; + void *p; + + p = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL); + if (!p) + return -ENOMEM; + + rcu_assign_pointer(bp->pf.vf, p); + bnxt_set_vf_attr(bp, num_vfs); + + size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE; + page_size = BNXT_PAGE_SIZE; + bp->pf.vf_hwrm_cmd_req_page_shift = BNXT_PAGE_SHIFT; + /* Adjust the page size to make sure we fit all VFs to up to 4 chunks*/ + while (size > page_size * BNXT_MAX_VF_CMD_FWD_PAGES) { + page_size *= 2; + bp->pf.vf_hwrm_cmd_req_page_shift++; + } + nr_pages = DIV_ROUND_UP(size, page_size); + reqs_per_page = page_size / BNXT_HWRM_REQ_MAX_SIZE; + + for (i = 0; i < nr_pages; i++) { + bp->pf.hwrm_cmd_req_addr[i] = + dma_alloc_coherent(&pdev->dev, page_size, + &bp->pf.hwrm_cmd_req_dma_addr[i], + GFP_KERNEL); + + if (!bp->pf.hwrm_cmd_req_addr[i]) + return -ENOMEM; + + for (j = 0; j < 
reqs_per_page && k < num_vfs; j++) { + struct bnxt_vf_info *vf = &bp->pf.vf[k]; + + vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] + + j * BNXT_HWRM_REQ_MAX_SIZE; + vf->hwrm_cmd_req_dma_addr = + bp->pf.hwrm_cmd_req_dma_addr[i] + j * + BNXT_HWRM_REQ_MAX_SIZE; + k++; + } + } + + bp->pf.vf_event_bmap = kzalloc(ALIGN(DIV_ROUND_UP(num_vfs, 8), sizeof(long)), GFP_KERNEL); + if (!bp->pf.vf_event_bmap) + return -ENOMEM; + + bp->pf.hwrm_cmd_req_pages = nr_pages; + return 0; +} + +static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) +{ + struct hwrm_func_buf_rgtr_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); + if (rc) + return rc; + + req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req->req_buf_page_size = cpu_to_le16(bp->pf.vf_hwrm_cmd_req_page_shift); + req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + + return hwrm_req_send(bp, req); +} + +static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +{ + struct hwrm_func_cfg_input *req; + struct bnxt_vf_info *vf; + int rc; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + vf = &bp->pf.vf[vf_id]; + req->fid = cpu_to_le16(vf->fw_fid); + + if (is_valid_ether_addr(vf->mac_addr)) { + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN); + } + if (vf->vlan) { + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req->dflt_vlan = cpu_to_le16(vf->vlan); + } + if (vf->max_tx_rate) { + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); + req->max_bw = cpu_to_le32(vf->max_tx_rate); +#ifdef HAVE_IFLA_TX_RATE + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); + 
req->min_bw = cpu_to_le32(vf->min_tx_rate); +#endif + } + if (vf->flags & BNXT_VF_TRUST) + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + + return hwrm_req_send(bp, req); +} + +static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs) +{ + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_cfg_input *cfg_req; + struct hwrm_func_qcaps_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto err; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &cfg_req); + if (rc) + goto err; + + cfg_req->fid = cpu_to_le16(0xffff); + cfg_req->enables2 = cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF | + FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF); + cfg_req->roce_max_av_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs); + cfg_req->roce_max_cq_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs); + cfg_req->roce_max_mrw_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs); + cfg_req->roce_max_qp_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs); + cfg_req->roce_max_srq_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs); + cfg_req->roce_max_gid_per_vf = cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs); + + rc = hwrm_req_send(bp, cfg_req); + if (rc) + goto err; + + hwrm_req_drop(bp, req); + return; + +err: + hwrm_req_drop(bp, req); + netdev_err(bp->dev, "RoCE sriov configuration failed\n"); +} + +/* Only called by PF to reserve resources for VFs, returns actual number of + * VFs configured, or < 0 on error. 
+ */ +static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) +{ + struct hwrm_func_vf_resource_cfg_input *req; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; + u16 vf_stat_ctx, vf_vnics, vf_ring_grps; + struct bnxt_pf_info *pf = &bp->pf; + int i, rc, min = 1; + u16 vf_msix = 0; + u16 vf_rss; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) + return rc; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + vf_msix = hw_resc->max_nqs - bnxt_min_nq_rings_in_use(bp); + vf_ring_grps = 0; + } else { + vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; + } + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; + else + vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; + vf_tx_rings = hw_resc->max_tx_rings - bnxt_total_tx_rings(bp); + vf_vnics = hw_resc->max_vnics - bp->nr_vnics; + vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; + + req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + min = 0; + req->min_rsscos_ctx = cpu_to_le16(min); + } + if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || + pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + req->min_cmpl_rings = cpu_to_le16(min); + req->min_tx_rings = cpu_to_le16(min); + req->min_rx_rings = cpu_to_le16(min); + req->min_l2_ctxs = cpu_to_le16(min); + req->min_vnics = cpu_to_le16(min); + req->min_stat_ctx = cpu_to_le16(min); + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + req->min_hw_ring_grps = cpu_to_le16(min); + } else { + vf_cp_rings /= num_vfs; + vf_tx_rings /= num_vfs; + vf_rx_rings /= num_vfs; + if ((bp->fw_cap & BNXT_FW_CAP_VF_RESV_VNICS_MAXVFS) && + vf_vnics >= pf->max_vfs) { + /* Take into account that FW has reserved 1 VNIC for each pf->max_vfs */ + vf_vnics = 
(vf_vnics - pf->max_vfs + num_vfs) / num_vfs; + } else { + vf_vnics /= num_vfs; + } + vf_stat_ctx /= num_vfs; + vf_ring_grps /= num_vfs; + vf_rss /= num_vfs; + + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); + req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->min_tx_rings = cpu_to_le16(vf_tx_rings); + req->min_rx_rings = cpu_to_le16(vf_rx_rings); + req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->min_vnics = cpu_to_le16(vf_vnics); + req->min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_rsscos_ctx = cpu_to_le16(vf_rss); + } + req->max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->max_tx_rings = cpu_to_le16(vf_tx_rings); + req->max_rx_rings = cpu_to_le16(vf_rx_rings); + req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->max_vnics = cpu_to_le16(vf_vnics); + req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_rsscos_ctx = cpu_to_le16(vf_rss); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + req->max_msix = cpu_to_le16(vf_msix / num_vfs); + + hwrm_req_hold(bp, req); + for (i = 0; i < num_vfs; i++) { + struct bnxt_vf_info *vf = &pf->vf[i]; + + vf->fw_fid = pf->first_vf_id + i; + if (bnxt_set_vf_link_admin_state(bp, i)) { + rc = -EIO; + break; + } + + if (reset) { + rc = __bnxt_set_vf_params(bp, i); + if (rc) + break; + } + + req->vf_id = cpu_to_le16(vf->fw_fid); + rc = hwrm_req_send(bp, req); + if (rc) + break; + pf->active_vfs = i + 1; + vf->min_tx_rings = le16_to_cpu(req->min_tx_rings); + vf->max_tx_rings = vf_tx_rings; + vf->min_rx_rings = le16_to_cpu(req->min_rx_rings); + vf->max_rx_rings = vf_rx_rings; + vf->min_cp_rings = le16_to_cpu(req->min_cmpl_rings); + vf->min_stat_ctxs = le16_to_cpu(req->min_stat_ctx); + vf->min_ring_grps = le16_to_cpu(req->min_hw_ring_grps); + vf->min_vnics = le16_to_cpu(req->min_vnics); + } + + if (pf->active_vfs) { + u16 n = pf->active_vfs; + + hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n; + 
hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= + le16_to_cpu(req->min_hw_ring_grps) * n; + hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n; + hw_resc->max_rsscos_ctxs -= + le16_to_cpu(req->min_rsscos_ctx) * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hw_resc->max_nqs -= vf_msix; + + memcpy(&bp->vf_resc_cfg_input, req, sizeof(*req)); + rc = pf->active_vfs; + } + hwrm_req_drop(bp, req); + return rc; +} + +/* Only called by PF to reserve resources for VFs, returns actual number of + * VFs configured, or < 0 on error. + */ +static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) +{ + u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_cfg_input *req; + int total_vf_tx_rings = 0; + u16 vf_ring_grps; + u32 rc, mtu, i; + + rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req); + if (rc) + return rc; + + /* Remaining rings are distributed equally amongs VF's for now */ + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / + num_vfs; + else + vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) / + num_vfs; + vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs; + vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs; + vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; + vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); + + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + 
FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); + + if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) { + req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO; + req->enables |= + cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE); + } + + mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + req->mru = cpu_to_le16(mtu); + req->admin_mtu = cpu_to_le16(mtu); + + req->num_rsscos_ctxs = cpu_to_le16(1); + req->num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->num_tx_rings = cpu_to_le16(vf_tx_rings); + req->num_rx_rings = cpu_to_le16(vf_rx_rings); + req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->num_l2_ctxs = cpu_to_le16(4); + + req->num_vnics = cpu_to_le16(vf_vnics); + /* FIXME spec currently uses 1 bit for stats ctx */ + req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + + hwrm_req_hold(bp, req); + for (i = 0; i < num_vfs; i++) { + struct bnxt_vf_info *vf = &pf->vf[i]; + int vf_tx_rsvd = vf_tx_rings; + + req->fid = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); + if (rc) + break; + pf->active_vfs = i + 1; + vf->fw_fid = le16_to_cpu(req->fid); + rc = __bnxt_hwrm_get_tx_rings(bp, vf->fw_fid, &vf_tx_rsvd); + if (rc) + break; + total_vf_tx_rings += vf_tx_rsvd; + vf->min_tx_rings = vf_tx_rsvd; + vf->max_tx_rings = vf_tx_rsvd; + vf->min_rx_rings = vf_rx_rings; + vf->max_rx_rings = vf_rx_rings; + } + hwrm_req_drop(bp, req); + if (pf->active_vfs) { + hw_resc->max_tx_rings -= total_vf_tx_rings; + hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; + hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs; + hw_resc->max_cp_rings -= vf_cp_rings * num_vfs; + hw_resc->max_rsscos_ctxs -= num_vfs; + hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs; + hw_resc->max_vnics -= vf_vnics * num_vfs; + rc = pf->active_vfs; + } + return rc; +} + +static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset) +{ + if (BNXT_NEW_RM(bp)) + return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, 
reset); + else + return bnxt_hwrm_func_cfg(bp, num_vfs); +} + +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) +{ + int rc; + + /* Register buffers for VFs */ + rc = bnxt_hwrm_func_buf_rgtr(bp); + if (rc) + return rc; + + /* Reserve resources for VFs */ + rc = bnxt_func_cfg(bp, *num_vfs, reset); + if (rc != *num_vfs) { + if (rc <= 0) { + netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n"); + *num_vfs = 0; + return rc; + } + netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc); + *num_vfs = rc; + } + + if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp) && + bnxt_ulp_registered(bp->edev)) + bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs); + + return 0; +} + +static int bnxt_get_msix_vec_per_vf(struct bnxt *bp, u32 *msix_per_vf) +{ + u16 bits = sizeof(*msix_per_vf); + union bnxt_nvm_data *data; + dma_addr_t data_dma_addr; + u16 dim = 1; + int rc; + + /* On older FW, this will be 0, in which case fetch it from NVM */ + if (bp->pf.max_msix_vfs) { + *msix_per_vf = bp->pf.max_msix_vfs; + return 0; + } + + data = dma_zalloc_coherent(&bp->pdev->dev, sizeof(*data), + &data_dma_addr, GFP_KERNEL); + if (!data) + return -ENOMEM; + + rc = bnxt_hwrm_nvm_get_var(bp, data_dma_addr, NVM_OFF_MSIX_VEC_PER_VF, + dim, bp->pf.fw_fid - 1, bits); + if (rc) + *msix_per_vf = 1; /* At least 1 MSI-X per VF */ + else + *msix_per_vf = le32_to_cpu(data->val32); + + dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); + + return rc; +} + +static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) +{ + int rc = 0, vfs_supported; + int min_rx_rings, min_tx_rings, min_rss_ctxs; + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int tx_ok = 0, rx_ok = 0, rss_ok = 0; + u32 nvm_cfg_msix_per_vf = 1; + int avail_cp, avail_stat; + + /* Check if we can enable requested num of vf's. At a mininum + * we require 1 RX 1 TX rings for each VF. In this minimum conf + * features like TPA will not be available. 
+ */ + vfs_supported = *num_vfs; + + avail_cp = bnxt_get_avail_cp_rings_for_en(bp); + avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp); + avail_cp = min_t(int, avail_cp, avail_stat); + + /* Workaround for Thor HW issue (fixed in B2, so check + * for metal version < 2). + * Create only those many VFs with which NQ's/VF >= N. + * where, N = MSI-X table size advertised in the VF's PCIe configuration space + * Also, it is expected to be rounded to a multiple of 8 as that is how the HW + * is programmed + * Starting with 2.28, FW has implemented workaround to productize Thor SRIOV + * with Small VFs only(VF# 128 and above), while discontinuing use of Big VFs + * as the above HW bug is hit only when using Big VFs(first 128 VFs). + * FW indicates this via VF_SCALE_SUPPORTED bit in FW QCAPs + */ + if (BNXT_CHIP_THOR(bp) && bp->chip_rev == 1 && + bp->ver_resp.chip_metal < 2 && + !(bp->fw_cap & BNXT_FW_CAP_VF_SCALE_SUPPORTED)) { + u32 max_vf_msix, max_vfs_possible; + + max_vf_msix = hw_resc->max_nqs - bnxt_min_nq_rings_in_use(bp); + bnxt_get_msix_vec_per_vf(bp, &nvm_cfg_msix_per_vf); + max_vfs_possible = round_down(max_vf_msix / nvm_cfg_msix_per_vf, 8); + vfs_supported = min_t(u32, max_vfs_possible, vfs_supported); + } + + while (vfs_supported) { + min_rx_rings = vfs_supported; + min_tx_rings = vfs_supported; + min_rss_ctxs = vfs_supported; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >= + min_rx_rings) + rx_ok = 1; + } else { + if (hw_resc->max_rx_rings - bp->rx_nr_rings >= + min_rx_rings) + rx_ok = 1; + } + if ((hw_resc->max_vnics - bp->nr_vnics < min_rx_rings) || + (avail_cp < min_rx_rings)) + rx_ok = 0; + + if ((hw_resc->max_tx_rings - bnxt_total_tx_rings(bp) >= + min_tx_rings) && (avail_cp >= min_tx_rings)) + tx_ok = 1; + + if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >= + min_rss_ctxs) + rss_ok = 1; + + if (tx_ok && rx_ok && rss_ok) + break; + + vfs_supported--; + } + + if (!vfs_supported) { + netdev_err(bp->dev, 
"Cannot enable VF's as all resources are used by PF\n"); + return -EINVAL; + } + + if (vfs_supported != *num_vfs) { + netdev_info(bp->dev, "Requested VFs %d, can enable %d\n", + *num_vfs, vfs_supported); + *num_vfs = vfs_supported; + } + + rtnl_lock(); + if (!bnxt_ulp_registered(bp->edev)) { + u16 max_nqs = hw_resc->max_nqs; + + if (netif_running(bp->dev)) { + bp->sriov_cfg = false; + bnxt_close_nic(bp, true, false); + bp->sriov_cfg = true; + } + + /* Reduce max NQs so that reserve ring do not see NQs + * available for ulp. + */ + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + hw_resc->max_nqs = bnxt_min_nq_rings_in_use(bp); + + /* Tell reserve rings to consider reservation again */ + bnxt_set_ulp_msix_num(bp, 0); + + if (netif_running(bp->dev)) + rc = bnxt_open_nic(bp, true, false); + hw_resc->max_nqs = max_nqs; + if (rc) { + rtnl_unlock(); + return rc; + } + } + rtnl_unlock(); + + rc = bnxt_alloc_vf_resources(bp, *num_vfs); + if (rc) + goto err_out1; + + rc = bnxt_cfg_hw_sriov(bp, num_vfs, false); + if (rc) + goto err_out2; + + rc = pci_enable_sriov(bp->pdev, *num_vfs); + if (rc) { + netdev_err(bp->dev, "pci_enable_sriov failed : %d\n", rc); + goto err_out2; + } + + rc = bnxt_create_vfs_sysfs(bp); + if (rc) + netdev_err(bp->dev, "Could not create SRIOV sysfs entries %d\n", rc); + + rc = bnxt_alloc_vf_stats_mem(bp); + if (rc) + netdev_dbg(bp->dev, "Failed to allocate VF stats memory\n"); + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return 0; + + /* Create representors for VFs in switchdev mode */ + mutex_lock(&bp->vf_rep_lock); + rc = bnxt_vf_reps_create(bp); + mutex_unlock(&bp->vf_rep_lock); + if (rc) { + netdev_info(bp->dev, "Cannot enable VFs as representors cannot be created\n"); + goto err_out3; + } + + return 0; + +err_out3: + bnxt_destroy_vfs_sysfs(bp); + + bnxt_free_vf_stats_mem(bp); + + /* Disable SR-IOV */ + pci_disable_sriov(bp->pdev); + +err_out2: + /* Free the resources reserved for various VF's */ + bnxt_hwrm_func_vf_resource_free(bp, 
*num_vfs); + + /* Restore the max resources */ + bnxt_hwrm_func_qcaps(bp, false); + +err_out1: + bnxt_free_vf_resources(bp); + + return rc; +} + +void bnxt_sriov_disable(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + + if (!num_vfs) + return; + + bnxt_destroy_vfs_sysfs(bp); + + /* synchronize VF and VF-rep create and destroy + * and to protect the array of VF structures + */ + mutex_lock(&bp->vf_rep_lock); + bnxt_vf_reps_destroy(bp); + mutex_unlock(&bp->vf_rep_lock); + + /* Free VF stats mem after destroying VF-reps */ + bnxt_free_vf_stats_mem(bp); + + if (bnxt_tc_flower_enabled(bp)) + bnxt_tc_flush_flows(bp); + + if (pci_vfs_assigned(bp->pdev)) { + bnxt_hwrm_fwd_async_event_cmpl( + bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); + netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", + num_vfs); + } else { + pci_disable_sriov(bp->pdev); + /* Free the HW resources reserved for various VF's */ + bnxt_hwrm_func_vf_resource_free(bp, num_vfs); + } + + bnxt_free_vf_resources(bp); + + /* Reclaim all resources for the PF. 
*/ + rtnl_lock(); + bnxt_set_dflt_ulp_stat_ctxs(bp); + bnxt_restore_pf_fw_resources(bp); + rtnl_unlock(); +} + +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + rtnl_lock(); + if (!netif_running(dev)) { + netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); + rtnl_unlock(); + return 0; + } + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n"); + rtnl_unlock(); + return 0; + } + bp->sriov_cfg = true; + rtnl_unlock(); + + if (pci_vfs_assigned(bp->pdev)) { + netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n"); + num_vfs = 0; + goto sriov_cfg_exit; + } + + /* Check if enabled VFs is same as requested */ + if (num_vfs && num_vfs == bp->pf.active_vfs) + goto sriov_cfg_exit; + + /* if there are previous existing VFs, clean them up */ + bnxt_sriov_disable(bp); + if (!num_vfs) + goto sriov_cfg_exit; + + rc = bnxt_sriov_enable(bp, &num_vfs); + +sriov_cfg_exit: + bp->sriov_cfg = false; + wake_up(&bp->sriov_cfg_wait); + + return rc ? 
rc : num_vfs; +} + +#ifndef PCIE_SRIOV_CONFIGURE + +static struct workqueue_struct *bnxt_iov_wq; + +void bnxt_sriov_init(unsigned int num_vfs) +{ + if (num_vfs) + bnxt_iov_wq = create_singlethread_workqueue("bnxt_iov_wq"); +} + +void bnxt_sriov_exit(void) +{ + if (bnxt_iov_wq) + destroy_workqueue(bnxt_iov_wq); + bnxt_iov_wq = NULL; +} + +static void bnxt_iov_task(struct work_struct *work) +{ + struct bnxt *bp; + + bp = container_of(work, struct bnxt, iov_task); + bnxt_sriov_configure(bp->pdev, bp->req_vfs); +} + +void bnxt_start_sriov(struct bnxt *bp, int num_vfs) +{ + int pos, req_vfs; + + if (!num_vfs || !BNXT_PF(bp)) + return; + + pos = pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + return; + } else { + u16 t_vf = 0; + + pci_read_config_word(bp->pdev, pos + PCI_SRIOV_TOTAL_VF, &t_vf); + req_vfs = min_t(int, num_vfs, (int)t_vf); + } + + if (!bnxt_iov_wq) { + netdev_warn(bp->dev, "Work queue not available to start SRIOV\n"); + return; + } + bp->req_vfs = req_vfs; + INIT_WORK(&bp->iov_task, bnxt_iov_task); + queue_work(bnxt_iov_wq, &bp->iov_task); +} +#endif + +static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + void *encap_resp, __le64 encap_resp_addr, + __le16 encap_resp_cpr, u32 msg_size) +{ + struct hwrm_fwd_resp_input *req; + int rc; + + if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) { + netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n", + msg_size); + return -EINVAL; + } + + rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_len = cpu_to_le16(msg_size); + req->encap_resp_addr = encap_resp_addr; + req->encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req->encap_resp, encap_resp, msg_size); + + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_err(bp->dev, "hwrm_fwd_resp failed. 
rc:%d\n", rc); + return rc; +} + +static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + struct hwrm_reject_fwd_resp_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); + if (!rc) { + if (msg_size > sizeof(req->encap_request)) + msg_size = sizeof(req->encap_request); + + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); + + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); + return rc; +} + +static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, + u32 msg_size) +{ + struct hwrm_exec_fwd_resp_input *req; + int rc; + + if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + + rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); + + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); + return rc; +} + +static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); + struct hwrm_func_vf_cfg_input *req = + (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; + + /* Allow VF to set a valid MAC address, if trust is set to on or + * if the PF assigned MAC address is zero + */ + if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { + bool trust = bnxt_is_trusted_vf(bp, vf); + + if (is_valid_ether_addr(req->dflt_mac_addr) && + (trust || !is_valid_ether_addr(vf->mac_addr) || + ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) { + ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + } + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + } + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); +} + +static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input); + struct hwrm_cfa_l2_filter_alloc_input *req = + (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; + bool mac_ok = false; + + if (!is_valid_ether_addr((const u8 *)req->l2_addr)) + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + + /* Allow VF to set a valid MAC address, if trust is set to on. + * Or VF MAC address must first match MAC address in PF's context. 
+ * Otherwise, it must match the VF MAC address if firmware spec >= + * 1.2.2 + */ + if (bnxt_is_trusted_vf(bp, vf)) { + mac_ok = true; + } else if (is_valid_ether_addr(vf->mac_addr)) { + if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) + mac_ok = true; + } else if (is_valid_ether_addr(vf->vf_mac_addr)) { + if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr)) + mac_ok = true; + } else { + /* There are two cases: + * 1.If firmware spec < 0x10202,VF MAC address is not forwarded + * to the PF and so it doesn't have to match + * 2.Allow VF to modify it's own MAC when PF has not assigned a + * valid MAC address and firmware spec >= 0x10202 + */ + mac_ok = true; + } + if (mac_ok) + return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); + + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); +} + +static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + + if (!(vf->flags & BNXT_VF_LINK_FORCED)) { + /* real link */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); + } else { + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp; + struct hwrm_port_phy_qcfg_input *phy_qcfg_req; + + phy_qcfg_req = + (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; + mutex_lock(&bp->link_lock); + memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, + sizeof(phy_qcfg_resp)); + mutex_unlock(&bp->link_lock); + phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); + phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + phy_qcfg_resp.option_flags &= + ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED; + phy_qcfg_resp.valid = 1; + + if (vf->flags & BNXT_VF_LINK_UP) { + /* if physical link is down, force link up on VF */ + if (phy_qcfg_resp.link != + PORT_PHY_QCFG_RESP_LINK_LINK) { + phy_qcfg_resp.link = + PORT_PHY_QCFG_RESP_LINK_LINK; + phy_qcfg_resp.link_speed = cpu_to_le16( + PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); + phy_qcfg_resp.duplex_cfg = + PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; + phy_qcfg_resp.duplex_state = 
+ PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; + phy_qcfg_resp.pause = + (PORT_PHY_QCFG_RESP_PAUSE_TX | + PORT_PHY_QCFG_RESP_PAUSE_RX); + } + } else { + /* force link down */ + phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; + phy_qcfg_resp.link_speed = 0; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; + phy_qcfg_resp.pause = 0; + } + rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, + phy_qcfg_req->resp_addr, + phy_qcfg_req->cmpl_ring, + sizeof(phy_qcfg_resp)); + } + return rc; +} + +static int bnxt_hwrm_oem_cmd(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + struct hwrm_oem_cmd_input *oem_cmd = vf->hwrm_cmd_req_addr; + struct hwrm_oem_cmd_output oem_out = { 0 }; + struct tfc *tfcp = bp->tfp; + int rc = 0; + + if (oem_cmd->oem_id == 0x14e4 && + oem_cmd->naming_authority == OEM_CMD_REQ_NAMING_AUTHORITY_PCI_SIG && + oem_cmd->message_family == OEM_CMD_REQ_MESSAGE_FAMILY_TRUFLOW) { + uint16_t oem_data_len = sizeof(oem_out.oem_data); + uint16_t resp_len = oem_data_len; + uint32_t resp[18] = { 0 }; + + rc = tfc_oem_cmd_process(tfcp, oem_cmd->oem_data, resp, &resp_len); + if (rc) { + netdev_dbg(bp->dev, + "OEM cmd process error id 0x%x, name 0x%x, family 0x%x rc %d\n", + oem_cmd->oem_id, oem_cmd->naming_authority, + oem_cmd->message_family, rc); + return rc; + } + + oem_out.error_code = 0; + oem_out.req_type = oem_cmd->req_type; + oem_out.seq_id = oem_cmd->seq_id; + oem_out.resp_len = cpu_to_le16(sizeof(oem_out)); + oem_out.oem_id = oem_cmd->oem_id; + oem_out.naming_authority = oem_cmd->naming_authority; + oem_out.message_family = oem_cmd->message_family; + memcpy(oem_out.oem_data, resp, resp_len); + oem_out.valid = 1; + + rc = bnxt_hwrm_fwd_resp(bp, vf, &oem_out, oem_cmd->resp_addr, + oem_cmd->cmpl_ring, oem_out.resp_len); + if (rc) + netdev_dbg(bp->dev, "Failed to send HWRM_FWD_RESP VF 0x%p rc %d\n", vf, rc); + } else { + netdev_dbg(bp->dev, "Unsupported OEM cmd id 0x%x, name 0x%x, family 0x%x\n", + oem_cmd->oem_id, oem_cmd->naming_authority, 
oem_cmd->message_family); + rc = -EOPNOTSUPP; + } + + return rc; +} + +int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp, u32 *in, u16 in_len, u32 *out, u16 out_len) +{ + struct hwrm_oem_cmd_output *resp; + struct hwrm_oem_cmd_input *req; + int rc = 0; + + if (!BNXT_VF(bp)) { + netdev_dbg(bp->dev, "Not a VF. Command not supported\n"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(bp, req, HWRM_OEM_CMD); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->oem_id = cpu_to_le32(0x14e4); + req->naming_authority = OEM_CMD_REQ_NAMING_AUTHORITY_PCI_SIG; + req->message_family = OEM_CMD_REQ_MESSAGE_FAMILY_TRUFLOW; + memcpy(req->oem_data, in, in_len); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (resp->oem_id == 0x14e4 && + resp->naming_authority == OEM_CMD_REQ_NAMING_AUTHORITY_PCI_SIG && + resp->message_family == OEM_CMD_REQ_MESSAGE_FAMILY_TRUFLOW) + memcpy(out, resp->oem_data, out_len); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) +{ + int rc = 0; + struct input *encap_req = vf->hwrm_cmd_req_addr; + u32 req_type = le16_to_cpu(encap_req->req_type); + + switch (req_type) { + case HWRM_FUNC_VF_CFG: + rc = bnxt_vf_configure_mac(bp, vf); + break; + case HWRM_CFA_L2_FILTER_ALLOC: + rc = bnxt_vf_validate_set_mac(bp, vf); + break; + case HWRM_OEM_CMD: + rc = bnxt_hwrm_oem_cmd(bp, vf); + break; + case HWRM_FUNC_CFG: + /* TODO Validate if VF is allowed to change mac address, + * mtu, num of rings etc + */ + rc = bnxt_hwrm_exec_fwd_resp( + bp, vf, sizeof(struct hwrm_func_cfg_input)); + break; + case HWRM_PORT_PHY_QCFG: + rc = bnxt_vf_set_link(bp, vf); + break; + default: + rc = bnxt_hwrm_fwd_err_resp(bp, vf, bp->hwrm_max_req_len); + break; + } + return rc; +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id; + + /* Scan through VF's and process commands */ + while (1) { + vf_id = 
find_next_bit(bp->pf.vf_event_bmap, active_vfs, i); + if (vf_id >= active_vfs) + break; + + clear_bit(vf_id, bp->pf.vf_event_bmap); + bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]); + i = vf_id + 1; + } +} + +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) +{ + struct hwrm_func_vf_cfg_input *req; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + if (bp->hwrm_spec_code < 0x10202) { + if (is_valid_ether_addr(bp->vf.mac_addr)) + rc = -EADDRNOTAVAIL; + goto mac_done; + } + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + goto mac_done; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + if (!strict) + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + rc = hwrm_req_send(bp, req); +mac_done: + if (rc && strict) { + rc = -EADDRNOTAVAIL; + netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", + mac); + return rc; + } + return 0; +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; + bool inform_pf = false; + + if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) + return; + + req->fid = cpu_to_le16(0xffff); + + resp = hwrm_req_hold(bp, req); + if (hwrm_req_send(bp, req)) + goto update_vf_mac_exit; + + /* Store MAC address from the firmware. There are 2 cases: + * 1. MAC address is valid. It is assigned from the PF and we + * need to override the current VF MAC address with it. + * 2. MAC address is zero. The VF will use a random MAC address by + * default but the stored zero MAC will allow the VF user to change + * the random MAC address using ndo_set_mac_address() if he wants. + */ + if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) { + memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); + /* This means we are now using our own MAC address, let + * the PF know about this MAC address. 
+ */ + if (!is_valid_ether_addr(bp->vf.mac_addr)) + inform_pf = true; + } + + /* overwrite netdev dev_addr with admin VF MAC */ + if (is_valid_ether_addr(bp->vf.mac_addr)) + eth_hw_addr_set(bp->dev, bp->vf.mac_addr); +update_vf_mac_exit: + hwrm_req_drop(bp, req); + if (inform_pf) + bnxt_approve_mac(bp, bp->dev->dev_addr, false); +} + +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) { + vf = &vf[vf_idx]; + if (state == EVENT_DATA1_VNIC_CHNG_VNIC_STATE_ALLOC) + vf->vnic_state_pending = 1; + else if (state == EVENT_DATA1_VNIC_CHNG_VNIC_STATE_FREE) + vf->vnic_state_pending = 0; + } + rcu_read_unlock(); +} + +void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) { + vf = &vf[vf_idx]; + vf->vnic_state = vf->vnic_state_pending; + } + rcu_read_unlock(); +} + +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf = vf; + bool up = false; + + rcu_read_lock(); + vf = rcu_dereference(pf->vf); + if (vf) + up = !!vf[vf_idx].vnic_state; + rcu_read_unlock(); + return up; +} + +bool bnxt_vf_cfg_change(struct bnxt *bp, u16 vf_id, u32 data1) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + bool rc = false; + u16 vf_idx; + + if (!(data1 & + ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE)) + return false; + + rcu_read_lock(); + vf_idx = vf_id - pf->first_vf_id; + vf = rcu_dereference(pf->vf); + if (vf && vf_idx < pf->active_vfs) { + vf[vf_idx].cfg_change = 1; + rc = true; + } + rcu_read_unlock(); + return rc; +} + +void bnxt_update_vf_cfg(struct bnxt *bp) +{ + struct bnxt_pf_info *pf = &bp->pf; + struct bnxt_vf_info *vf; + int i, num_vfs; + + mutex_lock(&bp->sriov_lock); + num_vfs = pf->active_vfs; + if 
(!num_vfs) + goto vf_cfg_done; + + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + for (i = 0; i < num_vfs; i++) { + if (vf[i].cfg_change) { + vf[i].cfg_change = 0; + bnxt_hwrm_func_qcfg_flags(bp, &vf[i]); + } + } +vf_cfg_done: + mutex_unlock(&bp->sriov_lock); +} + +void bnxt_reset_vf_stats(struct bnxt *bp) +{ + struct bnxt_vf_info *vfp; + struct bnxt_vf_info *vf; + int num_vfs; + int vf_idx; + int len; + u64 *sw; + + mutex_lock(&bp->sriov_lock); + + vf = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + if (!vf) { + mutex_unlock(&bp->sriov_lock); + return; + } + + num_vfs = bp->pf.active_vfs; + len = vf[0].stats.len; + + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) { + vfp = &vf[vf_idx]; + if (vfp->vnic_state) /* !free */ + continue; + + sw = vfp->stats.sw_stats; + if (!sw) + continue; + + memset(sw, 0, len); + } + mutex_unlock(&bp->sriov_lock); +} + +#else + +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset) +{ + if (*num_vfs) + return -EOPNOTSUPP; + return 0; +} + +void bnxt_sriov_disable(struct bnxt *bp) +{ +} + +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) +{ + netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enable\n"); +} + +void bnxt_update_vf_mac(struct bnxt *bp) +{ +} + +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict) +{ + return 0; +} + +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state) +{ +} + +void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx) +{ +} + +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx) +{ + return false; +} + +bool bnxt_vf_cfg_change(struct bnxt *bp, u16 vf_id, u32 data1) +{ + return false; +} + +static void bnxt_update_vf_cfg(struct bnxt *bp) +{ +} + +void bnxt_reset_vf_stats(struct bnxt *bp) +{ +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.h new file mode 100644 index 000000000000..224b17cab1e7 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov.h @@ -0,0 +1,82 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2021 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_SRIOV_H +#define BNXT_SRIOV_H + +#define BNXT_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \ + sizeof(struct hwrm_fwd_resp_input)) + +#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \ + ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\ + offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id)) + +#define BNXT_VF_MIN_RSS_CTX 1 +#define BNXT_VF_MAX_RSS_CTX 1 +#define BNXT_VF_MIN_L2_CTX 1 +#define BNXT_VF_MAX_L2_CTX 4 + +#ifdef CONFIG_BNXT_SRIOV +#define BNXT_SUPPORTS_SRIOV(pdev) ((pdev)->sriov) +#else +#define BNXT_SUPPORTS_SRIOV(pdev) 0 +#endif + +#ifdef HAVE_NDO_GET_VF_CONFIG +int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *); +int bnxt_set_vf_mac(struct net_device *, int, u8 *); +#ifdef NEW_NDO_SET_VF_VLAN +int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); +#else +int bnxt_set_vf_vlan(struct net_device *, int, u16, u8); +#endif +#ifdef HAVE_IFLA_TX_RATE +int bnxt_set_vf_bw(struct net_device *, int, int, int); +#else +int bnxt_set_vf_bw(struct net_device *, int, int); +#endif +#ifdef HAVE_NDO_SET_VF_LINK_STATE +int bnxt_set_vf_link_state(struct net_device *, int, int); +#endif +#ifdef HAVE_VF_SPOOFCHK +int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +#endif +#ifdef HAVE_NDO_SET_VF_TRUST +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); +#endif +#ifdef HAVE_NDO_SET_VF_QUEUES +int bnxt_set_vf_queues(struct net_device *dev, int vf_id, int min_txq, + int max_txq, int min_rxq, int max_rxq); +#endif 
+#endif +int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); +#ifndef PCIE_SRIOV_CONFIGURE +void bnxt_start_sriov(struct bnxt *, int); +void bnxt_sriov_init(unsigned int); +void bnxt_sriov_exit(void); +#endif +int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset); +void bnxt_sriov_disable(struct bnxt *bp); +void bnxt_hwrm_exec_fwd_req(struct bnxt *bp); +void bnxt_update_vf_mac(struct bnxt *bp); +int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict); +void bnxt_update_vf_vnic(struct bnxt *bp, u32 vf_idx, u32 state); +void bnxt_commit_vf_vnic(struct bnxt *bp, u32 vf_idx); +bool bnxt_vf_vnic_state_is_up(struct bnxt *bp, u32 vf_idx); +bool bnxt_vf_cfg_change(struct bnxt *bp, u16 vf_id, u32 data1); +void bnxt_update_vf_cfg(struct bnxt *bp); +bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf); +int bnxt_alloc_vf_stats_mem(struct bnxt *bp); +void bnxt_free_vf_stats_mem(struct bnxt *bp); +void bnxt_reset_vf_stats(struct bnxt *bp); +int bnxt_hwrm_tf_oem_cmd(struct bnxt *bp, u32 *in, u16 in_len, u32 *out, u16 out_len); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.c new file mode 100644 index 000000000000..799a493ffbe2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.c @@ -0,0 +1,266 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. + * Copyright (c) 2023 Broadcom Inc. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + */ + +#include +#include +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_sriov_sysfs.h" + +struct vf_attributes { + struct attribute attr; + ssize_t (*show)(struct bnxt_vf_sysfs_obj *vf_so, struct vf_attributes *vfa, + char *buf); + ssize_t (*store)(struct bnxt_vf_sysfs_obj *vf_so, struct vf_attributes *vfa, + const char *buf, size_t count); +}; + +static ssize_t vf_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct bnxt_vf_sysfs_obj *g = container_of(kobj, struct bnxt_vf_sysfs_obj, kobj); + + if (!ga->show) + return -EIO; + + return ga->show(g, ga, buf); +} + +static ssize_t vf_attr_store(struct kobject *kobj, + struct attribute *attr, + const char *buf, size_t size) +{ + struct vf_attributes *ga = + container_of(attr, struct vf_attributes, attr); + struct bnxt_vf_sysfs_obj *g = container_of(kobj, struct bnxt_vf_sysfs_obj, kobj); + + if (!ga->store) + return -EIO; + + return ga->store(g, ga, buf, size); +} + +#define _sprintf(p, buf, format, arg...) \ + ((PAGE_SIZE - (int)((p) - (buf))) <= 0 ? 
0 : \ + scnprintf((p), PAGE_SIZE - (int)((p) - (buf)), format, ## arg)) + +static ssize_t stats_show(struct bnxt_vf_sysfs_obj *g, struct vf_attributes *oa, + char *buf) +{ + struct bnxt_stats_mem *stats = &g->stats; + struct bnxt *bp = g->parent_pf_bp; + struct ctx_hw_stats *hw_stats; + u64 rx_dropped, tx_dropped; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + char *p = buf; + int rc; + + memset(stats->hw_stats, 0, stats->len); + + mutex_lock(&bp->sriov_lock); + rc = bnxt_hwrm_func_qstats(bp, stats, + cpu_to_le16(g->fw_fid), 0); + + if (rc) { + mutex_unlock(&bp->sriov_lock); + return rc; + } + + hw_stats = stats->hw_stats; + + rx_packets = hw_stats->rx_ucast_pkts + hw_stats->rx_mcast_pkts + hw_stats->rx_bcast_pkts; + rx_bytes = hw_stats->rx_ucast_bytes + hw_stats->rx_mcast_bytes + hw_stats->rx_bcast_bytes; + + tx_packets = hw_stats->tx_ucast_pkts + hw_stats->tx_mcast_pkts + hw_stats->tx_bcast_pkts; + tx_bytes = hw_stats->tx_ucast_bytes + hw_stats->tx_mcast_bytes + hw_stats->tx_bcast_bytes; + + rx_dropped = hw_stats->rx_error_pkts; + tx_dropped = hw_stats->tx_error_pkts; + + p += _sprintf(p, buf, "tx_packets : %llu\n", tx_packets); + p += _sprintf(p, buf, "tx_bytes : %llu\n", tx_bytes); + p += _sprintf(p, buf, "tx_dropped : %llu\n", tx_dropped); + p += _sprintf(p, buf, "rx_packets : %llu\n", rx_packets); + p += _sprintf(p, buf, "rx_bytes : %llu\n", rx_bytes); + p += _sprintf(p, buf, "rx_dropped : %llu\n", rx_dropped); + p += _sprintf(p, buf, "rx_multicast : %llu\n", hw_stats->rx_mcast_pkts); + p += _sprintf(p, buf, "rx_broadcast : %llu\n", hw_stats->rx_bcast_pkts); + p += _sprintf(p, buf, "tx_broadcast : %llu\n", hw_stats->tx_bcast_pkts); + p += _sprintf(p, buf, "tx_multicast : %llu\n", hw_stats->tx_mcast_pkts); + + mutex_unlock(&bp->sriov_lock); + return (ssize_t)(p - buf); +} + +#define VF_ATTR(_name) struct vf_attributes vf_attr_##_name = \ + __ATTR(_name, 0644, _name##_show, NULL) + +VF_ATTR(stats); + +static struct attribute *vf_eth_attrs[] = { + 
&vf_attr_stats.attr, + NULL +}; + +#ifdef HAVE_KOBJ_DEFAULT_GROUPS +ATTRIBUTE_GROUPS(vf_eth); +#endif + +static const struct sysfs_ops vf_sysfs_ops = { + .show = vf_attr_show, + .store = vf_attr_store, +}; + +static struct kobj_type vf_type_eth = { + .sysfs_ops = &vf_sysfs_ops, +#ifdef HAVE_KOBJ_DEFAULT_GROUPS + .default_groups = vf_eth_groups +#else + .default_attrs = vf_eth_attrs +#endif +}; + +int bnxt_sriov_sysfs_init(struct bnxt *bp) +{ + struct device *dev = &bp->pdev->dev; + + bp->sriov_sysfs_config = kobject_create_and_add("sriov", &dev->kobj); + if (!bp->sriov_sysfs_config) + return -ENOMEM; + + return 0; +} + +void bnxt_sriov_sysfs_exit(struct bnxt *bp) +{ + kobject_put(bp->sriov_sysfs_config); + bp->sriov_sysfs_config = NULL; +} + +int bnxt_create_vfs_sysfs(struct bnxt *bp) +{ + struct bnxt_vf_sysfs_obj *vf_obj; + static struct kobj_type *sysfs; + struct bnxt_vf_info *vfs, *tmp; + struct bnxt_stats_mem *stats; + int err; + int vf; + + sysfs = &vf_type_eth; + + bp->vf_sysfs_objs = kcalloc(bp->pf.active_vfs, sizeof(struct bnxt_vf_sysfs_obj), + GFP_KERNEL); + if (!bp->vf_sysfs_objs) + return -ENOMEM; + + mutex_lock(&bp->sriov_lock); + vfs = rcu_dereference_protected(bp->pf.vf, + lockdep_is_held(&bp->sriov_lock)); + + for (vf = 0; vf < bp->pf.active_vfs; vf++) { + tmp = &vfs[vf]; + if (!tmp) { + netdev_warn(bp->dev, "create_vfs_syfs vfs[%d] is NULL\n", vf); + continue; + } + + vf_obj = &bp->vf_sysfs_objs[vf]; + + vf_obj->parent_pf_bp = bp; + vf_obj->fw_fid = tmp->fw_fid; + + stats = &vf_obj->stats; + + stats->len = bp->hw_ring_stats_size; + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + + if (!stats->hw_stats) + goto err_vf_obj; + + err = kobject_init_and_add(&vf_obj->kobj, sysfs, bp->sriov_sysfs_config, + "%d", vf); + if (err) + goto err_vf_obj; + + kobject_uevent(&vf_obj->kobj, KOBJ_ADD); + } + mutex_unlock(&bp->sriov_lock); + return 0; + +err_vf_obj: + for (; vf >= 0; vf--) { + vf_obj = 
&bp->vf_sysfs_objs[vf]; + stats = &vf_obj->stats; + + if (stats->hw_stats) + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + + if (vf_obj->kobj.state_initialized) + kobject_put(&vf_obj->kobj); + } + kfree(bp->vf_sysfs_objs); + mutex_unlock(&bp->sriov_lock); + + return -ENOMEM; +} + +void bnxt_destroy_vfs_sysfs(struct bnxt *bp) +{ + struct bnxt_vf_sysfs_obj *vf_obj; + struct bnxt_stats_mem *stats; + int vf; + + mutex_lock(&bp->sriov_lock); + + for (vf = 0; vf < bp->pf.active_vfs; vf++) { + vf_obj = &bp->vf_sysfs_objs[vf]; + stats = &vf_obj->stats; + + if (stats->hw_stats) + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + kobject_put(&vf_obj->kobj); + } + + kfree(bp->vf_sysfs_objs); + + mutex_unlock(&bp->sriov_lock); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.h new file mode 100644 index 000000000000..82db82cc7a6a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_sriov_sysfs.h @@ -0,0 +1,20 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_SRIOV_SYSFS_H +#define BNXT_SRIOV_SYSFS_H + +#include "bnxt_hsi.h" +#include "bnxt.h" + +int bnxt_sriov_sysfs_init(struct bnxt *bp); +void bnxt_sriov_sysfs_exit(struct bnxt *bp); +int bnxt_create_vfs_sysfs(struct bnxt *bp); +void bnxt_destroy_vfs_sysfs(struct bnxt *bp); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.c new file mode 100644 index 000000000000..382fd3697bcc --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.c @@ -0,0 +1,3904 @@ +/* Broadcom NetXtreme-C/E network driver. 
+ * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#if defined(HAVE_TC_FLOW_CLS_OFFLOAD) || defined(HAVE_TC_CLS_FLOWER_OFFLOAD) +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_TCF_TUNNEL +#include +#endif +#include +#endif /* HAVE_TC_FLOW_CLS_OFFLOAD || HAVE_TC_CLS_FLOWER_OFFLOAD */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_sriov.h" +#include "bnxt_tc.h" +#include "bnxt_vfr.h" +#include "ulp_udcc.h" +#include "bnxt_ulp_flow.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +#include "bnxt_tc_compat.h" + +#define BNXT_FID_INVALID INVALID_HW_RING_ID +#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) +#define BNXT_MAX_NEIGH_TIMEOUT 10 + +#define is_vlan_pcp_wildcarded(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000) +#define is_vlan_pcp_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK) +#define is_vlan_pcp_zero(vlan_tci) \ + ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000) +#define is_vid_exactmatch(vlan_tci_mask) \ + ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK) + +static bool is_wildcard(void *mask, int len); +static bool is_exactmatch(void *mask, int len); +/* Return the dst fid of the func for flow forwarding + * For PFs: src_fid is the fid of the PF + * For VF-reps: src_fid the fid of the VF + */ +u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev) +{ + struct bnxt *bp; + + /* check if dev belongs to the same switch */ + if (!netdev_port_same_parent_id(pf_bp->dev, dev)) { + netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n", + dev->ifindex); + return BNXT_FID_INVALID; + } + +#ifdef 
CONFIG_VF_REPS + /* Is dev a VF-rep? */ + if (bnxt_dev_is_vf_rep(dev)) + return bnxt_vf_rep_get_fid(dev); +#endif + + bp = netdev_priv(dev); + return bp->pf.fw_fid; +} + +#ifdef HAVE_FLOW_OFFLOAD_H +static int bnxt_tc_parse_redir(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct flow_action_entry *act) +{ + struct net_device *dev = act->dev; + + if (!dev) { + netdev_info(bp->dev, "no dev in mirred action\n"); + return -EINVAL; + } + + actions->flags |= BNXT_TC_ACTION_FLAG_FWD; + actions->dst_dev = dev; + return 0; +} + +#else + +static int bnxt_tc_parse_redir(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(tc_act); + + if (!dev) { + netdev_info(bp->dev, "no dev in mirred action"); + return -EINVAL; + } +#else + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); + if (!dev) { + netdev_info(bp->dev, "no dev for ifindex=%d", ifindex); + return -EINVAL; + } +#endif + + actions->flags |= BNXT_TC_ACTION_FLAG_FWD; + actions->dst_dev = dev; + return 0; +} +#endif + +#ifdef HAVE_FLOW_OFFLOAD_H +static int bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct flow_action_entry *act) +{ + switch (act->id) { + case FLOW_ACTION_VLAN_POP: + actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; + break; + case FLOW_ACTION_VLAN_PUSH: + actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN; + actions->push_vlan_tci = htons(act->vlan.vid); + actions->push_vlan_tpid = act->vlan.proto; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +#else + +static int bnxt_tc_parse_vlan(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + switch (tcf_vlan_action(tc_act)) { + case TCA_VLAN_ACT_POP: + actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN; + break; + case TCA_VLAN_ACT_PUSH: + actions->flags |= 
BNXT_TC_ACTION_FLAG_PUSH_VLAN; + actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act)); + actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} +#endif + +#ifdef HAVE_FLOW_OFFLOAD_H +static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct flow_action_entry *act) +{ + const struct ip_tunnel_info *tun_info = act->tunnel; + const struct ip_tunnel_key *tun_key = &tun_info->key; + +#else +static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); + struct ip_tunnel_key *tun_key = &tun_info->key; +#endif + + switch (ip_tunnel_info_af(tun_info)) { + case AF_INET: + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV4; + break; + case AF_INET6: + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV6; + break; + default: + return -EOPNOTSUPP; + } + + actions->tun_encap_key = *tun_key; + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP; + return 0; +} + + +/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes + * each(u32). 
+ * This routine consolidates such multiple unaligned values into one + * field each for Key & Mask (for src and dst macs separately) + * For example, + * Mask/Key Offset Iteration + * ========== ====== ========= + * dst mac 0xffffffff 0 1 + * dst mac 0x0000ffff 4 2 + * + * src mac 0xffff0000 4 1 + * src mac 0xffffffff 8 2 + * + * The above combination coming from the stack will be consolidated as + * Mask/Key + * ============== + * src mac: 0xffffffffffff + * dst mac: 0xffffffffffff + */ +static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask, + u8 *actual_key, u8 *actual_mask) +{ + u32 key = get_unaligned((u32 *)actual_key); + u32 mask = get_unaligned((u32 *)actual_mask); + + part_key &= part_mask; + part_key |= key & ~part_mask; + + put_unaligned(mask | part_mask, (u32 *)actual_mask); + put_unaligned(part_key, (u32 *)actual_key); +} + +static int +bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions, + u16 *eth_addr, u16 *eth_addr_mask) +{ + u16 *p; + int j; + + if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask))) + return -EINVAL; + + if (!is_wildcard(ð_addr_mask[0], ETH_ALEN)) { + if (!is_exactmatch(ð_addr_mask[0], ETH_ALEN)) + return -EINVAL; + /* FW expects dmac to be in u16 array format */ + p = eth_addr; + for (j = 0; j < 3; j++) + actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j)); + } + + if (!is_wildcard(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) { + if (!is_exactmatch(ð_addr_mask[ETH_ALEN / 2], ETH_ALEN)) + return -EINVAL; + /* FW expects smac to be in u16 array format */ + p = ð_addr[ETH_ALEN / 2]; + for (j = 0; j < 3; j++) + actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j)); + } + + return 0; +} + +#ifdef HAVE_FLOW_OFFLOAD_H +static int +bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions, + struct flow_action_entry *act, int act_idx, u8 *eth_addr, + u8 *eth_addr_mask) +{ + size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr); + size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr); + u32 
mask, val, offset, idx; + u8 htype; + + offset = act->mangle.offset; + htype = act->mangle.htype; + mask = ~act->mangle.mask; + val = act->mangle.val; + + switch (htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) { + netdev_err(bp->dev, + "%s: eth_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE; + + bnxt_set_l2_key_mask(val, mask, ð_addr[offset], + ð_addr_mask[offset]); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = true; + if (offset == offsetof(struct iphdr, saddr)) { + actions->nat.src_xlate = true; + actions->nat.l3.ipv4.saddr.s_addr = htonl(val); + } + else if (offset == offsetof(struct iphdr, daddr)) { + actions->nat.src_xlate = false; + actions->nat.l3.ipv4.daddr.s_addr = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv4_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + + netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n", + actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr, + &actions->nat.l3.ipv4.daddr); + break; + + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = false; + if (offset >= offsetof(struct ipv6hdr, saddr) && + offset < offset_of_ip6_daddr) { + /* 16 byte IPv6 address comes in 4 iterations of + * 4byte chunks each + */ + actions->nat.src_xlate = true; + idx = (offset - offset_of_ip6_saddr) / 4; + /* First 4bytes will be copied to idx 0 and so on */ + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); + } else if (offset >= offset_of_ip6_daddr && + offset < offset_of_ip6_daddr + 16) { + actions->nat.src_xlate = false; + idx = (offset - offset_of_ip6_daddr) / 4; + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv6_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + case 
FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* HW does not support L4 rewrite alone without L3 + * rewrite + */ + if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { + netdev_err(bp->dev, + "Need to specify L3 rewrite as well\n"); + return -EINVAL; + } + if (actions->nat.src_xlate) + actions->nat.l4.ports.sport = htons(val); + else + actions->nat.l4.ports.dport = htons(val); + netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n", + actions->nat.l4.ports.sport, + actions->nat.l4.ports.dport); + break; + default: + netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EINVAL; + } + return 0; +} + +static int bnxt_tc_parse_actions(struct bnxt *bp, + struct bnxt_tc_actions *actions, + struct flow_action *flow_action, + struct netlink_ext_ack *extack) +{ + /* Used to store the L2 rewrite mask for dmac (6 bytes) followed by + * smac (6 bytes) if rewrite of both is specified, otherwise either + * dmac or smac + */ + u16 eth_addr_mask[ETH_ALEN] = { 0 }; + /* Used to store the L2 rewrite key for dmac (6 bytes) followed by + * smac (6 bytes) if rewrite of both is specified, otherwise either + * dmac or smac + */ + u16 eth_addr[ETH_ALEN] = { 0 }; + struct flow_action_entry *act; + int i, rc; + + if (!flow_action_has_entries(flow_action)) { + netdev_info(bp->dev, "no actions\n"); + return -EINVAL; + } + + if (!flow_action_basic_hw_stats_check(flow_action, extack)) + return -EOPNOTSUPP; + + flow_action_for_each(i, act, flow_action) { + switch (act->id) { + case FLOW_ACTION_DROP: + actions->flags |= BNXT_TC_ACTION_FLAG_DROP; + return 0; /* don't bother with other actions */ + case FLOW_ACTION_REDIRECT: + rc = bnxt_tc_parse_redir(bp, actions, act); + if (rc) + return rc; + break; + case FLOW_ACTION_VLAN_POP: + case FLOW_ACTION_VLAN_PUSH: + case FLOW_ACTION_VLAN_MANGLE: + rc = bnxt_tc_parse_vlan(bp, actions, act); + if (rc) + return rc; + break; + case FLOW_ACTION_TUNNEL_ENCAP: + rc = bnxt_tc_parse_tunnel_set(bp, 
actions, act); + if (rc) + return rc; + break; + case FLOW_ACTION_TUNNEL_DECAP: + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; + break; + /* Packet edit: L2 rewrite, NAT, NAPT */ + case FLOW_ACTION_MANGLE: + rc = bnxt_tc_parse_pedit(bp, actions, act, i, + (u8 *)eth_addr, + (u8 *)eth_addr_mask); + if (rc) + return rc; + break; + default: + break; + } + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr, + eth_addr_mask); + if (rc) + return rc; + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + /* dst_fid is PF's fid */ + actions->dst_fid = bp->pf.fw_fid; + } else { + /* find the FID from dst_dev */ + actions->dst_fid = + bnxt_flow_get_dst_fid(bp, actions->dst_dev); + if (actions->dst_fid == BNXT_FID_INVALID) + return -EINVAL; + } + } + + return 0; +} + +#else + +static int +bnxt_tc_parse_pedit(struct bnxt *bp, const struct tc_action *tc_act, + struct bnxt_tc_actions *actions, + u8 *eth_addr, u8 *eth_addr_mask) +{ + size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr); + size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr); + u32 mask, val, offset, idx; + u8 cmd, htype; + int nkeys, j; + + nkeys = tcf_pedit_nkeys(tc_act); + for (j = 0 ; j < nkeys; j++) { + cmd = tcf_pedit_cmd(tc_act, j); + /* L2 rewrite comes as TCA_PEDIT_KEY_EX_CMD_SET type from TC. + * Return error, if the TC pedit cmd is not of this type. 
+ */ + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { + netdev_err(bp->dev, "%s: pedit cmd not supported\n", + __func__); + return -EINVAL; + } + + offset = tcf_pedit_offset(tc_act, j); + htype = tcf_pedit_htype(tc_act, j); + mask = ~tcf_pedit_mask(tc_act, j); + val = tcf_pedit_val(tc_act, j); + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) { + netdev_err(bp->dev, + "%s: eth_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + actions->flags |= + BNXT_TC_ACTION_FLAG_L2_REWRITE; + + bnxt_set_l2_key_mask(val, mask, + ð_addr[offset], + ð_addr_mask[offset]); + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = true; + if (offset == offsetof(struct iphdr, saddr)) { + actions->nat.src_xlate = true; + actions->nat.l3.ipv4.saddr.s_addr = htonl(val); + } else if (offset == offsetof(struct iphdr, daddr)) { + actions->nat.src_xlate = false; + actions->nat.l3.ipv4.daddr.s_addr = htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv4_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE; + actions->nat.l3_is_ipv4 = false; + + if (offset >= offsetof(struct ipv6hdr, saddr) && + offset < offset_of_ip6_daddr) { + /* 16 byte IPv6 address comes in 4 iterations of + * 4byte chunks each + */ + actions->nat.src_xlate = true; + idx = (offset - offset_of_ip6_saddr) / 4; + /* First 4bytes will be copied to idx 0 and so on */ + actions->nat.l3.ipv6.saddr.s6_addr32[idx] = + htonl(val); + } else if (offset >= offset_of_ip6_daddr && + offset < offset_of_ip6_daddr + 16) { + actions->nat.src_xlate = false; + idx = (offset - offset_of_ip6_daddr) / 4; + actions->nat.l3.ipv6.daddr.s6_addr32[idx] = + htonl(val); + } else { + netdev_err(bp->dev, + "%s: IPv6_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + break; + + case 
TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + /* HW does not support L4 rewrite alone without L3 + * rewrite + */ + if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) { + netdev_err(bp->dev, + "Need to specify L3 rewrite as well\n"); + return -EINVAL; + } + if (actions->nat.src_xlate) + actions->nat.l4.ports.sport = htons(val); + else + actions->nat.l4.ports.dport = htons(val); + break; + /* Return, if the packet edit is not for L2/L3/L4 */ + default: + netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EINVAL; + } + } + + return 0; +} + +static int bnxt_tc_parse_actions(struct bnxt *bp, + struct bnxt_tc_actions *actions, + struct tcf_exts *tc_exts) +{ + u16 eth_addr_mask[ETH_ALEN] = { 0 }; + u16 eth_addr[ETH_ALEN] = { 0 }; + const struct tc_action *tc_act; +#ifndef HAVE_TC_EXTS_FOR_ACTION + LIST_HEAD(tc_actions); + int rc; +#else + int i, rc; +#endif + + if (!tcf_exts_has_actions(tc_exts)) { + netdev_info(bp->dev, "no actions"); + return -EINVAL; + } + +#ifndef HAVE_TC_EXTS_FOR_ACTION + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { +#else + tcf_exts_for_each_action(i, tc_act, tc_exts) { +#endif + /* Drop action */ + if (is_tcf_gact_shot(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_DROP; + return 0; /* don't bother with other actions */ + } + + /* Redirect action */ + if (is_tcf_mirred_egress_redirect(tc_act)) { + rc = bnxt_tc_parse_redir(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Push/pop VLAN */ + if (is_tcf_vlan(tc_act)) { + rc = bnxt_tc_parse_vlan(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel encap */ + if (is_tcf_tunnel_set(tc_act)) { + rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel decap */ + if (is_tcf_tunnel_release(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; + continue; + } + + /* Packet edit: L2 rewrite, NAT, NAPT */ + 
if (is_tcf_pedit(tc_act)) { + rc = bnxt_tc_parse_pedit(bp, tc_act, actions, + (u8 *)eth_addr, + (u8 *)eth_addr_mask); + if (rc) + return rc; + + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + rc = bnxt_fill_l2_rewrite_fields(actions, + eth_addr, + eth_addr_mask); + if (rc) + return rc; + } + } + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + /* dst_fid is PF's fid */ + actions->dst_fid = bp->pf.fw_fid; + } else { + /* find the FID from dst_dev */ + actions->dst_fid = + bnxt_flow_get_dst_fid(bp, actions->dst_dev); + if (actions->dst_fid == BNXT_FID_INVALID) + return -EINVAL; + } + } + + return 0; +} +#endif + +static int bnxt_tc_parse_flow(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct bnxt_tc_flow *flow) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd); + struct flow_dissector *dissector = rule->match.dissector; + + /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ + if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n", + (u64)dissector->used_keys); + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + flow->l2_key.ether_type = match.key->n_proto; + flow->l2_mask.ether_type = match.mask->n_proto; + + if (match.key->n_proto == htons(ETH_P_IP) || + match.key->n_proto == htons(ETH_P_IPV6)) { + flow->l4_key.ip_proto = match.key->ip_proto; + flow->l4_mask.ip_proto = match.mask->ip_proto; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; + ether_addr_copy(flow->l2_key.dmac, match.key->dst); + 
ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); + ether_addr_copy(flow->l2_key.smac, match.key->src); + ether_addr_copy(flow->l2_mask.smac, match.mask->src); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + flow->l2_key.inner_vlan_tci = + cpu_to_be16(VLAN_TCI(match.key->vlan_id, + match.key->vlan_priority)); + flow->l2_mask.inner_vlan_tci = + cpu_to_be16((VLAN_TCI(match.mask->vlan_id, + match.mask->vlan_priority))); + flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q); + flow->l2_mask.inner_vlan_tpid = htons(0xffff); + flow->l2_key.num_vlans = 1; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + u16 addr_type; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS; + flow->l3_key.ipv4.daddr.s_addr = match.key->dst; + flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst; + flow->l3_key.ipv4.saddr.s_addr = match.key->src; + flow->l3_mask.ipv4.saddr.s_addr = match.mask->src; + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS; + flow->l3_key.ipv6.daddr = match.key->dst; + flow->l3_mask.ipv6.daddr = match.mask->dst; + flow->l3_key.ipv6.saddr = match.key->src; + flow->l3_mask.ipv6.saddr = match.mask->src; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS; + flow->l4_key.ports.dport = match.key->dst; + flow->l4_mask.ports.dport = match.mask->dst; + flow->l4_key.ports.sport = match.key->src; + flow->l4_mask.ports.sport = match.mask->src; + } + + if 
(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) { + struct flow_match_icmp match; + + flow_rule_match_icmp(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP; + flow->l4_key.icmp.type = match.key->type; + flow->l4_key.icmp.code = match.key->code; + flow->l4_mask.icmp.type = match.mask->type; + flow->l4_mask.icmp.code = match.mask->code; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_match_control match; + u16 addr_type; + + flow_rule_match_enc_control(rule, &match); + addr_type = match.key->addr_type; + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; + flow->tun_key.u.ipv4.dst = match.key->dst; + flow->tun_mask.u.ipv4.dst = match.mask->dst; + flow->tun_key.u.ipv4.src = match.key->src; + flow->tun_mask.u.ipv4.src = match.mask->src; + } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS; + flow->tun_key.u.ipv6.dst = match.key->dst; + flow->tun_mask.u.ipv6.dst = match.mask->dst; + flow->tun_key.u.ipv6.src = match.key->src; + flow->tun_mask.u.ipv6.src = match.mask->src; + } + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_match_enc_keyid match; + + flow_rule_match_enc_keyid(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; + flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid); + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_enc_ports(rule, &match); + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; + flow->tun_key.tp_dst = match.key->dst; + flow->tun_mask.tp_dst = match.mask->dst; + flow->tun_key.tp_src = match.key->src; + flow->tun_mask.tp_src = 
match.mask->src; + } + +#ifdef HAVE_FLOW_OFFLOAD_H + return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action, + tc_flow_cmd->common.extack); +#else + return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); +#endif +} + +static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct hwrm_cfa_flow_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); + if (!rc) { + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->ext_flow_handle = flow_node->ext_flow_handle; + else + req->flow_handle = flow_node->flow_handle; + + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); + + return rc; +} + +static int ipv6_mask_len(struct in6_addr *mask) +{ + int mask_len = 0, i; + + for (i = 0; i < 4; i++) + mask_len += inet_mask_len(mask->s6_addr32[i]); + + return mask_len; +} + +static bool is_wildcard(void *mask, int len) +{ + const u8 *p = mask; + int i; + + for (i = 0; i < len; i++) { + if (p[i] != 0) + return false; + } + return true; +} + +static bool is_exactmatch(void *mask, int len) +{ + const u8 *p = mask; + int i; + + for (i = 0; i < len; i++) + if (p[i] != 0xff) + return false; + + return true; +} + +static bool is_vlan_tci_allowed(__be16 vlan_tci_mask, + __be16 vlan_tci) +{ + /* VLAN priority must be either exactly zero or fully wildcarded and + * VLAN id must be exact match. 
+ */ + if (is_vid_exactmatch(vlan_tci_mask) && + ((is_vlan_pcp_exactmatch(vlan_tci_mask) && + is_vlan_pcp_zero(vlan_tci)) || + is_vlan_pcp_wildcarded(vlan_tci_mask))) + return true; + + return false; +} + +static bool bits_set(void *key, int len) +{ + const u8 *p = key; + int i; + + for (i = 0; i < len; i++) + if (p[i] != 0) + return true; + + return false; +} + +static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, + __le16 ref_flow_handle, + __le32 tunnel_handle, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_actions *actions = &flow->actions; + struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; + struct bnxt_tc_l3_key *l3_key = &flow->l3_key; + struct hwrm_cfa_flow_alloc_output *resp; + struct hwrm_cfa_flow_alloc_input *req; + u16 flow_flags = 0, action_flags = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); + if (rc) + return rc; + + req->src_fid = cpu_to_le16(flow->src_fid); + req->ref_flow_handle = ref_flow_handle; + + if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { + memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, + ETH_ALEN); + memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, + ETH_ALEN); + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) { + if (actions->nat.l3_is_ipv4) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS; + + if (actions->nat.src_xlate) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; + /* L3 source rewrite */ + req->nat_ip_address[0] = + actions->nat.l3.ipv4.saddr.s_addr; + /* L4 source port */ + if (actions->nat.l4.ports.sport) + req->nat_port = + actions->nat.l4.ports.sport; + } else { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; + /* L3 destination rewrite */ + req->nat_ip_address[0] = + actions->nat.l3.ipv4.daddr.s_addr; + /* L4 destination port */ + if (actions->nat.l4.ports.dport) + req->nat_port = + actions->nat.l4.ports.dport; + 
} + netdev_dbg(bp->dev, + "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); + } else { + if (actions->nat.src_xlate) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; + /* L3 source rewrite */ + memcpy(req->nat_ip_address, + actions->nat.l3.ipv6.saddr.s6_addr32, + sizeof(req->nat_ip_address)); + /* L4 source port */ + if (actions->nat.l4.ports.sport) + req->nat_port = + actions->nat.l4.ports.sport; + } else { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; + /* L3 destination rewrite */ + memcpy(req->nat_ip_address, + actions->nat.l3.ipv6.daddr.s6_addr32, + sizeof(req->nat_ip_address)); + /* L4 destination port */ + if (actions->nat.l4.ports.dport) + req->nat_port = + actions->nat.l4.ports.dport; + } + netdev_dbg(bp->dev, + "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); + } + } + + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || + actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + req->tunnel_handle = tunnel_handle; + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; + } + + req->ethertype = flow->l2_key.ether_type; + req->ip_proto = flow->l4_key.ip_proto; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { + memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); + } + + if (flow->l2_key.num_vlans > 0) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE; + /* FW expects the inner_vlan_tci value to be set + * in outer_vlan_tci when num_vlans is 1 (which is + * always the case in TC.) 
+ */ + req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; + } + + /* If all IP and L4 fields are wildcarded then this is an L2 flow */ + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && + is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; + } else { + flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ? + CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 : + CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { + req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req->ip_dst_mask_len = + inet_mask_len(l3_mask->ipv4.daddr.s_addr); + req->ip_src[0] = l3_key->ipv4.saddr.s_addr; + req->ip_src_mask_len = + inet_mask_len(l3_mask->ipv4.saddr.s_addr); + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { + memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req->ip_dst)); + req->ip_dst_mask_len = + ipv6_mask_len(&l3_mask->ipv6.daddr); + memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req->ip_src)); + req->ip_src_mask_len = + ipv6_mask_len(&l3_mask->ipv6.saddr); + } + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { + req->l4_src_port = flow->l4_key.ports.sport; + req->l4_src_port_mask = flow->l4_mask.ports.sport; + req->l4_dst_port = flow->l4_key.ports.dport; + req->l4_dst_port_mask = flow->l4_mask.ports.dport; + } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { + /* l4 ports serve as type/code when ip_proto is ICMP */ + req->l4_src_port = htons(flow->l4_key.icmp.type); + req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req->l4_dst_port = htons(flow->l4_key.icmp.code); + req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + } + req->flags = cpu_to_le16(flow_flags); + + if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; + } else { + if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; + req->dst_fid = cpu_to_le16(actions->dst_fid); + } + if 
(actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req->l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); + } + if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { + action_flags |= + CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; + /* Rewrite config with tpid = 0 implies vlan pop */ + req->l2_rewrite_vlan_tpid = 0; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); + } + } + req->action_flags = cpu_to_le16(action_flags); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) { + /* CFA_FLOW_ALLOC response interpretation: + * fw with fw with + * 16-bit 64-bit + * flow handle flow handle + * =========== =========== + * flow_handle flow handle flow context id + * ext_flow_handle INVALID flow handle + * flow_id INVALID flow counter id + */ + flow_node->flow_handle = resp->flow_handle; + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { + flow_node->ext_flow_handle = resp->ext_flow_handle; + flow_node->flow_id = resp->flow_id; + } + } + hwrm_req_drop(bp, req); + return rc; +} + +static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_info, + __le32 ref_decap_handle, + __le32 *decap_filter_handle) +{ + struct hwrm_cfa_decap_filter_alloc_output *resp; + struct ip_tunnel_key *tun_key = &flow->tun_key; + struct hwrm_cfa_decap_filter_alloc_input *req; + u32 enables = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC); + if (rc) + goto exit; + + req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; + req->tunnel_type = 
CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; + /* tunnel_id is wrongly defined in hsi defn. as __le32 */ + req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR; + ether_addr_copy(req->dst_macaddr, l2_info->dmac); + } + if (l2_info->num_vlans) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; + req->t_ivlan_vid = l2_info->inner_vlan_tci; + } + + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; + req->ethertype = htons(ETH_P_IP); + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->dst_ipaddr[0] = tun_key->u.ipv4.dst; + req->src_ipaddr[0] = tun_key->u.ipv4.src; + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; + memcpy(req->dst_ipaddr, &tun_key->u.ipv6.dst, + sizeof(req->dst_ipaddr)); + memcpy(req->src_ipaddr, &tun_key->u.ipv6.src, + sizeof(req->src_ipaddr)); + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; + req->dst_port = tun_key->tp_dst; + } + + /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc + * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. 
+ */ + req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req->enables = cpu_to_le32(enables); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) + *decap_filter_handle = resp->decap_filter_id; + hwrm_req_drop(bp, req); +exit: + if (rc == -ENOSPC) + net_info_ratelimited("%s %s: No HW resources for new flow, rc=%d\n", + bp->dev->name, __func__, rc); + else if (rc) + netdev_err(bp->dev, "%s: Error rc=%d\n", __func__, rc); + + return rc; +} + +static int hwrm_cfa_decap_filter_free(struct bnxt *bp, + __le32 decap_filter_handle) +{ + struct hwrm_cfa_decap_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE); + if (!rc) { + req->decap_filter_id = decap_filter_handle; + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); + + return rc; +} + +static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, + struct ip_tunnel_key *encap_key, + struct bnxt_tc_l2_key *l2_info, + __le32 *encap_record_handle) +{ + struct hwrm_cfa_encap_record_alloc_output *resp; + struct hwrm_cfa_encap_record_alloc_input *req; + struct hwrm_cfa_encap_data_vxlan *encap; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4; + struct hwrm_vxlan_ipv6_hdr *encap_ipv6; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC); + if (rc) + goto exit; + + encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; + req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); + ether_addr_copy(encap->src_mac_addr, l2_info->smac); + if (l2_info->num_vlans) { + encap->num_vlan_tags = l2_info->num_vlans; + encap->ovlan_tci = l2_info->inner_vlan_tci; + encap->ovlan_tpid = l2_info->inner_vlan_tpid; + } + + if (l2_info->ether_type == htons(ETH_P_IPV6)) { + encap_ipv6 = (struct hwrm_vxlan_ipv6_hdr *)encap->l3; + encap_ipv6->ver_tc_flow_label = + 6 << VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT; + memcpy(encap_ipv6->dest_ip_addr, 
&encap_key->u.ipv6.dst, + sizeof(encap_ipv6->dest_ip_addr)); + memcpy(encap_ipv6->src_ip_addr, &encap_key->u.ipv6.src, + sizeof(encap_ipv6->src_ip_addr)); + encap_ipv6->ttl = encap_key->ttl; + encap_ipv6->next_hdr = IPPROTO_UDP; + } else { + encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; + encap_ipv4->ver_hlen |= + 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; + encap_ipv4->ttl = encap_key->ttl; + encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst; + encap_ipv4->src_ip_addr = encap_key->u.ipv4.src; + encap_ipv4->protocol = IPPROTO_UDP; + } + + encap->dst_port = encap_key->tp_dst; + encap->vni = tunnel_id_to_key32(encap_key->tun_id); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) + *encap_record_handle = resp->encap_record_id; + hwrm_req_drop(bp, req); +exit: + if (rc == -ENOSPC) + net_info_ratelimited("%s %s: No HW resources for new flow, rc=%d\n", + bp->dev->name, __func__, rc); + else if (rc) + netdev_err(bp->dev, "%s: Error rc=%d\n", __func__, rc); + + return rc; +} + +static int hwrm_cfa_encap_record_free(struct bnxt *bp, + __le32 encap_record_handle) +{ + struct hwrm_cfa_encap_record_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE); + if (!rc) { + req->encap_record_id = encap_record_handle; + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); + + return rc; +} + +static int bnxt_tc_put_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* l2_node may be release twice if re-add flow to HW failed when egress + * tunnel MAC was changed, return gracefully for second time. 
+ */ + if (!l2_node) + return 0; + + /* remove flow_node from the L2 shared flow list */ + list_del(&flow_node->l2_list_node); + if (--l2_node->refcount == 0) { + rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node, + tc_info->l2_ht_params); + if (rc) + netdev_err(bp->dev, + "Error: %s: rhashtable_remove_fast: %d\n", + __func__, rc); + kfree_rcu(l2_node, rcu); + } + flow_node->l2_node = NULL; + return 0; +} + +static struct bnxt_tc_l2_node * +bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, + struct rhashtable_params ht_params, + struct bnxt_tc_l2_key *l2_key) +{ + struct bnxt_tc_l2_node *l2_node; + int rc; + + l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params); + if (!l2_node) { + l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL); + if (!l2_node) + return NULL; + + l2_node->key = *l2_key; + rc = rhashtable_insert_fast(l2_table, &l2_node->node, + ht_params); + if (rc) { + kfree_rcu(l2_node, rcu); + netdev_err(bp->dev, + "Error: %s: rhashtable_insert_fast: %d\n", + __func__, rc); + return NULL; + } + INIT_LIST_HEAD(&l2_node->common_l2_flows); + } + return l2_node; +} + +/* Get the ref_flow_handle for a flow by checking if there are any other + * flows that share the same L2 key as this flow. 
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	/* Look up (or allocate and insert) the L2 node for this flow's
	 * L2 key.
	 */
	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		/* No flow shares this L2 key yet; 0xffff presumably marks
		 * "no reference flow" to FW — TODO confirm against HWRM spec.
		 */
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
			    flow->l4_key.ip_proto);
		return false;
	}

	/* Flows destined to broadcast/multicast MACs are not offloadable */
	if (is_multicast_ether_addr(flow->l2_key.dmac) ||
	    is_broadcast_ether_addr(flow->l2_key.dmac)) {
		netdev_info(bp->dev,
			    "Broadcast/Multicast flow offload unsupported\n");
		return false;
	}

	/* Currently source/dest MAC cannot be partial wildcard */
	if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
	    !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
		return false;
	}
	if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
	    !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
		return false;
	}

	/* Currently VLAN fields cannot be partial wildcard */
	if (bits_set(&flow->l2_key.inner_vlan_tci,
		     sizeof(flow->l2_key.inner_vlan_tci)) &&
	    !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
				 flow->l2_key.inner_vlan_tci)) {
		netdev_info(bp->dev, "Unsupported VLAN TCI\n");
		return false;
	}
	if (bits_set(&flow->l2_key.inner_vlan_tpid,
		     sizeof(flow->l2_key.inner_vlan_tpid)) &&
	    !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
			   sizeof(flow->l2_mask.inner_vlan_tpid))) {
		netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
		return false;
	}

	/* Currently Ethertype must be set */
	if (!is_exactmatch(&flow->l2_mask.ether_type,
			   sizeof(flow->l2_mask.ether_type))) {
		netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
		return false;
	}

	return true;
}

/*
 * Returns the final refcount of the node on success
 * or a -ve
error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		/* Only unlink from the neigh node's encap list if this node
		 * was ever added to one (->prev is non-NULL then).
		 */
		if (tunnel_node->encap_list_node.prev)
			list_del(&tunnel_node->encap_list_node);

		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
			rc = -1;
		}
		/* RCU-deferred free: concurrent lockless readers may still
		 * hold a reference obtained via rhashtable_lookup_fast().
		 */
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/*
 * Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key,
			enum bnxt_tc_tunnel_node_type tunnel_node_type)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		/* Not found: allocate a new node keyed by the tunnel key,
		 * with no HW handle allocated yet.
		 */
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		tunnel_node->tunnel_node_type = tunnel_node_type;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
		INIT_LIST_HEAD(&tunnel_node->common_encap_flows);
	}
	/* Caller owns one reference; dropped via bnxt_tc_put_tunnel_node() */
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d\n", rc);
	return NULL;
}

/* Drop a reference on a neigh node; removes and frees it when the
 * refcount reaches zero.
 */
static int bnxt_tc_put_neigh_node(struct bnxt *bp,
				  struct rhashtable *neigh_table,
				  struct rhashtable_params *ht_params,
				  struct bnxt_tc_neigh_node *neigh_node)
{
	int rc;

	if (--neigh_node->refcount > 0)
		return neigh_node->refcount;

	/* Neigh node reference count is 0 */
	rc = rhashtable_remove_fast(neigh_table,
&neigh_node->node, + *ht_params); + if (rc) + netdev_err(bp->dev, "%s: rhashtable_remove_fast rc=%d\n", + __func__, rc); + + kfree_rcu(neigh_node, rcu); + return rc; +} + +static struct bnxt_tc_neigh_node * +bnxt_tc_get_neigh_node(struct bnxt *bp, struct rhashtable *neigh_table, + struct rhashtable_params *ht_params, + struct bnxt_tc_neigh_key *neigh_key) +{ + struct bnxt_tc_neigh_node *neigh_node; + int rc; + + neigh_node = rhashtable_lookup_fast(neigh_table, neigh_key, *ht_params); + if (neigh_node) { + neigh_node->refcount++; + return neigh_node; + } + + neigh_node = kzalloc(sizeof(*neigh_node), GFP_KERNEL); + if (!neigh_node) + return NULL; + + neigh_node->key = *neigh_key; + rc = rhashtable_insert_fast(neigh_table, &neigh_node->node, *ht_params); + if (rc) { + kfree_rcu(neigh_node, rcu); + return NULL; + } + INIT_LIST_HEAD(&neigh_node->common_encap_list); + neigh_node->refcount++; + return neigh_node; +} + +static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_key, + struct bnxt_tc_flow_node *flow_node, + __le32 *ref_decap_handle) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *ref_flow_node; + struct bnxt_tc_l2_node *decap_l2_node; + + decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table, + tc_info->decap_l2_ht_params, + l2_key); + if (!decap_l2_node) + return -1; + + /* If any other flow is using this decap_l2_node, use it's decap_handle + * as the ref_decap_handle + */ + if (decap_l2_node->refcount > 0) { + ref_flow_node = + list_first_entry(&decap_l2_node->common_l2_flows, + struct bnxt_tc_flow_node, + decap_l2_list_node); + *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle; + } else { + *ref_decap_handle = INVALID_TUNNEL_HANDLE; + } + + /* Insert the l2_node into the flow_node so that subsequent flows + * with a matching decap l2 key can use the decap_filter_handle of + * this flow as their ref_decap_handle + */ + flow_node->decap_l2_node = 
decap_l2_node; + list_add(&flow_node->decap_l2_list_node, + &decap_l2_node->common_l2_flows); + decap_l2_node->refcount++; + return 0; +} + +static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* remove flow_node from the decap L2 sharing flow list */ + list_del(&flow_node->decap_l2_list_node); + if (--decap_l2_node->refcount == 0) { + rc = rhashtable_remove_fast(&tc_info->decap_l2_table, + &decap_l2_node->node, + tc_info->decap_l2_ht_params); + if (rc) + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc); + kfree_rcu(decap_l2_node, rcu); + } +} + +static void bnxt_tc_put_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + __le32 decap_handle = flow_node->decap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + if (flow_node->decap_l2_node) + bnxt_tc_put_decap_l2_node(bp, flow_node); + + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + if (!rc && decap_handle != INVALID_TUNNEL_HANDLE) + hwrm_cfa_decap_filter_free(bp, decap_handle); +} + +static int bnxt_tc_create_neigh_node(struct bnxt *bp, void *flow_node, + struct bnxt_tc_neigh_key *neigh_key) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node; + struct bnxt_tc_neigh_node *neigh_node; + + if (BNXT_TRUFLOW_EN(bp)) { + struct bnxt_tf_flow_node *node = flow_node; + + encap_node = node->encap_node; + } else { + struct bnxt_tc_flow_node *node = flow_node; + + encap_node = node->encap_node; + } + + neigh_node = bnxt_tc_get_neigh_node(bp, &tc_info->neigh_table, + &tc_info->neigh_ht_params, + neigh_key); + if (!neigh_node) + return -ENOMEM; + + ether_addr_copy(neigh_node->dmac, encap_node->l2_info.dmac); + encap_node->neigh_node = neigh_node; + list_add(&encap_node->encap_list_node, 
&neigh_node->common_encap_list); + + return 0; +} + +static int bnxt_tc_resolve_vlan(struct bnxt *bp, + struct bnxt_tc_l2_key *l2_info, + struct net_device *dst_dev) +{ +#ifdef CONFIG_INET + struct net_device *real_dst_dev = bp->dev; + int rc = 0; + + /* The route must either point to the real_dst_dev or a dst_dev that + * uses the real_dst_dev. + */ + if (is_vlan_dev(dst_dev)) { +#if IS_ENABLED(CONFIG_VLAN_8021Q) + struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev); + + if (vlan->real_dev != real_dst_dev) + return -ENETUNREACH; + + l2_info->inner_vlan_tci = htons(vlan->vlan_id); + l2_info->inner_vlan_tpid = vlan->vlan_proto; + l2_info->num_vlans = 1; +#endif + } else if (dst_dev != real_dst_dev) { + rc = -ENETUNREACH; + } + + return rc; +#else + return -EOPNOTSUPP; +#endif +} + +static int bnxt_tc_resolve_mac(struct bnxt *bp, + struct bnxt_tc_l2_key *l2_info, + struct net_device *dst_dev, + struct neighbour *nbr) +{ +#ifdef CONFIG_INET + int i = 0; + + neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev); + + if (!is_zero_ether_addr(l2_info->dmac)) { + ether_addr_copy(l2_info->smac, dst_dev->dev_addr); + return 0; + } + + /* Call neigh_event_send to resolve MAC address if didn't + * get a valid one. 
+ */ + if (!(nbr->nud_state & NUD_VALID)) + neigh_event_send(nbr, NULL); + + while (true) { + neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev); + if (!is_zero_ether_addr(l2_info->dmac)) { + ether_addr_copy(l2_info->smac, dst_dev->dev_addr); + return 0; + } + if (++i > BNXT_MAX_NEIGH_TIMEOUT) + return -ENETUNREACH; + + usleep_range(200, 600); + } +#else + return -EOPNOTSUPP; +#endif +} + +static void bnxt_tc_init_neigh_key(struct bnxt *bp, + struct bnxt_tc_neigh_key *neigh_key, + struct neighbour *nbr) +{ + memcpy(&neigh_key->dst_ip, nbr->primary_key, nbr->tbl->key_len); + neigh_key->family = nbr->ops->family; + neigh_key->dev = bp->dev; +} + +int bnxt_tc_resolve_ipv4_tunnel_hdrs(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct bnxt_tc_neigh_key *neigh_key) +{ +#ifdef CONFIG_INET + struct net_device *real_dst_dev = bp->dev; + struct flowi4 flow = { {0} }; + struct net_device *dst_dev; + struct rtable *rt = NULL; + struct neighbour *nbr; + int rc; + + flow.flowi4_proto = IPPROTO_UDP; + flow.fl4_dport = tun_key->tp_dst; + flow.daddr = tun_key->u.ipv4.dst; + rt = ip_route_output_key(dev_net(real_dst_dev), &flow); + if (IS_ERR(rt)) + return -ENETUNREACH; + + dst_dev = rt->dst.dev; + rc = bnxt_tc_resolve_vlan(bp, l2_info, dst_dev); + if (rc) { + netdev_info(bp->dev, + "dst_dev(%s) for %pI4b is not PF-if(%s)\n", + netdev_name(dst_dev), &flow.daddr, + netdev_name(real_dst_dev)); + ip_rt_put(rt); + return rc; + } + + nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); + if (!nbr) { + netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n", + &flow.daddr); + ip_rt_put(rt); + return -ENETUNREACH; + } + + if (!tun_key->u.ipv4.src) + tun_key->u.ipv4.src = flow.saddr; + tun_key->ttl = ip4_dst_hoplimit(&rt->dst); + rc = bnxt_tc_resolve_mac(bp, l2_info, dst_dev, nbr); + if (neigh_key) + bnxt_tc_init_neigh_key(bp, neigh_key, nbr); + neigh_release(nbr); + ip_rt_put(rt); + + return rc; +#else + return 
-EOPNOTSUPP; +#endif +} + +int bnxt_tc_resolve_ipv6_tunnel_hdrs(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct bnxt_tc_neigh_key *neigh_key) +{ +#ifdef CONFIG_INET + struct net_device *real_dst_dev = bp->dev; + struct flowi6 flow6 = { {0} }; + struct dst_entry *dst = NULL; + struct net_device *dst_dev; + struct neighbour *nbr; + int rc; + + flow6.daddr = tun_key->u.ipv6.dst; + flow6.fl6_dport = tun_key->tp_dst; + flow6.flowi6_proto = IPPROTO_UDP; + dst = ip6_route_output(dev_net(real_dst_dev), NULL, &flow6); + if (dst->error) + return -ENETUNREACH; + + dst_dev = dst->dev; + rc = bnxt_tc_resolve_vlan(bp, l2_info, dst_dev); + if (rc) { + netdev_info(bp->dev, + "dst_dev(%s) for %pI6 is not PF-if(%s)\n", + netdev_name(dst_dev), &flow6.daddr, + netdev_name(real_dst_dev)); + dst_release(dst); + return rc; + } + + nbr = dst_neigh_lookup(dst, &flow6.daddr); + if (!nbr) { + netdev_info(bp->dev, "can't lookup neighbor for %pI6\n", + &flow6.daddr); + dst_release(dst); + return -ENETUNREACH; + } + + tun_key->ttl = ip6_dst_hoplimit(dst); + rc = bnxt_tc_resolve_mac(bp, l2_info, dst_dev, nbr); + if (neigh_key) + bnxt_tc_init_neigh_key(bp, neigh_key, nbr); + neigh_release(nbr); + dst_release(dst); + + return rc; +#else + return -EOPNOTSUPP; +#endif +} + +static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct bnxt_tc_neigh_key *neigh_key) +{ + if ((flow_node->flow.flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS) || + (flow_node->flow.actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV6)) + return bnxt_tc_resolve_ipv6_tunnel_hdrs(bp, flow_node, tun_key, + l2_info, neigh_key); + else + return bnxt_tc_resolve_ipv4_tunnel_hdrs(bp, flow_node, tun_key, + l2_info, neigh_key); +} + +static bool bnxt_tc_need_lkup_tunnel_hdrs(struct bnxt_tc_flow *flow) +{ + bool need_tun_lkup = false; + + /* Some 
use cases don't want to match tunnel SIP for ingress flow, it will + * not specify the tunnel SIP in flow key fields, for these cases, need + * skip to lookup tunnel header which include lookup routing table, + * otherwise, the lookup result may not point to PF's net device, driver + * will not offload this flow. We can use PF's MAC to set up the decap + * tunnel to offload this flow successfully since HW supports it. + * Use tunnel SIP mask to check whether there has tunnel SIP in the flow + * key fields. + * For example, following ingress flow doesn't specify to match the tunnel + * sip which the tunnel SIP is 0.0.0.0 to linux driver, we can't use tunnel + * sip 0.0.0.0 to lookup routing table which may point to non PF's net + * device, and driver will not offload below flow but HW actually can + * support to offload this flow by using the PF's MAC to set up decap + * tunnel. + * tc filter add dev vxlan0 ingress prio 100 chain 0 proto ip flower \ + * enc_dst_ip 2.1.1.195 enc_dst_port 4789 enc_key_id 22 dst_ip 90.1.2.20 \ + * action tunnel_key unset action pedit ex munge eth dst set \ + * 46:6c:99:59:cb:15 pipe action mirred egress redirect dev eth0 + */ + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS) { + if (flow->tun_mask.u.ipv6.src.s6_addr32[0] || + flow->tun_mask.u.ipv6.src.s6_addr32[1] || + flow->tun_mask.u.ipv6.src.s6_addr32[2] || + flow->tun_mask.u.ipv6.src.s6_addr32[3]) + need_tun_lkup = true; + } else { + if (flow->tun_mask.u.ipv4.src) + need_tun_lkup = true; + } + + return need_tun_lkup; +} + +static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *decap_filter_handle) +{ + struct ip_tunnel_key *decap_key = &flow->tun_key; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_l2_key l2_info = { 0 }; + struct bnxt_tc_tunnel_node *decap_node; + struct ip_tunnel_key tun_key = { 0 }; + struct bnxt_tc_l2_key *decap_l2_info; + struct bnxt_tc_neigh_key neigh_key; + __le32 
ref_decap_handle; + int rc; + + /* Check if there's another flow using the same tunnel decap. + * If not, add this tunnel to the table and resolve the other + * tunnel header fileds. Ignore src_port in the tunnel_key, + * since it is not required for decap filters. + */ + decap_key->tp_src = 0; + decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + decap_key, + BNXT_TC_TUNNEL_NODE_TYPE_DECAP); + if (!decap_node) + return -ENOMEM; + + flow_node->decap_node = decap_node; + + if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) + goto done; + + /* + * Resolve the L2 fields for tunnel decap + * Resolve the route for remote vtep (saddr) of the decap key + * Find it's next-hop mac addrs + */ + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS) + tun_key.u.ipv6.dst = flow->tun_key.u.ipv6.src; + else + tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; + + tun_key.tp_dst = flow->tun_key.tp_dst; + decap_l2_info = &decap_node->l2_info; + if (bnxt_tc_need_lkup_tunnel_hdrs(flow)) { + rc = bnxt_tc_resolve_tunnel_hdrs(bp, flow_node, &tun_key, + &l2_info, &neigh_key); + if (rc) + goto put_decap; + + /* decap smac is wildcarded */ + ether_addr_copy(decap_l2_info->dmac, l2_info.smac); + if (l2_info.num_vlans) { + decap_l2_info->num_vlans = l2_info.num_vlans; + decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; + decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci; + } + } else { + ether_addr_copy(decap_l2_info->dmac, bp->pf.mac_addr); + } + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; + + /* For getting a decap_filter_handle we first need to check if + * there are any other decap flows that share the same tunnel L2 + * key and if so, pass that flow's decap_filter_handle as the + * ref_decap_handle for this flow. 
+ */ + rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node, + &ref_decap_handle); + if (rc) + goto put_decap; + + /* Issue the hwrm cmd to allocate a decap filter handle */ + rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info, + ref_decap_handle, + &decap_node->tunnel_handle); + if (rc) + goto put_decap_l2; + +done: + *decap_filter_handle = decap_node->tunnel_handle; + return 0; + +put_decap_l2: + bnxt_tc_put_decap_l2_node(bp, flow_node); +put_decap: + bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + return rc; +} + +static void bnxt_tc_put_encap_handle(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + __le32 encap_handle = flow_node->encap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + list_del(&flow_node->encap_flow_list_node); + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, + flow_node->encap_node); + if (!rc && encap_handle != INVALID_TUNNEL_HANDLE) { + hwrm_cfa_encap_record_free(bp, encap_handle); + bnxt_tc_put_neigh_node(bp, &tc_info->neigh_table, + &tc_info->neigh_ht_params, + flow_node->encap_node->neigh_node); + } +} + +/* + * Lookup the tunnel encap table and check if there's an encap_handle + * alloc'd already. + * If not, query L2 info via a route lookup and issue an encap_record_alloc + * cmd to FW. + */ +static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *encap_handle) +{ + struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; + struct bnxt_tc_neigh_key neigh_key = { 0 }; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node; + int rc; + + /* Check if there's another flow using the same tunnel encap. 
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key,
					     BNXT_TC_TUNNEL_NODE_TYPE_ENCAP);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	/* A valid handle means the encap record already exists in HW */
	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV6)
		encap_node->l2_info.ether_type = htons(ETH_P_IPV6);
	else
		encap_node->l2_info.ether_type = htons(ETH_P_IP);

	/* Resolve outer MAC/VLAN/TTL via route + neighbour lookup */
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, flow_node, encap_key,
					 &encap_node->l2_info, &neigh_key);
	if (rc)
		goto put_encap;

	rc = bnxt_tc_create_neigh_node(bp, flow_node, &neigh_key);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_neigh;

done:
	*encap_handle = encap_node->tunnel_handle;
	/* Add flow to encap list, it will be used by neigh update event */
	list_add(&flow_node->encap_flow_list_node, &encap_node->common_encap_flows);
	return 0;

put_neigh:
	bnxt_tc_put_neigh_node(bp, &tc_info->neigh_table,
			       &tc_info->neigh_ht_params,
			       flow_node->encap_node->neigh_node);
put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

/* Release whichever tunnel (decap or encap) handle this flow holds,
 * based on its action flags.
 */
static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node);
}

/* Acquire the decap or encap tunnel handle needed by this flow; no-op
 * (returns 0) for flows without tunnel actions.
 */
static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
return bnxt_tc_get_decap_handle(bp, flow, flow_node, + tunnel_handle); + else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) + return bnxt_tc_get_encap_handle(bp, flow, flow_node, + tunnel_handle); + else + return 0; +} + +static void bnxt_tc_del_encap_flow(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + /* 1. Delete HW cfa flow entry. + * 2. Delete SW l2 node, will add SW l2 node when alloc flow again. + */ + bnxt_hwrm_cfa_flow_free(bp, flow_node); + bnxt_tc_put_l2_node(bp, flow_node); +} + +static void bnxt_tc_free_encap_flow(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc; + + /* L2 node may be released twice, return gracefully for second time */ + bnxt_tc_put_l2_node(bp, flow_node); + bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); + rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, + tc_info->flow_ht_params); + if (rc) + netdev_err(bp->dev, "%s: Error: rhashtable_remove_fast rc=%d\n", + __func__, rc); + + kfree_rcu(flow_node, rcu); + netdev_dbg(bp->dev, "%s: Failed to re-add flow to HW, freed flow memory\n", + __func__); +} + +static int bnxt_tc_add_encap_flow(struct bnxt *bp, + struct bnxt_tc_neigh_node *neigh_node, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_tunnel_node *encap_node; + struct ip_tunnel_key *encap_key; + struct bnxt_tc_flow *flow; + __le16 ref_flow_handle; + int rc; + + flow = &flow_node->flow; + encap_key = &flow->actions.tun_encap_key; + encap_node = flow_node->encap_node; + + /* 1. Get ref_flow_handle. + * 2. Add HW encap record. + * 3. Add HW cfa flow entry. 
+ */ + rc = bnxt_tc_get_ref_flow_handle(bp, flow, flow_node, &ref_flow_handle); + if (rc) + return rc; + + /* Allocate a new tunnel encap record */ + if (encap_node->tunnel_handle == INVALID_TUNNEL_HANDLE) { + rc = hwrm_cfa_encap_record_alloc(bp, encap_key, + &encap_node->l2_info, + &encap_node->tunnel_handle); + if (rc) + return rc; + } + + rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, + encap_node->tunnel_handle, flow_node); + + return rc; +} + +static void *bnxt_tc_lkup_neigh_node(struct bnxt *bp, + struct neighbour *n) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_neigh_key key = { 0 }; + + bnxt_tc_init_neigh_key(bp, &key, n); + + return rhashtable_lookup_fast(&tc_info->neigh_table, &key, + tc_info->neigh_ht_params); +} + +static void +bnxt_tc_del_add_encap_flows_tf(struct bnxt *bp, struct bnxt_tc_tunnel_node *encap_node, + struct bnxt_tc_neigh_node *neigh_node) +{ + struct bnxt_tf_flow_node *flow_node; + + /* Flow may share the same encap node, need delete all the HW + * flow and encap record first, then update the SW encap tunnel + * handle, add HW encap record and flow at last. + */ + list_for_each_entry(encap_node, &neigh_node->common_encap_list, + encap_list_node) { + list_for_each_entry(flow_node, &encap_node->common_encap_flows, + encap_flow_list_node) + bnxt_ulp_update_flow_encap_record(bp, bp->neigh_update.neigh->ha, + flow_node->mparms, + &flow_node->flow_id); + memcpy(encap_node->l2_info.dmac, bp->neigh_update.neigh->ha, ETH_ALEN); + } +} + +static void +bnxt_tc_del_add_encap_flows_afm(struct bnxt *bp, struct bnxt_tc_tunnel_node *encap_node, + struct bnxt_tc_neigh_node *neigh_node) +{ + struct bnxt_tc_flow_node *flow_node; + struct list_head failed_flows_head; + int rc; + + INIT_LIST_HEAD(&failed_flows_head); + /* Flow may share the same encap node, need delete all the HW + * flow and encap record first, then update the SW encap tunnel + * handle, add HW encap record and flow at last. 
+ */ + list_for_each_entry(encap_node, &neigh_node->common_encap_list, + encap_list_node) { + list_for_each_entry(flow_node, &encap_node->common_encap_flows, + encap_flow_list_node) { + bnxt_tc_del_encap_flow(bp, flow_node); + } + + hwrm_cfa_encap_record_free(bp, encap_node->tunnel_handle); + encap_node->tunnel_handle = INVALID_TUNNEL_HANDLE; + memcpy(encap_node->l2_info.dmac, bp->neigh_update.neigh->ha, ETH_ALEN); + } + + list_for_each_entry(encap_node, &neigh_node->common_encap_list, + encap_list_node) { + list_for_each_entry(flow_node, &encap_node->common_encap_flows, + encap_flow_list_node) { + rc = bnxt_tc_add_encap_flow(bp, neigh_node, flow_node); + if (rc) + list_add(&flow_node->failed_add_flow_node, + &failed_flows_head); + } + } + /* Free flow node which re-add to HW failed */ + list_for_each_entry(flow_node, &failed_flows_head, failed_add_flow_node) + bnxt_tc_free_encap_flow(bp, flow_node); +} + +void bnxt_tc_update_neigh_work(struct work_struct *work) +{ + struct bnxt *bp = container_of(work, struct bnxt, neigh_update.work); + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node = NULL; + struct bnxt_tc_neigh_node *neigh_node; + + mutex_lock(&tc_info->lock); + neigh_node = bnxt_tc_lkup_neigh_node(bp, bp->neigh_update.neigh); + if (!neigh_node) + goto exit; + + if (ether_addr_equal(neigh_node->dmac, bp->neigh_update.neigh->ha)) + goto exit; + + if (BNXT_TRUFLOW_EN(bp)) + bnxt_tc_del_add_encap_flows_tf(bp, encap_node, neigh_node); + else + bnxt_tc_del_add_encap_flows_afm(bp, encap_node, neigh_node); + + memcpy(neigh_node->dmac, bp->neigh_update.neigh->ha, ETH_ALEN); + +exit: + mutex_unlock(&tc_info->lock); + neigh_release(bp->neigh_update.neigh); + bp->neigh_update.neigh = NULL; +} + +static int __bnxt_tc_del_flow_afm(struct bnxt *bp, void *flow) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *flow_node = flow; + int rc; + + /* send HWRM cmd to free the flow-id */ + bnxt_hwrm_cfa_flow_free(bp, 
flow_node); + + /* release references to any tunnel encap/decap nodes */ + bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); + + /* release reference to l2 node */ + bnxt_tc_put_l2_node(bp, flow_node); + + rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node, + tc_info->flow_ht_params); + if (rc) + netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", + __func__, rc); + + kfree_rcu(flow_node, rcu); + return 0; +} + +static void bnxt_tc_put_encap_node(struct bnxt *bp, + struct bnxt_tf_flow_node *flow_node) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int refcnt; + + list_del(&flow_node->encap_flow_list_node); + refcnt = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, + flow_node->encap_node); + + /* If there are no flows referencing this encap node, + * (i.e, encap_node is freed) drop its reference on + * the neigh_node. + */ + if (!refcnt) + bnxt_tc_put_neigh_node(bp, &tc_info->neigh_table, + &tc_info->neigh_ht_params, + flow_node->encap_node->neigh_node); +} + +static int bnxt_tc_get_encap_node(struct bnxt *bp, + struct bnxt_tf_flow_node *flow_node, + struct bnxt_ulp_flow_info *flow_info) +{ + struct ip_tunnel_key *encap_key = flow_info->encap_key; + struct bnxt_tc_neigh_key *neigh_key = flow_info->neigh_key; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node; + int rc; + + /* Check if there's another flow using the same tunnel encap. + * If not, add this tunnel to the table. 
+ */ + encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, + encap_key, + BNXT_TC_TUNNEL_NODE_TYPE_ENCAP); + if (!encap_node) + return -ENOMEM; + + flow_node->encap_node = encap_node; + + /* Encap node already exists */ + if (encap_node->refcount > 1) + goto done; + + /* Initialize encap node */ + ether_addr_copy(encap_node->l2_info.dmac, flow_info->tnl_dmac); + ether_addr_copy(encap_node->l2_info.smac, flow_info->tnl_smac); + encap_node->l2_info.ether_type = flow_info->tnl_ether_type; + + rc = bnxt_tc_create_neigh_node(bp, flow_node, neigh_key); + if (rc) + goto put_encap; + +done: + /* Add flow to encap list, it will be used by neigh update event */ + list_add(&flow_node->encap_flow_list_node, + &encap_node->common_encap_flows); + return 0; + +put_encap: + bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, encap_node); + return rc; +} + +static int __bnxt_tc_del_flow_tf(struct bnxt *bp, void *flow) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tf_flow_node *flow_node = flow; + int rc; + + rc = bnxt_ulp_flow_destroy(bp, flow_node->flow_id, + flow_node->ulp_src_fid, flow_node->dscp_remap); + if (rc) { + if (rc != -ENOENT) + netdev_err(bp->dev, + "Failed to destroy flow: cookie:0x%lx src_fid:0x%x error:%d\n", + flow_node->key.cookie, flow_node->ulp_src_fid, rc); + else + netdev_dbg(bp->dev, + "Failed to destroy flow: cookie:0x%lx src_fid:0x%x error:%d\n", + flow_node->key.cookie, flow_node->ulp_src_fid, rc); + } + + /* Release references to any tunnel encap node */ + if (flow_node->encap_node) + bnxt_tc_put_encap_node(bp, flow_node); + + rc = rhashtable_remove_fast(&tc_info->tf_flow_table, &flow_node->node, + tc_info->tf_flow_ht_params); + if (rc) + netdev_dbg(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n", + __func__, rc); + + netdev_dbg(bp->dev, + "%s: cookie:0x%lx src_fid:%d flow_id:0x%x\n", + __func__, flow_node->key.cookie, flow_node->key.src_fid, + flow_node->flow_id); 
+ + if (flow_node->mparms) + bnxt_ulp_free_mapper_encap_mparams(flow_node->mparms); + + kfree_rcu(flow_node, rcu); + return rc; +} + +static int __bnxt_tc_del_flow(struct bnxt *bp, void *flow) +{ + if (BNXT_TRUFLOW_EN(bp)) + return __bnxt_tc_del_flow_tf(bp, flow); + else + return __bnxt_tc_del_flow_afm(bp, flow); +} + +#define BNXT_BATCH_FLOWS_NUM 32 + +static void bnxt_tc_batch_flows_get(struct rhashtable_iter *iter, + void *batch_flows[], + int *num_flows) +{ + void *flow_node; + int i = 0; + + rhashtable_walk_start(iter); + while ((flow_node = rhashtable_walk_next(iter)) != NULL) { + if (IS_ERR(flow_node)) + continue; + + batch_flows[i++] = flow_node; + if (i >= BNXT_BATCH_FLOWS_NUM) + break; + } + *num_flows = i; + rhashtable_walk_stop(iter); +} + +void bnxt_tc_flush_flows(struct bnxt *bp) +{ + void *batch_flow_nodes[BNXT_BATCH_FLOWS_NUM]; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct rhashtable_iter iter; + int i, num_flows; + + mutex_lock(&tc_info->lock); + num_flows = atomic_read(&tc_info->flow_table.nelems); + if (!num_flows) { + mutex_unlock(&tc_info->lock); + return; + } + + netdev_warn(bp->dev, "Flushing offloaded flows\n"); + rhashtable_walk_enter(&tc_info->flow_table, &iter); + do { + bnxt_tc_batch_flows_get(&iter, batch_flow_nodes, &num_flows); + for (i = 0; i < num_flows; i++) + __bnxt_tc_del_flow(bp, batch_flow_nodes[i]); + } while (num_flows != 0); + rhashtable_walk_exit(&iter); + mutex_unlock(&tc_info->lock); +} + +static void bnxt_tc_set_l2_dir_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, + u16 src_fid) +{ + flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX; + /* Add src_fid to l2 key field for egress tc flower flows, it will + * make sure that egress flow entries from different representor + * port have different HW entries for the L2 lookup stage. 
+ */ + if (flow->l2_key.dir == BNXT_DIR_TX) + flow->l2_key.src_fid = flow->src_fid; +} + +static void bnxt_tc_set_src_fid(struct bnxt* bp, struct bnxt_tc_flow *flow, + u16 src_fid) +{ + if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) + flow->src_fid = bp->pf.fw_fid; + else + flow->src_fid = src_fid; +} + +/* Add a new flow or replace an existing flow. + * Notes on locking: + * There are essentially two critical sections here. + * 1. while adding a new flow + * a) lookup l2-key + * b) issue HWRM cmd and get flow_handle + * c) link l2-key with flow + * 2. while deleting a flow + * a) unlinking l2-key from flow + * A lock is needed to protect these two critical sections. + * + * The hash-tables are already protected by the rhashtable API. + */ +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_add_flow_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_add_flow_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tc_flow_node *new_node, *old_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow *flow; + __le32 tunnel_handle = 0; + __le16 ref_flow_handle; + int rc = 0; + + /* Configure tc flower on vxlan interface, it will iterate all BRCM + * interfaces, function bnxt_tc_parse_flow will generate an error log + * on interfaces which don't enable switchdev mode, need to check + * switchdev mode before calling this function to avoid the error log. 
+ */ + if (!bnxt_tc_is_switchdev_mode(bp)) + return -EINVAL; + + /* allocate memory for the new flow and its node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) { + rc = -ENOMEM; + goto done; + } + new_node->key.cookie = tc_flow_cmd->cookie; +#ifdef HAVE_TC_CB_EGDEV + new_node->tc_dev_dir = tc_dev_dir; +#endif + flow = &new_node->flow; + + rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow); + if (rc) + goto free_node; + + bnxt_tc_set_src_fid(bp, flow, src_fid); + bnxt_tc_set_l2_dir_fid(bp, flow, flow->src_fid); + new_node->key.src_fid = flow->src_fid; + + if (!bnxt_tc_can_offload(bp, flow)) { + rc = -EOPNOTSUPP; + kfree_rcu(new_node, rcu); + return rc; + } + + mutex_lock(&tc_info->lock); + /* Synchronize with switchdev mode change via sriov_disable() */ + if (!bnxt_tc_is_switchdev_mode(bp)) { + mutex_unlock(&tc_info->lock); + kfree_rcu(new_node, rcu); + return -EINVAL; + } + /* If a flow exists with the same key, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->flow_table, + &new_node->key, + tc_info->flow_ht_params); + if (old_node) { +#ifdef HAVE_TC_CB_EGDEV + if (old_node->tc_dev_dir != tc_dev_dir) { + /* This happens when TC invokes flow-add for the same + * flow a second time through egress dev (e.g, in the + * case of VF-VF, VF-Uplink flows). Ignore it and + * return success. + */ + goto unlock; + } +#endif + __bnxt_tc_del_flow(bp, old_node); + } + + /* Check if the L2 part of the flow has been offloaded already. + * If so, bump up its refcnt and get its reference handle. 
+ */ + rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle); + if (rc) + goto unlock; + + /* If the flow involves tunnel encap/decap, get tunnel_handle */ + rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle); + if (rc) + goto put_l2; + + /* send HWRM cmd to alloc the flow */ + rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, + tunnel_handle, new_node); + if (rc) + goto put_tunnel; + + flow->lastused = jiffies; + spin_lock_init(&flow->stats_lock); + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, + tc_info->flow_ht_params); + if (rc) + goto hwrm_flow_free; + + mutex_unlock(&tc_info->lock); + return 0; + +hwrm_flow_free: + bnxt_hwrm_cfa_flow_free(bp, new_node); +put_tunnel: + bnxt_tc_put_tunnel_handle(bp, flow, new_node); +put_l2: + bnxt_tc_put_l2_node(bp, new_node); +unlock: + mutex_unlock(&tc_info->lock); +free_node: + kfree_rcu(new_node, rcu); +done: + if (rc == -ENOSPC) + net_info_ratelimited("%s %s: No resources for new flow, cookie=0x%lx error=%d\n", + bp->dev->name, __func__, tc_flow_cmd->cookie, rc); + else if (rc) + netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n", + __func__, tc_flow_cmd->cookie, rc); + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV + +static bool bnxt_tc_is_action_decap(struct flow_cls_offload *tc_flow_cmd) +{ + struct tcf_exts *tc_exts = tc_flow_cmd->exts; + struct tc_action *tc_act; +#ifndef HAVE_TC_EXTS_FOR_ACTION + LIST_HEAD(tc_actions); +#else + int i; +#endif + +#ifndef HAVE_TC_EXTS_FOR_ACTION + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { +#else + tcf_exts_for_each_action(i, tc_act, tc_exts) { +#endif + if (is_tcf_tunnel_release(tc_act)) + return true; + } + + return false; +} + +#endif + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_add_flow_tf(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_add_flow_tf(struct bnxt *bp, u16 
src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tf_flow_node *new_node, *old_node; + struct bnxt_ulp_flow_info flow_info = { 0 }; + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc = 0; + + /* Allocate memory for the new flow and its node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) { + rc = -ENOMEM; + goto done; + } + + new_node->key.cookie = tc_flow_cmd->cookie; + +#ifdef HAVE_TC_CB_EGDEV + new_node->tc_dev_dir = tc_dev_dir; + + /* If it is a decap-flow offloaded on the egress dev, then the + * actual src_fid must be that of the PF since it is really an + * ingress flow. Pass the right src_fid to the ULP. But use the + * VF's src_fid in the flow_node key, since we need that to lookup + * the flow in flow_stats() and del_flow(). This is the only case + * in which the src_fid in the flow_node key and the src_fid passed + * to the ULP are different. + */ + new_node->ulp_src_fid = bnxt_tc_is_action_decap(tc_flow_cmd) ? + bp->pf.fw_fid : src_fid; +#else + new_node->ulp_src_fid = src_fid; +#endif + new_node->key.src_fid = src_fid; + + mutex_lock(&tc_info->lock); + + if (!bnxt_tc_flower_enabled(bp)) { + rc = -EOPNOTSUPP; + goto unlock; + } + + /* Synchronize with switchdev mode change via sriov_disable() */ + if (!bnxt_tc_is_switchdev_mode(bp)) { + rc = -EOPNOTSUPP; + goto unlock; + } + + /* If a flow exists with the same cookie, delete it */ + old_node = rhashtable_lookup_fast(&tc_info->tf_flow_table, + &new_node->key, + tc_info->tf_flow_ht_params); + if (old_node) { +#ifdef HAVE_TC_CB_EGDEV + /* This happens when TC invokes flow-add for the same + * flow a second time through egress dev (e.g, in the + * case of VF-VF, VF-Uplink flows). Ignore it and + * return success. 
+ */ + if (old_node->tc_dev_dir != tc_dev_dir) + goto unlock; +#endif + __bnxt_tc_del_flow(bp, old_node); + } + + rc = bnxt_ulp_flow_create(bp, new_node->ulp_src_fid, tc_flow_cmd, + &flow_info); + if (rc) + goto unlock; + + new_node->mparms = flow_info.mparms; + new_node->flow_id = flow_info.flow_id; + new_node->dscp_remap = flow_info.dscp_remap; + netdev_dbg(bp->dev, + "%s: cookie:0x%lx src_fid:0x%x flow_id:0x%x\n", + __func__, tc_flow_cmd->cookie, src_fid, flow_info.flow_id); + + if (flow_info.encap_key) { + rc = bnxt_tc_get_encap_node(bp, new_node, &flow_info); + if (rc) + goto free_flow; + } + + /* add new flow to flow-table */ + rc = rhashtable_insert_fast(&tc_info->tf_flow_table, &new_node->node, + tc_info->tf_flow_ht_params); + if (rc) + goto put_encap; + + mutex_unlock(&tc_info->lock); + + /* flow_info.mparms will be freed during flow destroy */ + vfree(flow_info.encap_key); + vfree(flow_info.neigh_key); + return 0; + +put_encap: + if (flow_info.encap_key) + bnxt_tc_put_encap_node(bp, new_node); +free_flow: + bnxt_ulp_flow_destroy(bp, new_node->flow_id, new_node->ulp_src_fid, + new_node->dscp_remap); + if (flow_info.encap_key) { + vfree(flow_info.encap_key); + vfree(flow_info.neigh_key); + vfree(flow_info.mparms); + } +unlock: + mutex_unlock(&tc_info->lock); + kfree_rcu(new_node, rcu); +done: + if (rc == -ENOSPC) + net_info_ratelimited("%s: No HW resources for new flow: cookie=0x%lx error=%d\n", + bp->dev->name, tc_flow_cmd->cookie, rc); + else if (rc && rc != -EOPNOTSUPP) + netdev_err(bp->dev, + "Failed to create flow: cookie:0x%lx src_fid:0x%x error:%d\n", + tc_flow_cmd->cookie, src_fid, rc); + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + int rc; + +#ifdef HAVE_TC_CB_EGDEV + if (BNXT_TRUFLOW_EN(bp)) + rc = 
bnxt_tc_add_flow_tf(bp, src_fid, tc_flow_cmd, tc_dev_dir); + else + rc = bnxt_tc_add_flow_afm(bp, src_fid, tc_flow_cmd, tc_dev_dir); +#else + if (BNXT_TRUFLOW_EN(bp)) + rc = bnxt_tc_add_flow_tf(bp, src_fid, tc_flow_cmd); + else + rc = bnxt_tc_add_flow_afm(bp, src_fid, tc_flow_cmd); +#endif + + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_del_flow_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_del_flow_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node *flow_node; + struct bnxt_tc_flow_node_key flow_key; + int rc; + + memset(&flow_key, 0, sizeof(flow_key)); + flow_key.cookie = tc_flow_cmd->cookie; + flow_key.src_fid = src_fid; + mutex_lock(&tc_info->lock); + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &flow_key, + tc_info->flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node || flow_node->tc_dev_dir != tc_dev_dir) { +#else + if (!flow_node) { +#endif + mutex_unlock(&tc_info->lock); + return -EINVAL; + } + + rc = __bnxt_tc_del_flow(bp, flow_node); + mutex_unlock(&tc_info->lock); + + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_del_flow_tf(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_del_flow_tf(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node_key flow_key; + struct bnxt_tf_flow_node *flow_node; + int rc; + + memset(&flow_key, 0, sizeof(flow_key)); + flow_key.cookie = tc_flow_cmd->cookie; + flow_key.src_fid = src_fid; + + mutex_lock(&tc_info->lock); + if (!bnxt_tc_flower_enabled(bp)) { + rc = -EOPNOTSUPP; + goto unlock; + } + flow_node = rhashtable_lookup_fast(&tc_info->tf_flow_table, + &flow_key, + tc_info->tf_flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node 
|| flow_node->tc_dev_dir != tc_dev_dir) { +#else + if (!flow_node) { +#endif + mutex_unlock(&tc_info->lock); + return -EINVAL; + } + + rc = __bnxt_tc_del_flow(bp, flow_node); + +unlock: + mutex_unlock(&tc_info->lock); + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_del_flow(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_del_flow(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + int rc; + +#ifdef HAVE_TC_CB_EGDEV + if (BNXT_TRUFLOW_EN(bp)) + rc = bnxt_tc_del_flow_tf(bp, src_fid, tc_flow_cmd, + tc_dev_dir); + else + rc = bnxt_tc_del_flow_afm(bp, src_fid, tc_flow_cmd, tc_dev_dir); +#else + if (BNXT_TRUFLOW_EN(bp)) + rc = bnxt_tc_del_flow_tf(bp, src_fid, tc_flow_cmd); + else + rc = bnxt_tc_del_flow_afm(bp, src_fid, tc_flow_cmd); +#endif + + return rc; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_get_flow_stats_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_get_flow_stats_afm(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node_key flow_key; + struct bnxt_tc_flow_node *flow_node; + struct bnxt_tc_flow *flow; + unsigned long lastused; + + memset(&flow_key, 0, sizeof(flow_key)); + flow_key.cookie = tc_flow_cmd->cookie; + flow_key.src_fid = src_fid; + + mutex_lock(&tc_info->lock); + flow_node = rhashtable_lookup_fast(&tc_info->flow_table, + &flow_key, + tc_info->flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node || flow_node->tc_dev_dir != tc_dev_dir) { +#else + if (!flow_node) { +#endif + mutex_unlock(&tc_info->lock); + return -1; + } + + flow = &flow_node->flow; + curr_stats = &flow->stats; + prev_stats = &flow->prev_stats; + + spin_lock(&flow->stats_lock); + stats.packets = curr_stats->packets - 
prev_stats->packets; + stats.bytes = curr_stats->bytes - prev_stats->bytes; + *prev_stats = *curr_stats; + lastused = flow->lastused; + spin_unlock(&flow->stats_lock); + +#if defined(HAVE_FLOW_OFFLOAD_H) && defined(HAVE_FLOW_STATS_UPDATE) + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, + lastused, FLOW_ACTION_HW_STATS_DELAYED); +#else + tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, + lastused); +#endif + mutex_unlock(&tc_info->lock); + return 0; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_get_flow_stats_tf(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + int tc_dev_dir) +#else +static int bnxt_tc_get_flow_stats_tf(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +#endif +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_tc_flow_node_key flow_key; + struct bnxt_tf_flow_node *flow_node; + u64 packets = 0, bytes = 0; + unsigned long lastused = 0; + + memset(&flow_key, 0, sizeof(flow_key)); + flow_key.cookie = tc_flow_cmd->cookie; + flow_key.src_fid = src_fid; + + mutex_lock(&tc_info->lock); + if (!bnxt_tc_flower_enabled(bp)) { + mutex_unlock(&tc_info->lock); + return -1; + } + flow_node = rhashtable_lookup_fast(&tc_info->tf_flow_table, + &flow_key, + tc_info->tf_flow_ht_params); +#ifdef HAVE_TC_CB_EGDEV + if (!flow_node || flow_node->tc_dev_dir != tc_dev_dir) { +#else + if (!flow_node) { +#endif + mutex_unlock(&tc_info->lock); + return -1; + } + + bnxt_ulp_flow_query_count(bp, flow_node->flow_id, &packets, + &bytes, &lastused); + +#if defined(HAVE_FLOW_OFFLOAD_H) && defined(HAVE_FLOW_STATS_UPDATE) + flow_stats_update(&tc_flow_cmd->stats, bytes, packets, 0, + lastused, FLOW_ACTION_HW_STATS_DELAYED); +#else + tcf_exts_stats_update(tc_flow_cmd->exts, bytes, packets, + lastused); +#endif + mutex_unlock(&tc_info->lock); + return 0; +} + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_tc_get_flow_stats(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + 
int tc_dev_dir) +{ + if (BNXT_TRUFLOW_EN(bp)) + return bnxt_tc_get_flow_stats_tf(bp, src_fid, tc_flow_cmd, + tc_dev_dir); + else + return bnxt_tc_get_flow_stats_afm(bp, src_fid, tc_flow_cmd, + tc_dev_dir); +} +#else +static int bnxt_tc_get_flow_stats(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +{ + if (BNXT_TRUFLOW_EN(bp)) + return bnxt_tc_get_flow_stats_tf(bp, src_fid, tc_flow_cmd); + else + return bnxt_tc_get_flow_stats_afm(bp, src_fid, tc_flow_cmd); +} +#endif + +static void bnxt_fill_cfa_stats_req(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + __le16 *flow_handle, __le32 *flow_id) +{ + u16 handle; + + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { + *flow_id = flow_node->flow_id; + + /* If flow_id is used to fetch flow stats then: + * 1. lower 12 bits of flow_handle must be set to all 1s. + * 2. 15th bit of flow_handle must specify the flow + * direction (TX/RX). + */ + if (flow_node->flow.l2_key.dir == BNXT_DIR_RX) + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; + else + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; + + *flow_handle = cpu_to_le16(handle); + } else { + *flow_handle = flow_node->flow_handle; + } +} + +static int +bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct hwrm_cfa_flow_stats_output *resp; + struct hwrm_cfa_flow_stats_input *req; + __le16 *req_flow_handles; + __le32 *req_flow_ids; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS); + if (rc) + goto exit; + + req_flow_handles = &req->flow_handle_0; + req_flow_ids = &req->flow_id_0; + + req->num_flows = cpu_to_le16(num_flows); + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + + bnxt_fill_cfa_stats_req(bp, flow_node, + &req_flow_handles[i], &req_flow_ids[i]); + } + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + __le64 *resp_packets; + __le64 
*resp_bytes; + + resp_packets = &resp->packet_0; + resp_bytes = &resp->byte_0; + + for (i = 0; i < num_flows; i++) { + stats_batch[i].hw_stats.packets = + le64_to_cpu(resp_packets[i]); + stats_batch[i].hw_stats.bytes = + le64_to_cpu(resp_bytes[i]); + } + } + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "error rc=%d\n", rc); + + return rc; +} + +/* + * Add val to accum while handling a possible wraparound + * of val. Even though val is of type u64, its actual width + * is denoted by mask and will wrap-around beyond that width. + */ +static void accumulate_val(u64 *accum, u64 val, u64 mask) +{ +#define low_bits(x, mask) ((x) & (mask)) +#define high_bits(x, mask) ((x) & ~(mask)) + bool wrapped = val < low_bits(*accum, mask); + + *accum = high_bits(*accum, mask) + val; + if (wrapped) + *accum += (mask + 1); +} + +/* The HW counters' width is much less than 64bits. + * Handle possible wrap-around while updating the stat counters + */ +static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info, + struct bnxt_tc_flow_stats *acc_stats, + struct bnxt_tc_flow_stats *hw_stats) +{ + accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask); + accumulate_val(&acc_stats->packets, hw_stats->packets, + tc_info->packets_mask); +} + +static int +bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc, i; + + rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch); + if (rc) + return rc; + + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + struct bnxt_tc_flow *flow = &flow_node->flow; + + spin_lock(&flow->stats_lock); + bnxt_flow_stats_accum(tc_info, &flow->stats, + &stats_batch[i].hw_stats); + if (flow->stats.packets != flow->prev_stats.packets) + flow->lastused = jiffies; + spin_unlock(&flow->stats_lock); + } + + return 0; +} + +static int +bnxt_tc_flow_stats_batch_prep(struct 
bnxt *bp, + struct bnxt_tc_stats_batch stats_batch[], + int *num_flows) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct rhashtable_iter *iter = &tc_info->iter; + void *flow_node; + int rc, i; + + rhashtable_walk_start(iter); + + rc = 0; + for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { + flow_node = rhashtable_walk_next(iter); + if (IS_ERR(flow_node)) { + i = 0; + if (PTR_ERR(flow_node) == -EAGAIN) { + continue; + } else { + rc = PTR_ERR(flow_node); + goto done; + } + } + + /* No more flows */ + if (!flow_node) + goto done; + + stats_batch[i].flow_node = flow_node; + } +done: + rhashtable_walk_stop(iter); + *num_flows = i; + return rc; +} + +void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int num_flows, rc; + + mutex_lock(&tc_info->lock); + num_flows = atomic_read(&tc_info->flow_table.nelems); + if (!num_flows) { + mutex_unlock(&tc_info->lock); + return; + } + rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter); + + for (;;) { + rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch, + &num_flows); + if (rc) { + if (rc == -EAGAIN) + continue; + break; + } + + if (!num_flows) + break; + + bnxt_tc_flow_stats_batch_update(bp, num_flows, + tc_info->stats_batch); + } + + rhashtable_walk_exit(&tc_info->iter); + mutex_unlock(&tc_info->lock); +} + +#ifdef HAVE_TC_SETUP_BLOCK +static bool bnxt_tc_can_offload_and_chain(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower) +{ + bool can = true; + u32 chain_index; + + if (!BNXT_TRUFLOW_EN(bp)) + return tc_cls_can_offload_and_chain0(bp->dev, + (void *)cls_flower); + + can = tc_can_offload(bp->dev); + if (!can) { + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "TC offload is disabled on net device"); + return can; + } + + chain_index = cls_flower->common.chain_index; + if (!chain_index) + return can; + + can = bnxt_ulp_flow_chain_validate(bp, src_fid, cls_flower); + if (!can) + NL_SET_ERR_MSG_MOD(cls_flower->common.extack, + "Driver 
supports only offload of chain 0"); + return can; +} +#endif + +#ifdef HAVE_TC_CB_EGDEV +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower, + int tc_dev_dir) +{ +#ifdef HAVE_TC_SETUP_TYPE +#ifndef HAVE_TC_SETUP_BLOCK + if (!is_classid_clsact_ingress(cls_flower->common.classid)) + return -EOPNOTSUPP; +#else + if (!bnxt_tc_can_offload_and_chain(bp, src_fid, cls_flower)) + return -EOPNOTSUPP; +#endif +#endif + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return bnxt_tc_add_flow(bp, src_fid, cls_flower, tc_dev_dir); + case FLOW_CLS_DESTROY: + return bnxt_tc_del_flow(bp, src_fid, cls_flower, tc_dev_dir); + case FLOW_CLS_STATS: + return bnxt_tc_get_flow_stats(bp, src_fid, cls_flower, tc_dev_dir); + default: + return -EOPNOTSUPP; + } +} + +#else + +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower) +{ +#ifdef HAVE_TC_SETUP_TYPE +#ifndef HAVE_TC_SETUP_BLOCK + if (!is_classid_clsact_ingress(cls_flower->common.classid)) + return -EOPNOTSUPP; +#else + if (!bnxt_tc_can_offload_and_chain(bp, src_fid, cls_flower)) + return -EOPNOTSUPP; +#endif +#endif + switch (cls_flower->command) { + case FLOW_CLS_REPLACE: + return bnxt_tc_add_flow(bp, src_fid, cls_flower); + case FLOW_CLS_DESTROY: + return bnxt_tc_del_flow(bp, src_fid, cls_flower); + case FLOW_CLS_STATS: + return bnxt_tc_get_flow_stats(bp, src_fid, cls_flower); + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_CB_EGDEV */ + +#ifdef HAVE_TC_SETUP_TYPE +#ifdef HAVE_TC_SETUP_BLOCK +#ifdef HAVE_FLOW_INDR_BLOCK_CB + +static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; + struct flow_cls_offload *flower = type_data; + struct bnxt *bp = priv->bp; + + if (!tc_cls_can_offload_and_chain0(bp->dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return 
bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower); +#endif + default: + return -EOPNOTSUPP; + } +} + +static struct bnxt_flower_indr_block_cb_priv * +bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev) +{ + struct bnxt_flower_indr_block_cb_priv *cb_priv; + +#ifndef HAVE_FLOW_INDIR_BLK_PROTECTION + /* All callback list access should be protected by RTNL. */ + ASSERT_RTNL(); +#endif + + list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list) + if (cb_priv->tunnel_netdev == netdev) + return cb_priv; + + return NULL; +} + +static void bnxt_tc_setup_indr_rel(void *cb_priv) +{ + struct bnxt_flower_indr_block_cb_priv *priv = cb_priv; + + list_del(&priv->list); + kfree(priv); +} + +/* Ensure that the indirect block offload request is for + * this PF, by comparing with the lower_dev of vxlan-dev. + */ +static bool bnxt_is_vxlan_lower_dev(struct net_device *vxlan_netdev, + struct bnxt *bp) +{ + const struct vxlan_dev *vxlan = netdev_priv(vxlan_netdev); + const struct vxlan_rdst *dst = &vxlan->default_dst; + +#ifdef HAVE_VXLAN_RDST_RDEV + if (dst->remote_dev) + return bp->dev == dst->remote_dev; +#else + if (dst->remote_ifindex) + return (bp->dev == __dev_get_by_index(dev_net(bp->dev), + dst->remote_ifindex)); +#endif + /* If lower dev is not specified, this vxlan interface + * could be a vport device. Let the offload go through. 
+ */ + return true; +} + +extern struct list_head bnxt_block_cb_list; +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC +static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp, + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +#else +static int bnxt_tc_setup_indr_block(struct net_device *netdev, + struct bnxt *bp, + struct flow_block_offload *f, void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +#endif +{ + struct bnxt_flower_indr_block_cb_priv *cb_priv; + struct flow_block_cb *block_cb; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + if (!bnxt_is_vxlan_lower_dev(netdev, bp)) + return -EOPNOTSUPP; + + switch (f->command) { + case FLOW_BLOCK_BIND: + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); + if (!cb_priv) + return -ENOMEM; + + cb_priv->tunnel_netdev = netdev; + cb_priv->bp = bp; + list_add(&cb_priv->list, &bp->tc_indr_block_list); + +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC + block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, + cb_priv, cb_priv, + bnxt_tc_setup_indr_rel, f, + netdev, sch, data, bp, cleanup); +#else + block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, + cb_priv, cb_priv, + bnxt_tc_setup_indr_rel, f, + netdev, data, bp, cleanup); +#endif + if (IS_ERR(block_cb)) { + list_del(&cb_priv->list); + kfree(cb_priv); + return PTR_ERR(block_cb); + } + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list); + break; + case FLOW_BLOCK_UNBIND: + cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev); + if (!cb_priv) + return -ENOENT; + + block_cb = flow_block_cb_lookup(f->block, + bnxt_tc_setup_indr_block_cb, + cb_priv); + if (!block_cb) + return -ENOENT; + + flow_indr_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) +{ + 
return netif_is_vxlan(netdev); +} + +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +#else +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data, + void *data, + void (*cleanup)(struct flow_block_cb *block_cb)) +#endif +{ + if (!netdev || !bnxt_is_netdev_indr_offload(netdev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_BLOCK: +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC + return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, + cleanup); +#else + return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, + cleanup); +#endif + default: + break; + } + + return -EOPNOTSUPP; +} + +#ifndef HAVE_FLOW_INDR_DEV_RGTR +int bnxt_tc_indr_block_event(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct net_device *netdev; + struct bnxt *bp; + int rc; + + netdev = netdev_notifier_info_to_dev(ptr); + if (!bnxt_is_netdev_indr_offload(netdev)) + return NOTIFY_OK; + + bp = container_of(nb, struct bnxt, tc_netdev_nb); + + switch (event) { + case NETDEV_REGISTER: + rc = __flow_indr_block_cb_register(netdev, bp, + bnxt_tc_setup_indr_cb, + bp); + if (rc) + netdev_info(bp->dev, + "Failed to register indirect blk: dev: %s\n", + netdev->name); + break; + case NETDEV_UNREGISTER: + __flow_indr_block_cb_unregister(netdev, + bnxt_tc_setup_indr_cb, + bp); + break; + } + + return NOTIFY_DONE; +} +#endif /* HAVE_FLOW_INDR_DEV_RGTR */ +#endif /* HAVE_FLOW_INDR_BLOCK_CB */ + +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) + +static inline int bnxt_tc_find_vf_by_fid(struct bnxt *bp, u16 fid) +{ + int num_vfs = pci_num_vf(bp->pdev); + int i; + + for (i = 0; i < num_vfs; i++) { + if (bp->pf.vf[i].fw_fid == fid) + break; + } + if (i >= num_vfs) + return -EINVAL; + return 
i; +} + +static int bnxt_tc_del_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *matchall_cmd) +{ + int vf_idx; + + vf_idx = bnxt_tc_find_vf_by_fid(bp, src_fid); + if (vf_idx < 0) + return vf_idx; + + if (bp->pf.vf[vf_idx].police_id != matchall_cmd->cookie) + return -ENOENT; + + bnxt_set_vf_bw(bp->dev, vf_idx, 0, 0); + bp->pf.vf[vf_idx].police_id = 0; + return 0; +} + +static int bnxt_tc_add_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *matchall_cmd) +{ + struct flow_action_entry *action; + int vf_idx; + s64 burst; + u64 rate; + int rc; + + vf_idx = bnxt_tc_find_vf_by_fid(bp, src_fid); + if (vf_idx < 0) + return vf_idx; + + action = &matchall_cmd->rule->action.entries[0]; + if (action->id != FLOW_ACTION_POLICE) { + netdev_err(bp->dev, "%s: Unsupported matchall action: %d", + __func__, action->id); + return -EOPNOTSUPP; + } + if (bp->pf.vf[vf_idx].police_id && bp->pf.vf[vf_idx].police_id != + matchall_cmd->cookie) { + netdev_err(bp->dev, + "%s: Policer is already configured for VF: %d", + __func__, vf_idx); + return -EEXIST; + } + + rate = (u32)div_u64(action->police.rate_bytes_ps, 1024 * 1000) * 8; + burst = (u32)div_u64(action->police.rate_bytes_ps * + PSCHED_NS2TICKS(action->police.burst), + PSCHED_TICKS_PER_SEC); + burst = (u32)PSCHED_TICKS2NS(burst) / (1 << 20); + + rc = bnxt_set_vf_bw(bp->dev, vf_idx, burst, rate); + if (rc) { + netdev_err(bp->dev, + "Error: %s: VF: %d rate: %llu burst: %llu rc: %d", + __func__, vf_idx, rate, burst, rc); + return rc; + } + + bp->pf.vf[vf_idx].police_id = matchall_cmd->cookie; + return 0; +} + +int bnxt_tc_setup_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *cls_matchall) +{ + if (!tc_cls_can_offload_and_chain0(bp->dev, (void *)cls_matchall)) + return -EOPNOTSUPP; + + switch (cls_matchall->command) { + case TC_CLSMATCHALL_REPLACE: + return bnxt_tc_add_matchall(bp, src_fid, cls_matchall); + case TC_CLSMATCHALL_DESTROY: + return bnxt_tc_del_matchall(bp, 
src_fid, cls_matchall); + default: + return -EOPNOTSUPP; + } +} + +#endif /* HAVE_TC_MATCHALL_FLOW_RULE && HAVE_FLOW_ACTION_POLICE */ +#endif /* HAVE_TC_SETUP_BLOCK */ +#endif /* HAVE_TC_SETUP_TYPE */ + +static const struct rhashtable_params bnxt_tc_flow_ht_params = { + .head_offset = offsetof(struct bnxt_tc_flow_node, node), + .key_offset = offsetof(struct bnxt_tc_flow_node, key), + .key_len = sizeof(struct bnxt_tc_flow_node_key), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tf_flow_ht_params = { + .head_offset = offsetof(struct bnxt_tf_flow_node, node), + .key_offset = offsetof(struct bnxt_tf_flow_node, key), + .key_len = sizeof(struct bnxt_tc_flow_node_key), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + .key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + .key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_tunnel_ht_params = { + .head_offset = offsetof(struct bnxt_tc_tunnel_node, node), + .key_offset = offsetof(struct bnxt_tc_tunnel_node, key), + .key_len = sizeof(struct ip_tunnel_key), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_neigh_ht_params = { + .head_offset = offsetof(struct bnxt_tc_neigh_node, node), + .key_offset = offsetof(struct bnxt_tc_neigh_node, key), + .key_len = sizeof(struct bnxt_tc_neigh_key), + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_ulp_udcc_v6_subnet_ht_params = { + .head_offset = offsetof(struct bnxt_ulp_udcc_v6_subnet_node, node), + .key_offset = offsetof(struct bnxt_ulp_udcc_v6_subnet_node, key), + .key_len = 
sizeof(struct bnxt_ulp_udcc_v6_subnet_key), + .automatic_shrinking = true +}; + +/* convert counter width in bits to a mask */ +#define mask(width) ((u64)~0 >> (64 - (width))) + +static int bnxt_rep_netevent_cb(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct bnxt *bp = container_of(nb, struct bnxt, neigh_update.netevent_nb); + struct bnxt_tc_neigh_node *neigh_node; + struct neighbour *n; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + n = ptr; + neigh_node = bnxt_tc_lkup_neigh_node(bp, n); + if (!neigh_node) + break; + + /* We currently support serial processing of neighbor events; if + * there is a pending work item, return without scheduling a new + * one. This logic can be revisited in the future if we need to + * support multiple neighbor update events. + */ + spin_lock_bh(&bp->neigh_update.lock); + if (bp->neigh_update.neigh) { + spin_unlock_bh(&bp->neigh_update.lock); + break; + } + bp->neigh_update.neigh = n; + spin_unlock_bh(&bp->neigh_update.lock); + /* Do not schedule the work if FW reset is in progress. 
*/ + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + netdev_dbg(bp->dev, + "FW reset, dropping neigh update event\n"); + bp->neigh_update.neigh = NULL; + break; + } + /* Release neighbor in queue work handler if put work task successfully */ + neigh_hold(n); + if (schedule_work(&bp->neigh_update.work)) + break; + + neigh_release(n); + bp->neigh_update.neigh = NULL; + break; + default: + break; + } + + return NOTIFY_DONE; +} + +int bnxt_init_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info; + int rc; + + if (bp->hwrm_spec_code < 0x10800) + return 0; + + tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL); + if (!tc_info) + return -ENOMEM; + mutex_init(&tc_info->lock); + + /* Counter widths are programmed by FW */ + tc_info->bytes_mask = mask(36); + tc_info->packets_mask = mask(28); + + tc_info->flow_ht_params = bnxt_tc_flow_ht_params; + rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); + if (rc) + goto free_tc_info; + + tc_info->tf_flow_ht_params = bnxt_tf_flow_ht_params; + rc = rhashtable_init(&tc_info->tf_flow_table, + &tc_info->tf_flow_ht_params); + if (rc) + goto destroy_flow_table; + + tc_info->l2_ht_params = bnxt_tc_l2_ht_params; + rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); + if (rc) + goto destroy_tf_flow_table; + + tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params; + rc = rhashtable_init(&tc_info->decap_l2_table, + &tc_info->decap_l2_ht_params); + if (rc) + goto destroy_l2_table; + + tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->decap_table, + &tc_info->decap_ht_params); + if (rc) + goto destroy_decap_l2_table; + + tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->encap_table, + &tc_info->encap_ht_params); + if (rc) + goto destroy_decap_table; + + tc_info->neigh_ht_params = bnxt_tc_neigh_ht_params; + rc = rhashtable_init(&tc_info->neigh_table, + &tc_info->neigh_ht_params); + if (rc) + goto destroy_encap_table; + + 
tc_info->v6_subnet_ht_params = bnxt_ulp_udcc_v6_subnet_ht_params; + rc = rhashtable_init(&tc_info->v6_subnet_table, + &tc_info->v6_subnet_ht_params); + if (rc) + goto destroy_neigh_table; + + rc = bnxt_ba_init(&tc_info->v6_subnet_pool, + BNXT_ULP_MAX_V6_SUBNETS, + true); + if (rc) + goto destroy_v6_subnet_table; + + tc_info->enabled = true; + bp->dev->hw_features |= NETIF_F_HW_TC; + bp->dev->features |= NETIF_F_HW_TC; + bp->tc_info = tc_info; + + bp->neigh_update.neigh = NULL; + spin_lock_init(&bp->neigh_update.lock); + INIT_WORK(&bp->neigh_update.work, bnxt_tc_update_neigh_work); + bp->neigh_update.netevent_nb.notifier_call = bnxt_rep_netevent_cb; + rc = register_netevent_notifier(&bp->neigh_update.netevent_nb); + if (rc) + goto destroy_v6_subnet_table; + + /* This is required for tf_core to be in place so that dpdk VFs can + * get the memory allocated by the PFs for table scope memory. + * Nic Flow support will always enable ULP. + */ + if (BNXT_CHIP_P7(bp) && BNXT_PF(bp)) + bnxt_tfo_init(bp); + +#ifndef HAVE_FLOW_INDR_BLOCK_CB + netdev_dbg(bp->dev, "Not registering indirect block notification\n"); + return 0; +#else + netdev_dbg(bp->dev, "Registering indirect block notification\n"); + /* init indirect block notifications */ + INIT_LIST_HEAD(&bp->tc_indr_block_list); + rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp); + if (!rc) + return 0; + + unregister_netevent_notifier(&bp->neigh_update.netevent_nb); +#endif + +destroy_v6_subnet_table: + rhashtable_destroy(&tc_info->v6_subnet_table); +destroy_neigh_table: + rhashtable_destroy(&tc_info->neigh_table); +destroy_encap_table: + rhashtable_destroy(&tc_info->encap_table); +destroy_decap_table: + rhashtable_destroy(&tc_info->decap_table); +destroy_decap_l2_table: + rhashtable_destroy(&tc_info->decap_l2_table); +destroy_l2_table: + rhashtable_destroy(&tc_info->l2_table); +destroy_tf_flow_table: + rhashtable_destroy(&tc_info->tf_flow_table); +destroy_flow_table: + rhashtable_destroy(&tc_info->flow_table); 
+free_tc_info: + kfree(tc_info); + return rc; +} + +void bnxt_shutdown_tc(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + + if (!bnxt_tc_flower_enabled(bp)) + return; + +#ifdef HAVE_FLOW_INDR_BLOCK_CB + flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp, + bnxt_tc_setup_indr_rel); +#endif + unregister_netevent_notifier(&bp->neigh_update.netevent_nb); + cancel_work_sync(&bp->neigh_update.work); + rhashtable_destroy(&tc_info->flow_table); + rhashtable_destroy(&tc_info->tf_flow_table); + rhashtable_destroy(&tc_info->l2_table); + rhashtable_destroy(&tc_info->decap_l2_table); + rhashtable_destroy(&tc_info->decap_table); + rhashtable_destroy(&tc_info->encap_table); + rhashtable_destroy(&tc_info->neigh_table); + rhashtable_destroy(&tc_info->v6_subnet_table); + bnxt_ba_deinit(&tc_info->v6_subnet_pool); + /* Free TFC here until Nic Flow support enabled in ULP */ + if (BNXT_CHIP_P7(bp) && BNXT_PF(bp)) + bnxt_tfo_deinit(bp); + kfree(tc_info); + bp->tc_info = NULL; +} + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.h new file mode 100644 index 000000000000..55b8ffefb4b3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc.h @@ -0,0 +1,384 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_TC_H +#define BNXT_TC_H + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +#include + +/* Structs used for storing the filter/actions of the TC cmd. 
+ */ +struct bnxt_tc_l2_key { + u16 src_fid; + u8 dmac[ETH_ALEN]; + u8 smac[ETH_ALEN]; + __be16 inner_vlan_tpid; + __be16 inner_vlan_tci; + __be16 ether_type; + u8 num_vlans; + u8 dir; +#define BNXT_DIR_RX 1 +#define BNXT_DIR_TX 0 +}; + +struct bnxt_tc_l3_key { + union { + struct { + struct in_addr daddr; + struct in_addr saddr; + } ipv4; + struct { + struct in6_addr daddr; + struct in6_addr saddr; + } ipv6; + }; +}; + +struct bnxt_tc_l4_key { + u8 ip_proto; + union { + struct { + __be16 sport; + __be16 dport; + } ports; + struct { + u8 type; + u8 code; + } icmp; + }; +}; + +struct bnxt_tc_tunnel_key { + struct bnxt_tc_l2_key l2; + struct bnxt_tc_l3_key l3; + struct bnxt_tc_l4_key l4; + __be32 id; +}; + +#define bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask) \ + ((is_wildcard(&(eth_addr)[0], ETH_ALEN) && \ + is_wildcard(&(eth_addr)[ETH_ALEN / 2], ETH_ALEN)) || \ + (is_wildcard(&(eth_addr_mask)[0], ETH_ALEN) && \ + is_wildcard(&(eth_addr_mask)[ETH_ALEN / 2], ETH_ALEN))) + +struct bnxt_tc_actions { + u32 flags; +#define BNXT_TC_ACTION_FLAG_FWD BIT(0) +#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1) +#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3) +#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4) +#define BNXT_TC_ACTION_FLAG_DROP BIT(5) +#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6) +#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7) +#define BNXT_TC_ACTION_FLAG_L2_REWRITE BIT(8) +#define BNXT_TC_ACTION_FLAG_NAT_XLATE BIT(9) +#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV4 BIT(10) +#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP_IPV6 BIT(11) + + u16 dst_fid; + struct net_device *dst_dev; + __be16 push_vlan_tpid; + __be16 push_vlan_tci; + + /* tunnel encap */ + struct ip_tunnel_key tun_encap_key; +#define PEDIT_OFFSET_SMAC_LAST_4_BYTES 0x8 + __be16 l2_rewrite_dmac[3]; + __be16 l2_rewrite_smac[3]; + struct { + bool src_xlate; /* true => translate src, + * false => translate dst + * Mutually exclusive, i.e cannot set both + */ + bool l3_is_ipv4; /* false means L3 is ipv6 */ + struct 
bnxt_tc_l3_key l3; + struct bnxt_tc_l4_key l4; + } nat; +}; + +struct bnxt_tc_flow { + u32 flags; +#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1) +#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2) +#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3) +#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4) +#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5) +#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8) +#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9) +#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10) +#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\ + BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\ + BNXT_TC_FLOW_FLAGS_TUNL_ID) + + /* flow applicable to pkts ingressing on this fid */ + u16 src_fid; + struct bnxt_tc_l2_key l2_key; + struct bnxt_tc_l2_key l2_mask; + struct bnxt_tc_l3_key l3_key; + struct bnxt_tc_l3_key l3_mask; + struct bnxt_tc_l4_key l4_key; + struct bnxt_tc_l4_key l4_mask; + struct ip_tunnel_key tun_key; + struct ip_tunnel_key tun_mask; + + struct bnxt_tc_actions actions; + + /* updated stats accounting for hw-counter wrap-around */ + struct bnxt_tc_flow_stats stats; + /* previous snap-shot of stats */ + struct bnxt_tc_flow_stats prev_stats; + unsigned long lastused; /* jiffies */ + /* for calculating delta from prev_stats and + * updating prev_stats atomically. + */ + spinlock_t stats_lock; +}; + +enum bnxt_tc_tunnel_node_type { + BNXT_TC_TUNNEL_NODE_TYPE_NONE, + BNXT_TC_TUNNEL_NODE_TYPE_ENCAP, + BNXT_TC_TUNNEL_NODE_TYPE_DECAP +}; + +/* + * Tunnel encap/decap hash table + * This table is used to maintain a list of flows that use + * the same tunnel encap/decap params (ip_daddrs, vni, udp_dport) + * and the FW returned handle. 
+ * A separate table is maintained for encap and decap + */ +struct bnxt_tc_tunnel_node { + struct ip_tunnel_key key; + struct rhash_head node; + enum bnxt_tc_tunnel_node_type tunnel_node_type; + + /* tunnel l2 info */ + struct bnxt_tc_l2_key l2_info; + +#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff) + /* tunnel handle returned by FW */ + __le32 tunnel_handle; + + u32 refcount; + /* For the shared encap list maintained in neigh node */ + struct list_head encap_list_node; + /* A list of flows that share the encap tunnel node */ + struct list_head common_encap_flows; + struct bnxt_tc_neigh_node *neigh_node; + struct rcu_head rcu; +}; + +/* + * L2 hash table + * The same data-struct is used for L2-flow table and L2-tunnel table. + * The L2 part of a flow or tunnel is stored in a hash table. + * A flow that shares the same L2 key/mask with an + * already existing flow/tunnel must refer to it's flow handle or + * decap_filter_id respectively. + */ +struct bnxt_tc_l2_node { + /* hash key: first 16b of key */ +#define BNXT_TC_L2_KEY_LEN 18 + struct bnxt_tc_l2_key key; + struct rhash_head node; + + /* a linked list of flows that share the same l2 key */ + struct list_head common_l2_flows; + + /* number of flows/tunnels sharing the l2 key */ + u16 refcount; + + struct rcu_head rcu; +}; + +/* Track if the TC offload API is invoked on an ingress or egress device. */ +enum { + BNXT_TC_DEV_INGRESS = 1, + BNXT_TC_DEV_EGRESS = 2 +}; + +/* Use TC provided cookie along with the src_fid of the device on which + * the offload request is received . 
This is done to handle shared block + * filters for 2 VFs of the same PF, since they would come with the same + * cookie + */ +struct bnxt_tc_flow_node_key { + /* hash key: provided by TC */ + unsigned long cookie; + u32 src_fid; +}; + +struct bnxt_tc_flow_node { + struct bnxt_tc_flow_node_key key; + struct rhash_head node; + + struct bnxt_tc_flow flow; + + __le64 ext_flow_handle; + __le16 flow_handle; + __le32 flow_id; + int tc_dev_dir; + + /* L2 node in l2 hashtable that shares flow's l2 key */ + struct bnxt_tc_l2_node *l2_node; + /* for the shared_flows list maintained in l2_node */ + struct list_head l2_list_node; + + /* tunnel encap related */ + struct bnxt_tc_tunnel_node *encap_node; + + /* tunnel decap related */ + struct bnxt_tc_tunnel_node *decap_node; + /* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */ + struct bnxt_tc_l2_node *decap_l2_node; + /* for the shared_flows list maintained in tunnel decap l2_node */ + struct list_head decap_l2_list_node; + /* For the shared flows list maintained in tunnel encap node */ + struct list_head encap_flow_list_node; + /* For the shared flows list which re-add failed when get neigh event */ + struct list_head failed_add_flow_node; + + struct rcu_head rcu; +}; + +struct bnxt_tc_neigh_key { + struct net_device *dev; + union { + struct in_addr v4; + struct in6_addr v6; + } dst_ip; + int family; +}; + +struct bnxt_tc_neigh_node { + struct bnxt_tc_neigh_key key; + struct rhash_head node; + /* An encap tunnel list which use the same neigh node */ + struct list_head common_encap_list; + u32 refcount; + u8 dmac[ETH_ALEN]; + struct rcu_head rcu; +}; + +struct bnxt_tf_flow_node { + struct bnxt_tc_flow_node_key key; + struct rhash_head node; + u32 flow_id; +#ifdef HAVE_TC_CB_EGDEV + int tc_dev_dir; +#endif + u16 ulp_src_fid; + bool dscp_remap; + + /* The below fields are used if the there is a tunnel encap + * action associated with the flow. 
These members are used to + * manage neighbour update events on the tunnel neighbour. + */ + struct bnxt_tc_tunnel_node *encap_node; + /* For the shared flows list maintained in tunnel encap node */ + struct list_head encap_flow_list_node; + /* For the shared flows list when re-add fails during neigh event */ + struct list_head failed_add_flow_node; + void *mparms; + + struct rcu_head rcu; +}; + +#ifdef HAVE_TC_CB_EGDEV +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower, + int tc_dev_dir); +#else +int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *cls_flower); +#endif + +int bnxt_init_tc(struct bnxt *bp); +void bnxt_shutdown_tc(struct bnxt *bp); +void bnxt_tc_flow_stats_work(struct bnxt *bp); +void bnxt_tc_flush_flows(struct bnxt *bp); +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) +int bnxt_tc_setup_matchall(struct bnxt *bp, u16 src_fid, + struct tc_cls_matchall_offload *cls_matchall); +#endif + +void bnxt_tc_update_neigh_work(struct work_struct *work); +u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev); +int bnxt_tc_resolve_ipv4_tunnel_hdrs(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct bnxt_tc_neigh_key *neigh_key); +int bnxt_tc_resolve_ipv6_tunnel_hdrs(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct bnxt_tc_neigh_key *neigh_key); + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return bp->tc_info && bp->tc_info->enabled; +} + +static inline void bnxt_disable_tc_flower(struct bnxt *bp) +{ + mutex_lock(&bp->tc_info->lock); + bp->tc_info->enabled = false; + mutex_unlock(&bp->tc_info->lock); +} + +static inline void bnxt_enable_tc_flower(struct bnxt *bp) +{ + mutex_lock(&bp->tc_info->lock); + bp->tc_info->enabled = true; + mutex_unlock(&bp->tc_info->lock); +} 
+ +#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +static inline int bnxt_init_tc(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_shutdown_tc(struct bnxt *bp) +{ +} + +static inline void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ +} + +static inline void bnxt_tc_flush_flows(struct bnxt *bp) +{ +} + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return false; +} + +static inline void bnxt_disable_tc_flower(struct bnxt *bp) +{ +} + +static inline void bnxt_enable_tc_flower(struct bnxt *bp) +{ +} + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +#endif /* BNXT_TC_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_tc_compat.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc_compat.h new file mode 100644 index 000000000000..1cdcbd743487 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_tc_compat.h @@ -0,0 +1,310 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include "bnxt.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +#ifdef HAVE_FLOW_OFFLOAD_H +#ifdef HAVE_FLOW_STATS_UPDATE +#if !defined(HAVE_FLOW_STATS_DROPS) && defined(HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK) +#define flow_stats_update(flow_stats, bytes, pkts, drops, last_used, used_hw_stats) \ + flow_stats_update(flow_stats, bytes, pkts, last_used, used_hw_stats) +#elif !defined(HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK) +#define flow_stats_update(flow_stats, bytes, pkts, drops, last_used, used_hw_stats) \ + flow_stats_update(flow_stats, bytes, pkts, last_used) +#endif +#endif + +#ifndef HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK +static inline bool +flow_action_basic_hw_stats_check(const struct flow_action *action, + struct netlink_ext_ack *extack) +{ + return true; +} +#endif /* HAVE_FLOW_ACTION_BASIC_HW_STATS_CHECK */ + +#ifndef HAVE_FLOW_INDR_BLOCK_CLEANUP +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC +#define bnxt_tc_setup_indr_block(netdev, sch, bp, f, data, cleanup) \ + bnxt_tc_setup_indr_block(netdev, bp, f) +#else +#define bnxt_tc_setup_indr_block(netdev, bp, f, data, cleanup) \ + bnxt_tc_setup_indr_block(netdev, bp, f) +#endif + +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC +#define flow_indr_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel, \ + f, netdev, sch, data, bp, cleanup) \ + flow_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel) +#else +#define flow_indr_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel, \ + f, netdev, data, bp, cleanup) \ + flow_block_cb_alloc(cb, cb_ident, cb_priv, bnxt_tc_setup_indr_rel) +#endif + +#define flow_indr_block_cb_remove(block_cb, f) \ + flow_block_cb_remove(block_cb, f) + +#ifdef HAVE_FLOW_INDR_BLOCK_CB_QDISC +#define bnxt_tc_setup_indr_cb(netdev, sch, cb_priv, type, type_data, data, cleanup) \ + bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data) +#else +#define bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data, data, cleanup) \ + bnxt_tc_setup_indr_cb(netdev, cb_priv, type, type_data) 
+#endif +#endif /* HAVE_FLOW_INDR_BLOCK_CLEANUP */ +#endif /* HAVE_FLOW_OFFLOAD_H */ + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) && defined(HAVE_FLOW_INDR_BLOCK_CB) +#if !defined(HAVE_FLOW_INDR_DEV_RGTR) +int bnxt_tc_indr_block_event(struct notifier_block *nb, unsigned long event, + void *ptr); + +static inline int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, + void *cb_priv) +{ + struct bnxt *bp = cb_priv; + + bp->tc_netdev_nb.notifier_call = bnxt_tc_indr_block_event; + return register_netdevice_notifier(&bp->tc_netdev_nb); +} + +static inline void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, + void *cb_priv, + void (*release)(void *cb_priv)) +{ + struct bnxt *bp = cb_priv; + + unregister_netdevice_notifier(&bp->tc_netdev_nb); +} +#endif /* !HAVE_FLOW_INDR_DEV_RGTR */ + +#ifdef HAVE_OLD_FLOW_INDR_DEV_UNRGTR +#define flow_indr_dev_unregister(cb, bp, rel) \ + flow_indr_dev_unregister(cb, bp, bnxt_tc_setup_indr_block_cb) +#endif /* HAVE_OLD_FLOW_INDR_BLOCK_CB_UNRGTR */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD && HAVE_FLOW_INDR_BLOCK_CB */ + +#ifndef HAVE_FLOW_OFFLOAD_H + +struct flow_match_basic { + struct flow_dissector_key_basic *key, *mask; +}; + +struct flow_match_control { + struct flow_dissector_key_control *key, *mask; +}; + +struct flow_match_eth_addrs { + struct flow_dissector_key_eth_addrs *key, *mask; +}; + +struct flow_match_vlan { + struct flow_dissector_key_vlan *key, *mask; +}; + +struct flow_match_ipv4_addrs { + struct flow_dissector_key_ipv4_addrs *key, *mask; +}; + +struct flow_match_ipv6_addrs { + struct flow_dissector_key_ipv6_addrs *key, *mask; +}; + +struct flow_match_ip { + struct flow_dissector_key_ip *key, *mask; +}; + +struct flow_match_ports { + struct flow_dissector_key_ports *key, *mask; +}; + +struct flow_match_icmp { + struct flow_dissector_key_icmp *key, *mask; +}; + +struct flow_match_tcp { + struct flow_dissector_key_tcp *key, *mask; +}; + +struct flow_match_enc_keyid { + struct flow_dissector_key_keyid *key, *mask; 
+}; + +struct flow_match { + struct flow_dissector *dissector; + void *mask; + void *key; +}; + +struct flow_rule { + struct flow_match match; +}; + +#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \ + const struct flow_match *__m = &(__rule)->match; \ + struct flow_dissector *__d = (__m)->dissector; \ + \ + (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key); \ + (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask) \ + +static inline bool flow_rule_match_key(const struct flow_rule *rule, + enum flow_dissector_key_id key) +{ + return dissector_uses_key(rule->match.dissector, key); +} + +static inline void flow_rule_match_basic(const struct flow_rule *rule, + struct flow_match_basic *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); +} + +static inline void flow_rule_match_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out); +} + +static inline void flow_rule_match_eth_addrs(const struct flow_rule *rule, + struct flow_match_eth_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out); +} + +static inline void flow_rule_match_vlan(const struct flow_rule *rule, + struct flow_match_vlan *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out); +} + +static inline void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out); +} + +static inline void flow_rule_match_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out); +} + +static inline void flow_rule_match_ip(const struct flow_rule *rule, + struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out); +} + +static inline void flow_rule_match_tcp(const struct flow_rule *rule, + struct flow_match_tcp *out) +{ + 
FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out); +} + +static inline void flow_rule_match_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out); +} + +static inline void flow_rule_match_icmp(const struct flow_rule *rule, + struct flow_match_icmp *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out); +} + +static inline void flow_rule_match_enc_control(const struct flow_rule *rule, + struct flow_match_control *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out); +} + +static inline void +flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule, + struct flow_match_ipv4_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out); +} + +static inline void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule, + struct flow_match_ipv6_addrs *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out); +} + +static inline void flow_rule_match_enc_ip(const struct flow_rule *rule, + struct flow_match_ip *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out); +} + +static inline void flow_rule_match_enc_ports(const struct flow_rule *rule, + struct flow_match_ports *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out); +} + +static inline void flow_rule_match_enc_keyid(const struct flow_rule *rule, + struct flow_match_enc_keyid *out) +{ + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out); +} + +#ifdef flow_cls_offload_flow_rule +#undef flow_cls_offload_flow_rule +#endif +#define flow_cls_offload_flow_rule(cmd) \ + (&(struct flow_rule) { \ + .match = { \ + .dissector = (cmd)->dissector, \ + .mask = (cmd)->mask, \ + .key = (cmd)->key, \ + } \ + }) + +enum flow_action_id { + FLOW_ACTION_ACCEPT = 0, + FLOW_ACTION_DROP, + FLOW_ACTION_TRAP, + FLOW_ACTION_GOTO, + FLOW_ACTION_REDIRECT, + FLOW_ACTION_MIRRED, + FLOW_ACTION_REDIRECT_INGRESS, + FLOW_ACTION_MIRRED_INGRESS, 
+ FLOW_ACTION_VLAN_PUSH, + FLOW_ACTION_VLAN_POP, + FLOW_ACTION_VLAN_MANGLE, + FLOW_ACTION_TUNNEL_ENCAP, + FLOW_ACTION_TUNNEL_DECAP, + FLOW_ACTION_MANGLE, + FLOW_ACTION_ADD, + FLOW_ACTION_CSUM, + FLOW_ACTION_MARK, + FLOW_ACTION_PTYPE, + FLOW_ACTION_PRIORITY, + FLOW_ACTION_WAKE, + FLOW_ACTION_QUEUE, + FLOW_ACTION_SAMPLE, + FLOW_ACTION_POLICE, + FLOW_ACTION_CT, + FLOW_ACTION_CT_METADATA, + FLOW_ACTION_MPLS_PUSH, + FLOW_ACTION_MPLS_POP, + FLOW_ACTION_MPLS_MANGLE, + FLOW_ACTION_GATE, + FLOW_ACTION_PPPOE_PUSH, + FLOW_ACTION_INVALID = NUM_FLOW_ACTIONS +}; + +#endif /* !HAVE_FLOW_OFFLOAD_H */ + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.c new file mode 100644 index 000000000000..8f176e697628 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.c @@ -0,0 +1,267 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_mpc.h" +#include "bnxt_tfc.h" + +#define BNXT_MPC_RX_US_SLEEP 10000 +#define BNXT_MPC_RX_RETRY 10 +#define BNXT_MPC_TIMEOUT (BNXT_MPC_RX_US_SLEEP * BNXT_MPC_RX_RETRY) +#define BNXT_TFC_MPC_TX_RETRIES 150 +#define BNXT_TFC_MPC_TX_RETRY_DELAY_MIN_US 500 +#define BNXT_TFC_MPC_TX_RETRY_DELAY_MAX_US 1000 + +#define BNXT_TFC_DISP_BUF_SIZE 128 + +#define BNXT_TFC_PR_W_1BYTES 1 +#define BNXT_TFC_PR_W_2BYTES 2 +#define BNXT_TFC_PR_W_4BYTES 4 +/* + * bnxt_tfc_buf_dump: Pretty-prints a buffer using the following options + * + * Parameters: + * hdr - A header that is printed as-is + * msg - This is a pointer to the uint8_t buffer to be dumped + * prtwidth - The width of the items to be printed in bytes, + * allowed options 1, 2, 4 + * Defaults to 1 if either: + * 1) any other value + * 2) if buffer length is not a multiple of width + * linewidth - The length of the lines printed (in items) + */ +void bnxt_tfc_buf_dump(struct bnxt *bp, char *hdr, + uint8_t *msg, int msglen, + int prtwidth, int linewidth) +{ + char msg_line[BNXT_TFC_DISP_BUF_SIZE]; + int msg_i = 0, i; + uint16_t *sw_msg = (uint16_t *)msg; + uint32_t *lw_msg = (uint32_t *)msg; + + if (hdr) + netdev_dbg(bp->dev, "%s", hdr); + + if (msglen % prtwidth) { + netdev_dbg(bp->dev, "msglen[%u] not aligned on width[%u]\n", + msglen, prtwidth); + prtwidth = 1; + linewidth = 16; + } + + for (i = 0; i < msglen / prtwidth; i++) { + if ((i % linewidth == 0) && i) + netdev_dbg(bp->dev, "%s\n", msg_line); + if (i % linewidth == 0) { + msg_i = 0; + msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i), + "%04x: ", i * prtwidth); + } + switch (prtwidth) { + case BNXT_TFC_PR_W_2BYTES: + msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i), + "%04x ", sw_msg[i]); + break; + + case 
BNXT_TFC_PR_W_4BYTES:
			msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
					  "%08x ", lw_msg[i]);
			break;

		case BNXT_TFC_PR_W_1BYTES:
		default:
			/* Any unrecognized width was already normalized to 1 above. */
			msg_i += snprintf(&msg_line[msg_i], (sizeof(msg_line) - msg_i),
					  "%02x ", msg[i]);
			break;
		}
	}
	/* Flush the final (possibly partial) line. */
	netdev_dbg(bp->dev, "%s\n", msg_line);
}

/* Tear down the TF MPC state attached to @bp: destroy the command-context
 * slab cache and free bp->tfc_info.  Tolerates a NULL bp and a partially
 * constructed tfc_info, so it also serves as the error-unwind path of
 * bnxt_alloc_tfc_mpc_info().
 */
void bnxt_free_tfc_mpc_info(struct bnxt *bp)
{
	struct bnxt_tfc_mpc_info *tfc_info;

	if (!bp)
		return;

	tfc_info = bp->tfc_info;

	if (tfc_info && tfc_info->mpc_cache) {
		kmem_cache_destroy(tfc_info->mpc_cache);
		tfc_info->mpc_cache = NULL;
	}

	kfree(bp->tfc_info);
	bp->tfc_info = NULL;
}

/* Allocate bp->tfc_info (if not already present) and its slab cache of
 * bnxt_tfc_cmd_ctx command contexts.
 *
 * Return: 0 on success, -ENOMEM on allocation failure (any partial state
 * is released via bnxt_free_tfc_mpc_info()).
 */
int bnxt_alloc_tfc_mpc_info(struct bnxt *bp)
{
	struct bnxt_tfc_mpc_info *tfc_info =
		(struct bnxt_tfc_mpc_info *)(bp->tfc_info);

	if (!tfc_info) {
		tfc_info = kzalloc(sizeof(*tfc_info), GFP_KERNEL);
		if (!tfc_info)
			return -ENOMEM;

		bp->tfc_info = (void *)tfc_info;
	}
	/* NOTE(review): a second call with an existing tfc_info overwrites
	 * mpc_cache without destroying the old one - presumably alloc/free
	 * are paired by the caller; verify against the probe/remove paths.
	 */
	tfc_info->mpc_cache = kmem_cache_create("bnxt_tfc",
						sizeof(struct bnxt_tfc_cmd_ctx),
						0, 0, NULL);

	if (!tfc_info->mpc_cache) {
		bnxt_free_tfc_mpc_info(bp);
		return -ENOMEM;
	}

	return 0;
}

/* Send a TruFlow midpath (MPC) command and, because tmo is non-zero,
 * sleep until the matching completion arrives or the timeout expires.
 *
 * @in_msg:  command to transmit (msg_data/msg_size, target chnl_id)
 * @out_msg: on success the raw tfc completion is copied into msg_data
 * @opaque:  caller-chosen tag stored in the command context
 *
 * Return: 0 on success; -EAGAIN if the NIC never reached BNXT_STATE_OPEN
 * within the retry budget; -ETIMEDOUT / -EIO / -ENOMEM / -EINVAL on
 * later failures.  The two early validation failures return a bare -1
 * rather than an -errno value.
 */
int bnxt_mpc_send(struct bnxt *bp,
		  struct bnxt_mpc_mbuf *in_msg,
		  struct bnxt_mpc_mbuf *out_msg,
		  uint32_t *opaque)
{
	struct bnxt_tfc_mpc_info *tfc = (struct bnxt_tfc_mpc_info *)bp->tfc_info;
	struct bnxt_mpc_info *mpc = bp->mpc_info;
	struct bnxt_tfc_cmd_ctx *ctx = NULL;
	unsigned long tmo_left, handle = 0;
	struct bnxt_tx_ring_info *txr;
	uint tmo = BNXT_MPC_TIMEOUT;
	int retry = 0;
	int rc = 0;

	if (!mpc || !tfc) {
		netdev_dbg(bp->dev, "%s: mpc[%p], tfc[%p]\n", __func__, mpc, tfc);
		return -1;
	}

	if (out_msg->cmp_type != MPC_CMP_TYPE_MID_PATH_SHORT &&
	    out_msg->cmp_type != MPC_CMP_TYPE_MID_PATH_LONG)
		return -1;

	/* Take a "pending" reference (observed via bnxt_tfc_busy()) before
	 * checking the OPEN state; retry with a short sleep while the NIC
	 * is not open.
	 */
	do {
		atomic_inc(&tfc->pending);
		/* Make sure bnxt_close_nic() sees pending before we check the
		 * BNXT_STATE_OPEN flag.
		 */
		smp_mb__after_atomic();
		if (test_bit(BNXT_STATE_OPEN, &bp->state))
			break;

		atomic_dec(&tfc->pending);
		usleep_range(BNXT_TFC_MPC_TX_RETRY_DELAY_MIN_US,
			     BNXT_TFC_MPC_TX_RETRY_DELAY_MAX_US);
		retry++;
	} while (retry < BNXT_TFC_MPC_TX_RETRIES);

	if (retry >= BNXT_TFC_MPC_TX_RETRIES) {
		netdev_err(bp->dev, "%s: TF MPC send failed after max retries\n",
			   __func__);
		return -EAGAIN;
	}

	/* TE_CFA and RE_CFA commands use separate MPC TX rings. */
	if (in_msg->chnl_id == RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA)
		txr = &mpc->mpc_rings[BNXT_MPC_TE_CFA_TYPE][0];
	else
		txr = &mpc->mpc_rings[BNXT_MPC_RE_CFA_TYPE][0];

	if (!txr) {
		netdev_err(bp->dev, "%s: No Tx rings\n", __func__);
		rc = -EINVAL;
		goto xmit_done;
	}

	if (tmo) {
		/* The context's address doubles as the completion handle
		 * handed to the TX path and echoed to bnxt_tfc_mpc_cmp().
		 */
		ctx = kmem_cache_alloc(tfc->mpc_cache, GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto xmit_done;
		}
		init_completion(&ctx->cmp);
		handle = (unsigned long)ctx;
		ctx->tfc_cmp.opaque = *opaque;
		might_sleep();
	}

	spin_lock(&txr->tx_lock);
	rc = bnxt_start_xmit_mpc(bp, txr, in_msg->msg_data,
				 in_msg->msg_size, handle);
	spin_unlock(&txr->tx_lock);
	if (rc || !tmo)
		goto xmit_done;

	tmo_left = wait_for_completion_timeout(&ctx->cmp, msecs_to_jiffies(tmo));
	if (!tmo_left) {
		/* Mark the context so a late completion is distinguishable.
		 * NOTE(review): ctx is still freed at xmit_done below, so a
		 * completion arriving after this point would touch a recycled
		 * slab object - confirm bnxt_tfc_mpc_cmp() cannot run for
		 * this handle once the timeout fires.
		 */
		ctx->tfc_cmp.opaque = BNXT_INV_TMPC_OPAQUE;
		netdev_warn(bp->dev, "TFC MP cmd %08x timed out\n",
			    *((u32 *)in_msg->msg_data));
		rc = -ETIMEDOUT;
		goto xmit_done;
	}
	if (TFC_CMPL_STATUS(&ctx->tfc_cmp) == TFC_CMPL_STATUS_OK) {
		/* Copy response/completion back into out_msg */
		memcpy(out_msg->msg_data, &ctx->tfc_cmp, sizeof(ctx->tfc_cmp));
		rc = 0;
	} else {
		netdev_err(bp->dev, "MPC status code [%lu]\n",
			   TFC_CMPL_STATUS(&ctx->tfc_cmp) >> TFC_CMPL_STATUS_SFT);
		rc = -EIO;
	}

xmit_done:
	if (ctx)
		kmem_cache_free(tfc->mpc_cache, ctx);
	atomic_dec(&tfc->pending);
	return rc;
}

/* Completion handler for TFC midpath commands: copy the completion
 * record(s) into the waiting bnxt_tfc_cmd_ctx identified by @handle and
 * wake the sender blocked in bnxt_mpc_send().
 */
void bnxt_tfc_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle,
		      struct bnxt_cmpl_entry cmpl[], u32 entries)
{
	struct bnxt_tfc_cmd_ctx *ctx;
	struct tfc_cmpl *cmp;
	u32 len;
+ + cmp = cmpl[0].cmpl; + if (!handle || entries < 1 || entries > 2) { + if (entries < 1 || entries > 2) { + netdev_warn(bp->dev, "Invalid entries %d with handle %lx cmpl %08x in %s()\n", + entries, handle, *(u32 *)cmp, __func__); + } + return; + } + ctx = (void *)handle; + if (entries > 1) { + memcpy(&ctx->tfc_cmp, cmpl[0].cmpl, cmpl[0].len); + memcpy(&ctx->tfc_cmp.l_cmpl[0], cmpl[1].cmpl, cmpl[1].len); + } else { + len = min_t(u32, cmpl[0].len, sizeof(ctx->tfc_cmp)); + memcpy(&ctx->tfc_cmp, cmpl[0].cmpl, len); + } + complete(&ctx->cmp); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.h new file mode 100644 index 000000000000..9478183d0985 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_tfc.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2022-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_TFC_H +#define BNXT_TFC_H + +#include +#include "bnxt_mpc.h" + +struct bnxt_tfc_mpc_info { + struct kmem_cache *mpc_cache; + atomic_t pending; +}; + +struct tfc_cmpl { + __le16 client_status_type; + #define TFC_CMPL_TYPE_MASK 0x3fUL + #define TFC_CMPL_TYPE_SFT 0 + #define TFC_CMPL_TYPE_MID_PATH_SHORT 0x1eUL + #define TFC_CMPL_TYPE_MID_PATH_LONG 0x1fUL + + #define TFC_CMPL_STATUS_MASK 0xf00UL + #define TFC_CMPL_STATUS_SFT 8 + #define TFC_CMPL_STATUS_OK 0x0UL + #define TFC_CMPL_STATUS_UNSPRT_ERR 0x1UL + #define TFC_CMPL_STATUS_FMT_ERR 0x2UL + #define TFC_CMPL_STATUS_SCOPE_ERR 0x3UL + #define TFC_CMPL_STATUS_ADDR_ERR 0x4UL + #define TFC_CMPL_STATUS_CACHE_ERR 0x5UL + + #define TFC_CMPL_MP_CLIENT_MASK 0xf000UL + #define TFC_CMPL_MP_CLIENT_SFT 12 + #define TFC_CMPL_MP_CLIENT_TE_CFA 0x2UL + #define TFC_CMPL_MP_CLIENT_RE_CFA 0x3UL + + __le16 opc_dmalen; + + #define TFC_CMPL_OPC_MASK 0xffUL + #define TFC_CMPL_OPC_SFT 0 + #define TFC_CMPL_OPC_TBL_READ 0 + #define TFC_CMPL_OPC_TBL_WRITE 1 + #define TFC_CMPL_OPC_TBL_READ_CLR 2 + #define TFC_CMPL_OPC_TBL_INVALIDATE 5 + #define TFC_CMPL_OPC_TBL_EVENT_COLLECTION 6 + #define TFC_CMPL_OPC_TBL_EM_SEARCH 8 + #define TFC_CMPL_OPC_TBL_EM_INSERT 9 + #define TFC_CMPL_OPC_TBL_EM_DELETE 10 + #define TFC_CMPL_OPC_TBL_EM_CHAIN 11 + + u32 opaque; + + __le32 v_hmsb_tbl_type_scope; + #define TFC_CMPL_V 0x1UL + #define TFC_CMPL_V_MASK 0x1UL + #define TFC_CMPL_V_SFT 0 + #define TFC_CMPL_HASH_MSB_MASK 0xfffUL + #define TFC_CMPL_HASH_MSB_SFT 12 + #define TFC_CMPL_TBL_TYPE_MASK 0xf000UL + #define TFC_CMPL_TBL_TYPE_SFT 12 + #define TFC_CMPL_TBL_TYPE_ACTION 0 + #define TFC_CMPL_TBL_TYPE_EM 1 + #define TFC_CMPL_TBL_SCOPE_MASK 0x1f000000UL + #define TFC_CMPL_TBL_SCOPE_SFT 24 + + __le32 v_tbl_index; + #define TFC_CMPL_TBL_IDX_MASK 0x3ffffffUL + #define TFC_CMPL_TBL_IDX_SFT 0 + + __le32 l_cmpl[4]; +}; + +/* + * Use a combination of opcode, table_type, table_scope and table_index to + * generate a unique opaque field, which can be used 
to verify the completion + * later. + * + * cccc_ssss_siii_iiii_iiii_iiii_iiii_iiii + * opaque[31:28] (c) opcode + * opaque[27:23] (s) tbl scope + * opaque[22:00] (i) tbl index + * + * 0x1080000a + * 0x01000001 + * 0x1000000a + */ +#define TFC_CMPL_OPC_NIB_MASK 0xfUL +#define TFC_CMPL_OPQ_OPC_SFT 28 +#define TFC_CMPL_TBL_23B_IDX_MASK 0x7fffffUL +#define TFC_CMPL_TBL_SCOPE_OPQ_SFT 1 +#define TFC_CMD_TBL_SCOPE_OPQ_SFT 23 + +/* Used to generate opaque field for command send */ +#define BNXT_TFC_CMD_OPQ(opc, ts, ti) \ + ((((opc) & TFC_CMPL_OPC_NIB_MASK) << TFC_CMPL_OPQ_OPC_SFT) | \ + ((ts) << TFC_CMD_TBL_SCOPE_OPQ_SFT) | \ + ((ti) & TFC_CMPL_TBL_23B_IDX_MASK)) + +/* Used to generate opaque field for completion verification */ +#define BNXT_TFC_CMPL_OPAQUE(tfc_cmpl) \ + ((((u32)le16_to_cpu((tfc_cmpl)->opc_dmalen) & TFC_CMPL_OPC_NIB_MASK) << \ + TFC_CMPL_OPQ_OPC_SFT) | \ + ((le32_to_cpu((tfc_cmpl)->v_hmsb_tbl_type_scope) & TFC_CMPL_TBL_SCOPE_MASK) >> \ + TFC_CMPL_TBL_SCOPE_OPQ_SFT) |\ + (le32_to_cpu((tfc_cmpl)->v_tbl_index) & TFC_CMPL_TBL_23B_IDX_MASK)) + +#define BNXT_INV_TMPC_OPAQUE 0xffffffff + +#define TFC_CMPL_STATUS(tfc_cmpl) \ + (le16_to_cpu((tfc_cmpl)->client_status_type) & \ + TFC_CMPL_STATUS_MASK) + +struct bnxt_tfc_cmd_ctx { + struct completion cmp; + struct tfc_cmpl tfc_cmp; +}; + +struct bnxt_mpc_mbuf { + uint32_t chnl_id; + uint8_t cmp_type; + uint8_t *msg_data; + /* MPC msg size in bytes, must be multiple of 16Bytes */ + uint16_t msg_size; +}; + +static inline bool bnxt_tfc_busy(struct bnxt *bp) +{ + struct bnxt_tfc_mpc_info *tfc_info = bp->tfc_info; + + return tfc_info && atomic_read(&tfc_info->pending) > 0; +} + +void bnxt_tfc_buf_dump(struct bnxt *bp, char *hdr, + uint8_t *msg, int msglen, + int prtwidth, int linewidth); +void bnxt_free_tfc_mpc_info(struct bnxt *bp); +int bnxt_alloc_tfc_mpc_info(struct bnxt *bp); + +int bnxt_mpc_send(struct bnxt *bp, + struct bnxt_mpc_mbuf *in_msg, + struct bnxt_mpc_mbuf *out_msg, + uint32_t *opaque); +void 
bnxt_tfc_mpc_cmp(struct bnxt *bp, u32 client, unsigned long handle, + struct bnxt_cmpl_entry cmpl[], u32 entries); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.c new file mode 100644 index 000000000000..e29d59e690be --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.c @@ -0,0 +1,1131 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "ulp_generic_flow_offload.h" +#include "ulp_udcc.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_udcc.h" +#include "bnxt_debugfs.h" +#include "bnxt_nic_flow.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +static int bnxt_tf_ulp_flow_delete(struct bnxt *bp, struct bnxt_udcc_session_entry *entry); + +static int bnxt_hwrm_udcc_qcfg(struct bnxt *bp) +{ + struct hwrm_udcc_qcfg_output *resp; + struct hwrm_udcc_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_UDCC_QCFG); + if (rc) + return rc; + + req->target_id = cpu_to_le16(0xffff); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto udcc_qcfg_exit; + + bp->udcc_info->mode = resp->udcc_mode; + netdev_info(bp->dev, "UDCC mode: %s!!!\n", + bp->udcc_info->mode ? 
"Enabled" : "Disabled"); + +udcc_qcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_alloc_udcc_info(struct bnxt *bp) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct hwrm_udcc_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; + int rc; + + if (BNXT_VF(bp) || !BNXT_UDCC_CAP(bp)) + return 0; + + if (udcc) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_UDCC_QCAPS); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + + udcc = kzalloc(sizeof(*udcc), GFP_KERNEL); + if (!udcc) + goto exit; + + udcc->max_sessions = le16_to_cpu(resp->max_sessions); + udcc->max_comp_cfg_xfer = le16_to_cpu(resp->max_comp_cfg_xfer); + udcc->max_comp_data_xfer = le16_to_cpu(resp->max_comp_data_xfer); + udcc->session_type = resp->session_type; + mutex_init(&udcc->session_db_lock); + bp->udcc_info = udcc; + netdev_info(bp->dev, "UDCC capability: %s max %d sessions\n", + udcc->session_type ? "per-QP" : "per-DestIP", + udcc->max_sessions); + + rc = bnxt_hwrm_udcc_qcfg(bp); + if (rc) { + kfree(udcc); + goto exit; + } + + netdev_dbg(bp->dev, "%s(): udcc_info initialized!\n", __func__); +exit: + hwrm_req_drop(bp, req); + return rc; +} + +int bnxt_hwrm_udcc_session_query(struct bnxt *bp, u32 session_id, + struct hwrm_udcc_session_query_output *resp_out) +{ + struct hwrm_udcc_session_query_input *req; + struct hwrm_udcc_session_query_output *resp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_UDCC_SESSION_QUERY); + if (rc) + return rc; + + req->session_id = cpu_to_le16(session_id); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto udcc_query_exit; + + memcpy(resp_out, resp, sizeof(struct hwrm_udcc_session_query_output)); + +udcc_query_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_udcc_session_qcfg(struct bnxt *bp, struct bnxt_udcc_session_entry *entry) +{ + struct hwrm_udcc_session_qcfg_output *resp; + struct 
hwrm_udcc_session_qcfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_UDCC_SESSION_QCFG); + if (rc) + return rc; + + req->session_id = cpu_to_le16(entry->session_id); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto udcc_qcfg_exit; + + ether_addr_copy(entry->dest_mac, resp->dest_mac); + ether_addr_copy(entry->src_mac, resp->src_mac); + memcpy(entry->dst_ip.s6_addr32, resp->dest_ip, sizeof(resp->dest_ip)); + entry->dest_qp_num = le32_to_cpu(resp->dest_qp_num); + entry->src_qp_num = le32_to_cpu(resp->src_qp_num); + +udcc_qcfg_exit: + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_hwrm_udcc_session_cfg(struct bnxt *bp, struct bnxt_udcc_session_entry *entry) +{ + struct hwrm_udcc_session_cfg_input *req; + int rc = 0; + + rc = hwrm_req_init(bp, req, HWRM_UDCC_SESSION_CFG); + if (rc) + return rc; + + req->session_id = cpu_to_le16(entry->session_id); + if (entry->state != UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED) { + req->enables = cpu_to_le32(UDCC_SESSION_CFG_REQ_ENABLES_SESSION_STATE); + goto session_state; + } + req->enables = cpu_to_le32(UDCC_SESSION_CFG_REQ_ENABLES_SESSION_STATE | + UDCC_SESSION_CFG_REQ_ENABLES_DEST_MAC | + UDCC_SESSION_CFG_REQ_ENABLES_SRC_MAC | + UDCC_SESSION_CFG_REQ_ENABLES_TX_STATS_RECORD | + UDCC_SESSION_CFG_REQ_ENABLES_RX_STATS_RECORD); + if (is_valid_ether_addr(entry->dst_mac_mod) && + is_valid_ether_addr(entry->src_mac_mod)) { + ether_addr_copy(req->dest_mac, entry->dst_mac_mod); + ether_addr_copy(req->src_mac, entry->src_mac_mod); + } else { + ether_addr_copy(req->dest_mac, entry->dest_mac); + ether_addr_copy(req->src_mac, entry->src_mac); + } + req->tx_stats_record = cpu_to_le32((u32)entry->tx_counter_hndl); + req->rx_stats_record = cpu_to_le32((u32)entry->rx_counter_hndl); + +session_state: + req->session_state = entry->state; + return hwrm_req_send(bp, req); +} + +#define ACT_OFFS_MASK 0x6ffffff +#define TSID_SHIFT 26 +#define TSID_MASK 0x1f + +/* This function converts the 
provided tfc action handle to the UDCC
 * action handle required by the firmware. The action handle consists
 * of an 8 byte offset in the lower 26 bits and the table scope id in
 * the upper 6 bits.
 *
 * NOTE(review): the constants above look inconsistent with this text:
 * ACT_OFFS_MASK (0x6ffffff) is not a contiguous 26-bit mask (0x3ffffff
 * would be), and TSID_MASK (0x1f) keeps only 5 bits of the tsid although
 * the comment says 6.  Confirm against the firmware interface spec.
 */
static int bnxt_tfc_counter_update(struct bnxt *bp, u64 *counter_hndl)
{
	u64 val = 0;
	u8 tsid = 0;
	int rc = 0;

	rc = bnxt_ulp_cntxt_tsid_get(bp->ulp_ctx, &tsid);
	if (rc) {
		netdev_dbg(bp->dev, "%s:Invalid tsid, cannot update counter_hndl rc=%d\n",
			   __func__, rc);
		return rc;
	}
	netdev_dbg(bp->dev, "%s: counter_hndl(%llx)\n", __func__, *counter_hndl);
	val = *counter_hndl;
	/* 32B offset to 8B offset */
	val = val << 2;
	val &= ACT_OFFS_MASK;
	val |= (tsid & TSID_MASK) << TSID_SHIFT;

	*counter_hndl = val;
	netdev_dbg(bp->dev, "%s:counter_hndl update tsid(%d) counter_hndl(%llx)\n",
		   __func__, tsid, *counter_hndl);
	return rc;
}

/* Exact-match mask covering all 128 bits of an IPv6 address. */
static u8 bnxt_ulp_gen_l3_ipv6_addr_em_mask[] = {
	0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff
};

/* P7 chips: install the Rx flow for a UDCC session - match the session's
 * L2 filter, IPv6 source (the session DIP seen as SIP on receive), RoCE
 * CNP opcode and, for per-QP sessions, the QP number; count (and drop)
 * matching packets.
 */
static int bnxt_udcc_flows_create_p7(struct bnxt *bp, struct bnxt_udcc_session_entry *entry)
{
	struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 };
	struct bnxt_ulp_gen_ipv6_hdr v6_spec = { 0 }, v6_mask = { 0 };
	bool per_qp_session = BNXT_UDCC_SESSION_PER_QP(bp);
	struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 };
	struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 };
	struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 };
	struct bnxt_ulp_gen_action_parms actions = { 0 };
	struct bnxt_ulp_gen_flow_parms parms = { 0 };

	/* These would normally be preset and passed to the upper layer */
	/* u32 dst_qpn = cpu_to_be32(entry->dest_qp_num); */
	u32 src_qpn = cpu_to_be32(entry->src_qp_num);
	u32 msk_qpn = cpu_to_be32(0xffffffff);
	u16 op_code = cpu_to_be16(0x81); /* RoCE CNP */
	u16 op_code_mask = cpu_to_be16(0xffff);
	u8 l4_proto = IPPROTO_UDP;
	u8 l4_proto_mask = 0xff;
	__le64 l2_filter_id = 0;
	int rc;

	/* the source mac
from the session is the dmac of the l2 filter */ + rc = bnxt_nic_flow_dmac_filter_get(bp, entry->src_mac, &l2_filter_id); + if (rc) { + netdev_warn(bp->dev, "UDCC l2 filter mac check failed rc=%d\n", rc); + return rc; + } + + l2_parms.type = BNXT_ULP_GEN_L2_L2_FILTER_ID; + l2_parms.l2_filter_id = &l2_filter_id; + + /* Pack the L3 Data */ + v6_spec.proto6 = &l4_proto; + v6_mask.proto6 = &l4_proto_mask; + v6_spec.dip6 = NULL; + v6_mask.dip6 = NULL; + v6_spec.sip6 = entry->dst_ip.s6_addr; + v6_mask.sip6 = bnxt_ulp_gen_l3_ipv6_addr_em_mask; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV6; + l3_parms.v6_spec = &v6_spec; + l3_parms.v6_mask = &v6_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = &op_code; + bth_mask.op_code = &op_code_mask; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + if (per_qp_session) { + bth_spec.dst_qpn = &src_qpn; + bth_mask.dst_qpn = &msk_qpn; + } + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions NIC template will use RoCE VNIC by default */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_DROP | + BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + actions.dst_fid = bp->pf.fw_fid; + + parms.dir = BNXT_ULP_GEN_RX; + parms.flow_id = &entry->rx_flow_id; + + parms.counter_hndl = &entry->rx_counter_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + parms.priority = 2; /* must be higher priority than NIC flow CNP */ + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) { + netdev_warn(bp->dev, "UDCC TFC flow creation failed rc=%d\n", rc); + return rc; + } + + netdev_dbg(bp->dev, "UDCC Add Rx flow for session_id: %d flow_id: %d, counter: 0x%llx\n", + entry->session_id, + entry->rx_flow_id, + entry->rx_counter_hndl); + + bnxt_tfc_counter_update(bp, &entry->rx_counter_hndl); + + return rc; +} + +static int bnxt_udcc_rx_flow_create_v6(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + struct 
bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv6_hdr v6_spec = { 0 }, v6_mask = { 0 }; + bool per_qp_session = BNXT_UDCC_SESSION_PER_QP(bp); + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + + /* These would normally be preset and passed to the upper layer */ + u32 src_qpn = cpu_to_be32(entry->src_qp_num); + u32 msk_qpn = cpu_to_be32(0xffffffff); + u16 op_code = cpu_to_be16(0x81); /* RoCE CNP */ + u16 op_code_mask = cpu_to_be16(0xffff); + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc; + + /* Pack the L2 Data - Don't fill l2_spec for now */ + l2_parms.type = BNXT_ULP_GEN_L2_L2_HDR; + + /* Pack the L3 Data */ + v6_spec.proto6 = &l4_proto; + v6_mask.proto6 = &l4_proto_mask; + v6_spec.dip6 = NULL; + v6_mask.dip6 = NULL; + v6_spec.sip6 = entry->dst_ip.s6_addr; + v6_mask.sip6 = bnxt_ulp_gen_l3_ipv6_addr_em_mask; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV6; + l3_parms.v6_spec = &v6_spec; + l3_parms.v6_mask = &v6_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = &op_code; + bth_mask.op_code = &op_code_mask; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + if (per_qp_session) { + bth_spec.dst_qpn = &src_qpn; + bth_mask.dst_qpn = &msk_qpn; + } + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT | + BNXT_ULP_GEN_ACTION_ENABLES_DROP | + BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + actions.dst_fid = bp->pf.fw_fid; + + parms.dir = BNXT_ULP_GEN_RX; + parms.flow_id = &entry->rx_flow_id; + parms.counter_hndl = &entry->rx_counter_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, 
&parms); + if (rc) + return rc; + netdev_dbg(bp->dev, "UDCC Add Rx flow for session_id: %d flow_id: %d, counter: 0x%llx\n", + entry->session_id, + entry->rx_flow_id, + entry->rx_counter_hndl); + + return rc; +} + +static int bnxt_udcc_tx_flow_create_v6(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv6_hdr v6_spec = { 0 }, v6_mask = { 0 }; + bool per_qp_session = BNXT_UDCC_SESSION_PER_QP(bp); + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + + /* These would normally be preset and passed to the upper layer */ + u32 dst_qpn = cpu_to_be32(entry->dest_qp_num); + u32 msk_qpn = cpu_to_be32(0xffffffff); + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc; + + /* Pack the L2 Data - Don't fill l2_spec for now */ + l2_parms.type = BNXT_ULP_GEN_L2_L2_HDR; + + /* Pack the L3 Data */ + v6_spec.proto6 = &l4_proto; + v6_mask.proto6 = &l4_proto_mask; + v6_spec.sip6 = NULL; + v6_mask.sip6 = NULL; + v6_spec.dip6 = entry->dst_ip.s6_addr; + v6_mask.dip6 = bnxt_ulp_gen_l3_ipv6_addr_em_mask; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV6; + l3_parms.v6_spec = &v6_spec; + l3_parms.v6_mask = &v6_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = NULL; + bth_mask.op_code = NULL; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + if (per_qp_session) { + bth_spec.dst_qpn = &dst_qpn; + bth_mask.dst_qpn = &msk_qpn; + } + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT | + BNXT_ULP_GEN_ACTION_ENABLES_SET_SMAC | + BNXT_ULP_GEN_ACTION_ENABLES_SET_DMAC | + BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + + actions.dst_fid = bp->pf.fw_fid; + if 
(is_valid_ether_addr(entry->dst_mac_mod) && + is_valid_ether_addr(entry->src_mac_mod)) { + ether_addr_copy(actions.dmac, entry->dst_mac_mod); + ether_addr_copy(actions.smac, entry->src_mac_mod); + } else { + /* PF case (non-switchdev): zero smac and dmac modify. + * Just use the smac dmac given by FW in the entry. + */ + ether_addr_copy(actions.dmac, entry->dest_mac); + ether_addr_copy(actions.smac, entry->src_mac); + } + + parms.dir = BNXT_ULP_GEN_TX; + parms.flow_id = &entry->tx_flow_id; + parms.counter_hndl = &entry->tx_counter_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) + return rc; + netdev_dbg(bp->dev, "UDCC Add Tx flow for session_id: %d flow_id: %d, counter: 0x%llx\n", + entry->session_id, + entry->tx_flow_id, + entry->tx_counter_hndl); + + return rc; +} + +static u8 bnxt_ulp_gen_l3_ipv4_addr_em_mask[] = { 0xff, 0xff, 0xff, 0xff }; + +static int bnxt_udcc_rx_flow_create_v4(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv4_hdr v4_spec = { 0 }, v4_mask = { 0 }; + bool per_qp_session = BNXT_UDCC_SESSION_PER_QP(bp); + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + + /* These would normally be preset and passed to the upper layer */ + u32 src_qpn = cpu_to_be32(entry->src_qp_num); + u32 msk_qpn = cpu_to_be32(0xffffffff); + u16 op_code = cpu_to_be16(0x81); /* RoCE CNP */ + u16 op_code_mask = cpu_to_be16(0xffff); + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc; + + /* Pack the L2 Data - Don't fill l2_spec for now */ + l2_parms.type = BNXT_ULP_GEN_L2_L2_HDR; + + /* Pack the L3 Data */ + v4_spec.proto = 
&l4_proto; + v4_mask.proto = &l4_proto_mask; + v4_spec.dip = NULL; + v4_mask.dip = NULL; + v4_spec.sip = (u32 *)&entry->dst_ip.s6_addr32[3]; + v4_mask.sip = (u32 *)bnxt_ulp_gen_l3_ipv4_addr_em_mask; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV4; + l3_parms.v4_spec = &v4_spec; + l3_parms.v4_mask = &v4_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = &op_code; + bth_mask.op_code = &op_code_mask; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + if (per_qp_session) { + bth_spec.dst_qpn = &src_qpn; + bth_mask.dst_qpn = &msk_qpn; + } + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT | + BNXT_ULP_GEN_ACTION_ENABLES_DROP | + BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + actions.dst_fid = bp->pf.fw_fid; + + parms.dir = BNXT_ULP_GEN_RX; + parms.flow_id = &entry->rx_flow_id; + parms.counter_hndl = &entry->rx_counter_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) + return rc; + netdev_dbg(bp->dev, "UDCC Add Rx flow for session_id: %d flow_id: %d, counter: 0x%llx\n", + entry->session_id, + entry->rx_flow_id, + entry->rx_counter_hndl); + + return rc; +} + +static int bnxt_udcc_tx_flow_create_v4(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv4_hdr v4_spec = { 0 }, v4_mask = { 0 }; + bool per_qp_session = BNXT_UDCC_SESSION_PER_QP(bp); + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + + /* These would normally be preset and passed to the upper layer */ + u32 dst_qpn = cpu_to_be32(entry->dest_qp_num); + u32 
msk_qpn = cpu_to_be32(0xffffffff); + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc; + + /* Pack the L2 Data - Don't fill l2_spec for now */ + l2_parms.type = BNXT_ULP_GEN_L2_L2_HDR; + + /* Pack the L3 Data */ + v4_spec.proto = &l4_proto; + v4_mask.proto = &l4_proto_mask; + v4_spec.sip = NULL; + v4_mask.sip = NULL; + v4_spec.dip = (u32 *)&entry->dst_ip.s6_addr32[3]; + v4_mask.dip = (u32 *)bnxt_ulp_gen_l3_ipv4_addr_em_mask; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV4; + l3_parms.v4_spec = &v4_spec; + l3_parms.v4_mask = &v4_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = NULL; + bth_mask.op_code = NULL; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + if (per_qp_session) { + bth_spec.dst_qpn = &dst_qpn; + bth_mask.dst_qpn = &msk_qpn; + } + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT | + BNXT_ULP_GEN_ACTION_ENABLES_SET_SMAC | + BNXT_ULP_GEN_ACTION_ENABLES_SET_DMAC | + BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + + actions.dst_fid = bp->pf.fw_fid; + if (is_valid_ether_addr(entry->dst_mac_mod) && + is_valid_ether_addr(entry->src_mac_mod)) { + ether_addr_copy(actions.dmac, entry->dst_mac_mod); + ether_addr_copy(actions.smac, entry->src_mac_mod); + } else { + /* PF case (non-switchdev): zero smac and dmac modify. + * Just use the smac dmac given by FW in the entry. 
+ */ + ether_addr_copy(actions.dmac, entry->dest_mac); + ether_addr_copy(actions.smac, entry->src_mac); + } + + parms.dir = BNXT_ULP_GEN_TX; + parms.flow_id = &entry->tx_flow_id; + parms.counter_hndl = &entry->tx_counter_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) + return rc; + netdev_dbg(bp->dev, "UDCC Add Tx flow for session_id: %d flow_id: %d, counter: 0x%llx\n", + entry->session_id, + entry->tx_flow_id, + entry->tx_counter_hndl); + + return rc; +} + +static int bnxt_udcc_flows_create_v6(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + int rc; + + rc = bnxt_udcc_rx_flow_create_v6(bp, entry); + if (rc) + return rc; + + rc = bnxt_udcc_tx_flow_create_v6(bp, entry); + return rc; +} + +static int bnxt_udcc_flows_create_v4(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + int rc; + + rc = bnxt_udcc_rx_flow_create_v4(bp, entry); + if (rc) + return rc; + + rc = bnxt_udcc_tx_flow_create_v4(bp, entry); + return rc; +} + +static int bnxt_udcc_flows_create(struct bnxt *bp, + struct bnxt_udcc_session_entry *entry) +{ + if (entry->v4_dst) + return bnxt_udcc_flows_create_v4(bp, entry); + + return bnxt_udcc_flows_create_v6(bp, entry); +} + +/* The dip gets encoded as the RoCEv2 GID. The third integer + * should be FFFF0000 if the encoded address is IPv4. 
+ * Example: GID: ::ffff:171.16.10.1 + */ +#define BNXT_UDCC_DIP_V4_MASK 0xFFFF0000 +static bool bnxt_is_udcc_dip_ipv4(struct bnxt *bp, struct in6_addr *dip) +{ + netdev_dbg(bp->dev, "%s: s6_addr32[0]: 0x%x s6_addr32[1]: 0x%x\n", + __func__, dip->s6_addr32[0], dip->s6_addr32[1]); + netdev_dbg(bp->dev, "%s: s6_addr32[2]: 0x%x s6_addr32[3]: 0x%x\n", + __func__, dip->s6_addr32[2], dip->s6_addr32[3]); + + if ((dip->s6_addr32[2] & BNXT_UDCC_DIP_V4_MASK) == + BNXT_UDCC_DIP_V4_MASK) + return true; + + return false; +} + +/* Insert a new session entry into the database */ +static int bnxt_udcc_create_session(struct bnxt *bp, u32 session_id) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + struct bnxt_udcc_session_entry *entry; + int rc; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->session_id = session_id; + + /* ==================================================================== + * 1.Issue HWRM_UDCC_SESSION_QCFG to get the session details + * + * 2.Use the returned DIP to invoke TF API to get flow_ids/counter_hndls + * for Tx/Tx + * a) Use the DIP to query the smac/dmac - TF API + * b) Add a Tx flow using DIP, action_param - modify dmac/smac,count + * c) Add a Rx flow using DIP as SIP, match: CNP, action: count + * + * 3. 
Issue HWRM_UDCC_SESSION_CFG to update the FW + */ + rc = bnxt_hwrm_udcc_session_qcfg(bp, entry); + if (rc) + goto create_sess_exit1; + + if (BNXT_CHIP_P7(bp)) { + rc = bnxt_udcc_flows_create_p7(bp, entry); + if (rc) { + netdev_warn(bp->dev, "UDCC flow create failed rc=%d\n", rc); + goto create_sess_exit1; + } + } else { + entry->v4_dst = bnxt_is_udcc_dip_ipv4(bp, &entry->dst_ip); + rc = bnxt_ulp_udcc_v6_subnet_check(bp, bp->pf.fw_fid, &entry->dst_ip, + entry->dst_mac_mod, + entry->src_mac_mod); + if (rc) { + if (rc != -ENOENT) { + netdev_warn(bp->dev, "UDCC subnet check failed rc=%d\n", rc); + goto create_sess_exit1; + } + entry->skip_subnet_checking = true; + } + rc = bnxt_udcc_flows_create(bp, entry); + if (rc) + goto create_sess_exit1; + } + entry->state = UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED; + rc = bnxt_hwrm_udcc_session_cfg(bp, entry); + if (rc) + goto create_sess_exit2; + + mutex_lock(&udcc->session_db_lock); + udcc->session_db[session_id] = entry; + udcc->session_count++; + mutex_unlock(&udcc->session_db_lock); + + bnxt_debugfs_create_udcc_session(bp, session_id); + + return 0; +create_sess_exit2: + bnxt_tf_ulp_flow_delete(bp, entry); +create_sess_exit1: + entry->state = UDCC_SESSION_CFG_REQ_SESSION_STATE_FLOW_NOT_CREATED; + bnxt_hwrm_udcc_session_cfg(bp, entry); + kfree(entry); + return rc; +} + +static int bnxt_tf_ulp_flow_delete(struct bnxt *bp, struct bnxt_udcc_session_entry *entry) +{ + int rc = 0; + + /* Delete the TF flows for Rx/Tx */ + if (entry->rx_flow_id) { + rc = bnxt_ulp_gen_flow_destroy(bp, bp->pf.fw_fid, + entry->rx_flow_id); + if (!rc) { + netdev_dbg(bp->dev, + "UDCC Delete Rx flow_id: %d session: %d\n", + entry->rx_flow_id, entry->session_id); + } else { + netdev_dbg(bp->dev, + "UDCC Delete Rx flow_id: %d failed rc: %d\n", + entry->rx_flow_id, rc); + } + entry->rx_flow_id = 0; + entry->rx_counter_hndl = 0; + } + + if (entry->tx_flow_id) { + rc = bnxt_ulp_gen_flow_destroy(bp, bp->pf.fw_fid, + entry->tx_flow_id); + if (!rc) { + 
netdev_dbg(bp->dev,
				   "UDCC Delete Tx flow_id: %d session: %d\n",
				   entry->tx_flow_id, entry->session_id);
		} else {
			netdev_dbg(bp->dev,
				   "UDCC Delete Tx flow_id: %d failed rc: %d\n",
				   entry->tx_flow_id, rc);
		}
		entry->tx_flow_id = 0;
		entry->tx_counter_hndl = 0;
	}

	return rc;
}

/* Re-create the debugfs node of every ENABLED session (e.g. after the
 * debugfs tree was rebuilt).  Takes session_db_lock.
 */
void bnxt_udcc_session_debugfs_add(struct bnxt *bp)
{
	struct bnxt_udcc_info *udcc = bp->udcc_info;
	struct bnxt_udcc_session_entry *entry;
	int i;

	if (!udcc || !udcc->session_count)
		return;

	mutex_lock(&udcc->session_db_lock);
	for (i = 0; i < BNXT_UDCC_MAX_SESSIONS; i++) {
		entry = udcc->session_db[i];
		if (!entry)
			continue;

		if (entry->state == UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED)
			bnxt_debugfs_create_udcc_session(bp, i);
	}
	mutex_unlock(&udcc->session_db_lock);
}

/* Remove the debugfs node of every ENABLED session.  Takes
 * session_db_lock.
 */
void bnxt_udcc_session_debugfs_cleanup(struct bnxt *bp)
{
	struct bnxt_udcc_info *udcc = bp->udcc_info;
	struct bnxt_udcc_session_entry *entry;
	int i;

	if (!udcc || !udcc->session_count)
		return;

	mutex_lock(&udcc->session_db_lock);
	for (i = 0; i < BNXT_UDCC_MAX_SESSIONS; i++) {
		entry = udcc->session_db[i];
		if (!entry)
			continue;

		if (entry->state == UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED)
			bnxt_debugfs_delete_udcc_session(bp, i);
	}
	mutex_unlock(&udcc->session_db_lock);
}

/* Tear down one session: destroy its flows, tell firmware (unless FW is
 * in reset or @cleanup is set), and free the database slot.  Called with
 * session_db_lock NOT held; takes it internally.
 *
 * Return: 0 on success, -ENOENT if the slot is empty, -errno otherwise.
 */
static int bnxt_udcc_delete_session(struct bnxt *bp, u32 session_id, bool cleanup)
{
	struct bnxt_udcc_info *udcc = bp->udcc_info;
	struct bnxt_udcc_session_entry *entry;
	int rc = 0;

	mutex_lock(&udcc->session_db_lock);
	entry = udcc->session_db[session_id];
	if (!entry) {
		rc = -ENOENT;
		goto exit;
	}

	rc = bnxt_tf_ulp_flow_delete(bp, entry);
	if (rc) {
		netdev_dbg(bp->dev,
			   "Failed to delete UDCC flows, session: %d\n",
			   session_id);
	}

	/* No need to issue udcc_session_cfg command when
	 * firmware is in reset state.
	 */
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) || cleanup)
		goto cleanup_udcc_session;

	entry->state = UDCC_SESSION_CFG_REQ_SESSION_STATE_FLOW_HAS_BEEN_DELETED;
	rc = bnxt_hwrm_udcc_session_cfg(bp, entry);
	if (rc) {
		/* Entry is intentionally kept in the DB if FW refuses. */
		netdev_dbg(bp->dev, "Failed to delete UDCC session: %d\n",
			   session_id);
		goto exit;
	}

cleanup_udcc_session:
	bnxt_debugfs_delete_udcc_session(bp, session_id);

	kfree(entry);
	udcc->session_db[session_id] = NULL;
	udcc->session_count--;

	netdev_dbg(bp->dev, "Deleted UDCC session: %d\n", session_id);
exit:
	mutex_unlock(&udcc->session_db_lock);
	return rc;
}

/* Delete every session in the database.
 * NOTE(review): passes cleanup=false, so firmware is still notified per
 * session (unless in reset) - confirm that is intended for a routine
 * named "cleanup".
 */
void bnxt_udcc_session_db_cleanup(struct bnxt *bp)
{
	struct bnxt_udcc_info *udcc = bp->udcc_info;
	int i;

	if (!udcc)
		return;

	for (i = 0; i < BNXT_UDCC_MAX_SESSIONS; i++)
		bnxt_udcc_delete_session(bp, i, false);
}

/* Queue a deferred suspend/unsuspend sweep of all sessions.  The
 * tf_events bit de-duplicates requests already in flight.
 */
void bnxt_udcc_update_session(struct bnxt *bp, bool suspend)
{
	u8 tf_event;

	if (!bp->udcc_info)
		return;

	if (suspend)
		tf_event = BNXT_UDCC_INFO_TF_EVENT_SUSPEND;
	else
		tf_event = BNXT_UDCC_INFO_TF_EVENT_UNSUSPEND;

	if (test_and_set_bit(tf_event, &bp->udcc_info->tf_events))
		return;
	/* MAX_SESSIONS + 1 acts as an "all sessions" sentinel id. */
	bnxt_queue_udcc_work(bp, BNXT_UDCC_MAX_SESSIONS + 1,
			     BNXT_UDCC_SESSION_UPDATE, suspend);
}

/* Suspend one session: drop its flows and report the new state to FW;
 * on FW failure roll the state back to @orig_state.
 */
static void bnxt_udcc_suspend_session(struct bnxt *bp,
				      u8 orig_state,
				      struct bnxt_udcc_session_entry *entry)
{
	int rc;

	bnxt_tf_ulp_flow_delete(bp, entry);
	/* NOTE(review): "!ENABLED" (i.e. 0 or 1 by logical negation) is
	 * used as the suspended state value here and in the comparison in
	 * __bnxt_udcc_update_session() - confirm this maps to a real
	 * UDCC_SESSION_CFG_REQ_SESSION_STATE_* value rather than using a
	 * named constant.
	 */
	entry->state = !UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED;

	rc = bnxt_hwrm_udcc_session_cfg(bp, entry);
	if (rc) {
		netdev_warn(bp->dev, "UDCC failed to suspend session: %d\n",
			    entry->session_id);
		entry->state = orig_state;
	} else {
		netdev_dbg(bp->dev, "UDCC update session: %d is SUSPENDED\n",
			   entry->session_id);
	}
	bnxt_debugfs_delete_udcc_session(bp, entry->session_id);
}

/* Unsuspend one session: re-create its flows and report ENABLED to FW;
 * on FW failure roll the state back to @orig_state.
 * NOTE(review): the return value of bnxt_udcc_flows_create() is ignored,
 * so ENABLED may be reported to FW even if flow creation failed.
 */
static void bnxt_udcc_unsuspend_session(struct bnxt *bp,
					u8 orig_state,
					struct bnxt_udcc_session_entry *entry)
{
	int rc;

	bnxt_udcc_flows_create(bp, entry);
	entry->state = UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED;

	rc = bnxt_hwrm_udcc_session_cfg(bp, entry);
	if (rc) {
		netdev_warn(bp->dev, "UDCC failed to unsuspend session: %d\n",
			    entry->session_id);
		entry->state = orig_state;
	} else {
		netdev_dbg(bp->dev, "UDCC update session: %d is UNSUSPENDED\n",
			   entry->session_id);
	}
	bnxt_debugfs_create_udcc_session(bp, entry->session_id);
}

/* Sweep the session DB and suspend/unsuspend sessions according to the
 * current subnet-check result; also refreshes the MAC-modify actions of
 * enabled sessions whose subnet entry changed.  Takes session_db_lock.
 */
static void __bnxt_udcc_update_session(struct bnxt *bp, bool suspend)
{
	struct bnxt_udcc_info *udcc = bp->udcc_info;
	u8 orig_state;
	bool found;
	int i;

	mutex_lock(&udcc->session_db_lock);
	if (!udcc->session_count)
		goto exit;

	for (i = 0; i < BNXT_UDCC_MAX_SESSIONS; i++) {
		struct bnxt_udcc_session_entry *entry;
		u8 dmac[ETH_ALEN] = {0};
		u8 smac[ETH_ALEN] = {0};

		entry = udcc->session_db[i];
		if (!entry)
			continue;

		if (entry->skip_subnet_checking)
			continue;

		/* found == subnet entry exists; on success the check also
		 * fills in the dmac/smac to use.
		 */
		found = !bnxt_ulp_udcc_v6_subnet_check(bp, bp->pf.fw_fid,
						       &entry->dst_ip,
						       dmac,
						       smac);

		orig_state = entry->state;

		if (suspend && found &&
		    orig_state == UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED) {
			if ((!ether_addr_equal(entry->dst_mac_mod, dmac)) ||
			    (!ether_addr_equal(entry->src_mac_mod, smac))) {
				/* Update the mod dmac and smac */
				ether_addr_copy(entry->dst_mac_mod, dmac);
				ether_addr_copy(entry->src_mac_mod, smac);

				/* Suspend and Unsuspend if action changed */
				bnxt_udcc_suspend_session(bp, orig_state, entry);
				bnxt_udcc_unsuspend_session(bp, orig_state, entry);
			}
		} else if (suspend && !found &&
			   orig_state == UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED) {
			/* Suspend */
			bnxt_udcc_suspend_session(bp, orig_state, entry);
		} else if (!suspend && found &&
			   orig_state == !UDCC_SESSION_CFG_REQ_SESSION_STATE_ENABLED) {
			/* Unsuspend */
			bnxt_udcc_unsuspend_session(bp, orig_state, entry);
		}

	}
exit:
	mutex_unlock(&udcc->session_db_lock);
}

void bnxt_udcc_task(struct work_struct *work)
{
	struct bnxt_udcc_work *udcc_work =
container_of(work, struct bnxt_udcc_work, work); + struct bnxt *bp = udcc_work->bp; + + set_bit(BNXT_STATE_IN_UDCC_TASK, &bp->state); + /* Adding memory barrier to set the IN_UDCC_TASK bit first */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_IN_UDCC_TASK, &bp->state); + return; + } + + switch (udcc_work->session_opcode) { + case BNXT_UDCC_SESSION_CREATE: + bnxt_udcc_create_session(bp, udcc_work->session_id); + break; + + case BNXT_UDCC_SESSION_DELETE: + bnxt_udcc_delete_session(bp, udcc_work->session_id, false); + break; + case BNXT_UDCC_SESSION_UPDATE: + /* Check whether the BNXT_UDCC_SESSION_UPDATE event is from TF or Firmware. + * Clear the tf_events bits only if this event is from TF. + */ + if (udcc_work->session_id == BNXT_UDCC_MAX_SESSIONS + 1) { + /* Since UDCC session update events are not specific to a particular + * session, we might end up missing an update for a different session + * (e.g different subnet), if we are already in the middle of processing + * in __bnxt_udcc_update_session(). To avoid this, clear the bit first + * before we enter __bnxt_udcc_update_session() to allow a subsequent + * event to schedule the task again. + */ + if (udcc_work->session_suspend) + clear_bit(BNXT_UDCC_INFO_TF_EVENT_SUSPEND, + &bp->udcc_info->tf_events); + else + clear_bit(BNXT_UDCC_INFO_TF_EVENT_UNSUSPEND, + &bp->udcc_info->tf_events); + } + __bnxt_udcc_update_session(bp, udcc_work->session_suspend); + break; + default: + netdev_warn(bp->dev, "Invalid UDCC session opcode session_id: %d\n", + udcc_work->session_id); + } + + /* Complete all memory stores before setting bit. 
*/ + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_UDCC_TASK, &bp->state); + kfree(udcc_work); +} + +void bnxt_free_udcc_info(struct bnxt *bp) +{ + struct bnxt_udcc_info *udcc = bp->udcc_info; + int i; + + if (!udcc) + return; + + for (i = 0; i < BNXT_UDCC_MAX_SESSIONS; i++) + bnxt_udcc_delete_session(bp, i, true); + + kfree(udcc); + bp->udcc_info = NULL; + + netdev_dbg(bp->dev, "%s(): udcc_info freed up!\n", __func__); +} + +#else /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ + +void bnxt_free_udcc_info(struct bnxt *bp) +{ +} + +int bnxt_alloc_udcc_info(struct bnxt *bp) +{ + return 0; +} + +void bnxt_udcc_task(struct work_struct *work) +{ +} + +void bnxt_udcc_session_db_cleanup(struct bnxt *bp) +{ +} + +void bnxt_udcc_session_debugfs_add(struct bnxt *bp) +{ +} + +void bnxt_udcc_session_debugfs_cleanup(struct bnxt *bp) +{ +} +#endif /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.h new file mode 100644 index 000000000000..8b5572421477 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_udcc.h @@ -0,0 +1,84 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_UDCC_H +#define BNXT_UDCC_H + +#define BNXT_UDCC_MAX_SESSIONS 2048 + +#define BNXT_UDCC_HASH_SIZE 64 + +#define BNXT_UDCC_SESSION_CREATE 0 +#define BNXT_UDCC_SESSION_DELETE 1 +#define BNXT_UDCC_SESSION_UPDATE 2 +#define BNXT_UDCC_SESSION_PER_QP(bp) ((bp)->udcc_info->session_type & \ + UDCC_QCAPS_RESP_SESSION_TYPE_PER_QP) + +struct bnxt_udcc_session_entry { + u32 session_id; + u32 rx_flow_id; + u32 tx_flow_id; + u64 rx_counter_hndl; + u64 tx_counter_hndl; + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u8 dst_mac_mod[ETH_ALEN]; + u8 src_mac_mod[ETH_ALEN]; + struct in6_addr dst_ip; + struct in6_addr src_ip; + u32 src_qp_num; + u32 dest_qp_num; + struct dentry *debugfs_dir; + struct bnxt *bp; + u8 state; + bool v4_dst; + bool skip_subnet_checking; +}; + +struct bnxt_udcc_work { + struct work_struct work; + struct bnxt *bp; + u32 session_id; + u8 session_opcode; + bool session_suspend; +}; + +struct bnxt_udcc_info { + u32 max_sessions; + struct bnxt_udcc_session_entry *session_db[BNXT_UDCC_MAX_SESSIONS]; + struct mutex session_db_lock; /* protect session_db */ + u32 session_count; + u8 session_type; + struct dentry *udcc_debugfs_dir; + u16 max_comp_cfg_xfer; + u16 max_comp_data_xfer; + unsigned long tf_events; +#define BNXT_UDCC_INFO_TF_EVENT_SUSPEND BIT(0) +#define BNXT_UDCC_INFO_TF_EVENT_UNSUSPEND BIT(1) + /* mode is 0 if udcc is disabled */ + u8 mode; +}; + +static inline u8 bnxt_udcc_get_mode(struct bnxt *bp) +{ + return bp->udcc_info ? 
bp->udcc_info->mode : 0; +} + +int bnxt_alloc_udcc_info(struct bnxt *bp); +void bnxt_free_udcc_info(struct bnxt *bp); +void bnxt_udcc_session_db_cleanup(struct bnxt *bp); +void bnxt_udcc_task(struct work_struct *work); +int bnxt_hwrm_udcc_session_query(struct bnxt *bp, u32 session_id, + struct hwrm_udcc_session_query_output *resp_out); +int bnxt_queue_udcc_work(struct bnxt *bp, u32 session_id, u32 session_opcode, + bool suspend); +void bnxt_udcc_update_session(struct bnxt *bp, bool suspend); +void bnxt_udcc_session_debugfs_add(struct bnxt *bp); +void bnxt_udcc_session_debugfs_cleanup(struct bnxt *bp); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.c new file mode 100644 index 000000000000..d46db332f729 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.c @@ -0,0 +1,646 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ulp.h" +#include "bnxt_log.h" +#include "bnxt_log_data.h" + +static DEFINE_IDA(bnxt_aux_dev_ids); + +static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) +{ + struct bnxt_en_dev *edev = bp->edev; + int num_msix, i; + + num_msix = edev->ulp_tbl->msix_requested; + for (i = 0; i < num_msix; i++) { + ent[i].vector = bp->irq_tbl[i].vector; + ent[i].ring_idx = i; + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + ent[i].db_offset = bp->db_offset; + else + ent[i].db_offset = i * 0x80; + } +} + +int bnxt_get_ulp_msix_num(struct bnxt *bp) +{ + if (bp->edev) + return bp->edev->ulp_num_msix_vec; + return 0; +} + +void bnxt_set_ulp_msix_num(struct bnxt *bp, int num) +{ + if (bp->edev) + bp->edev->ulp_num_msix_vec = num; +} + +int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev)) + return bp->edev->ulp_num_msix_vec; + return 0; +} + +int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) +{ + if (bp->edev) + return bp->edev->ulp_num_ctxs; + return 0; +} + +void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx) +{ + if (bp->edev) + bp->edev->ulp_num_ctxs = num_ulp_ctx; +} + +int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev)) + return bp->edev->ulp_num_ctxs; + return 0; +} + +void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp) +{ + if (bp->edev) { + bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS; + /* Reserve one additional stat_ctx for PF0 (except + * on 1-port NICs) as it also creates one stat_ctx + * for PF1 in case of RoCE bonding. 
+ */ + if (BNXT_PF(bp) && !bp->pf.port_id && + bp->port_count > 1) + bp->edev->ulp_num_ctxs++; + } +} + +int bnxt_register_dev(struct bnxt_en_dev *edev, + struct bnxt_ulp_ops *ulp_ops, void *handle) +{ + struct net_device *dev = edev->net; + struct bnxt *bp = netdev_priv(dev); + unsigned int max_stat_ctxs; + struct bnxt_ulp *ulp; + int rc = 0; + + rtnl_lock(); + mutex_lock(&edev->en_dev_lock); + if (!bp->irq_tbl) { + rc = -ENODEV; + goto exit; + } + max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); + if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || + bp->cp_nr_rings == max_stat_ctxs) { + rc = -ENOMEM; + goto exit; + } + + ulp = edev->ulp_tbl; + ulp->handle = handle; + rcu_assign_pointer(ulp->ulp_ops, ulp_ops); + + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[0], 0); + + edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp); + + bnxt_fill_msix_vecs(bp, bp->edev->msix_entries); + edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; +exit: + mutex_unlock(&edev->en_dev_lock); + rtnl_unlock(); + return rc; +} +EXPORT_SYMBOL(bnxt_register_dev); + +int bnxt_unregister_dev(struct bnxt_en_dev *edev) +{ + struct net_device *dev = edev->net; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ulp *ulp; + + ulp = edev->ulp_tbl; + rtnl_lock(); + if (ulp->msix_requested) + edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; + edev->ulp_tbl->msix_requested = 0; + + if (ulp->max_async_event_id) + bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true); + + RCU_INIT_POINTER(ulp->ulp_ops, NULL); + synchronize_rcu(); + ulp->max_async_event_id = 0; + ulp->async_events_bmap = NULL; + rtnl_unlock(); + return 0; +} +EXPORT_SYMBOL(bnxt_unregister_dev); + +static int bnxt_num_ulp_msix_requested(struct bnxt *bp, int num_msix) +{ + int num_msix_want; + + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + return 0; + + /* + * Request MSIx based on the function type. This is + * a temporary solution to enable max VFs when NPAR is + * enabled. 
+ * TODO - change the scheme with an adapter specific check + * as the latest adapters can support more NQs. For now + * this change satisfy all adapter versions. + */ + if (BNXT_VF(bp)) + num_msix_want = BNXT_MAX_ROCE_MSIX_VF; + else if (bp->port_partition_type) + num_msix_want = BNXT_MAX_ROCE_MSIX_NPAR_PF; + else if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) || + (bp->flags & BNXT_FLAG_CHIP_P7)) +#ifdef BNXT_FPGA + num_msix_want = BNXT_MAX_ROCE_MSIX_PF - 1; +#else + num_msix_want = BNXT_MAX_ROCE_MSIX_GEN_P5_PF; +#endif + else + num_msix_want = num_msix; + + /* + * Since MSIX vectors are used for both NQs and CREQ, we should try to + * allocate num_online_cpus + 1 by taking into account the CREQ. This + * leaves the number of MSIX vectors for NQs match the number of CPUs + * and allows the system to be fully utilized + */ + num_msix_want = min_t(u32, num_msix_want, num_online_cpus() + 1); + num_msix_want = min_t(u32, num_msix_want, BNXT_MAX_ROCE_MSIX); + num_msix_want = max_t(u32, num_msix_want, BNXT_MIN_ROCE_CP_RINGS); + + return num_msix_want; +} + +int bnxt_send_msg(struct bnxt_en_dev *edev, + struct bnxt_fw_msg *fw_msg) +{ + struct net_device *dev = edev->net; + struct bnxt *bp = netdev_priv(dev); + struct output *resp; + struct input *req; + u32 resp_len; + int rc; + + if (bp->fw_reset_state) + return -EBUSY; + + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; + + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + resp_len = le16_to_cpu(resp->resp_len); + if (resp_len) { + if (fw_msg->resp_max_len < resp_len) + resp_len = fw_msg->resp_max_len; + + memcpy(fw_msg->resp, resp, resp_len); + } + hwrm_req_drop(bp, req); + return rc; +} +EXPORT_SYMBOL(bnxt_send_msg); + +void bnxt_ulp_stop(struct bnxt *bp) +{ + struct bnxt_aux_priv *bnxt_aux = bp->aux_priv; + struct bnxt_en_dev *edev = bp->edev; + + if 
(!edev) + return; + mutex_lock(&edev->en_dev_lock); + /* This check is needed for RoCE lag case */ + if (!bnxt_ulp_registered(edev)) { + mutex_unlock(&edev->en_dev_lock); + return; + } + + edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; + edev->en_state = bp->state; + if (bnxt_aux) { + struct auxiliary_device *adev; + + adev = &bnxt_aux->aux_dev; + if (adev->dev.driver) { + struct auxiliary_driver *adrv; + pm_message_t pm = {}; + + adrv = to_auxiliary_drv(adev->dev.driver); + if (adrv->suspend) + adrv->suspend(adev, pm); + } + } + mutex_unlock(&edev->en_dev_lock); +} + +void bnxt_ulp_start(struct bnxt *bp, int err) +{ + struct bnxt_aux_priv *bnxt_aux = bp->aux_priv; + struct bnxt_en_dev *edev = bp->edev; + + if (!edev) + return; + + edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; + edev->en_state = bp->state; + + if (err) + return; + + mutex_lock(&edev->en_dev_lock); + /* This check is needed for RoCE lag case */ + if (!bnxt_ulp_registered(edev)) { + mutex_unlock(&edev->en_dev_lock); + return; + } + + bnxt_fill_msix_vecs(bp, bp->edev->msix_entries); + + if (bnxt_aux) { + struct auxiliary_device *adev; + + adev = &bnxt_aux->aux_dev; + if (adev->dev.driver) { + struct auxiliary_driver *adrv; + + adrv = to_auxiliary_drv(adev->dev.driver); + if (adrv->resume) + adrv->resume(adev); + } + } + mutex_unlock(&edev->en_dev_lock); +} + +/* + * In kernels where native Auxbus infrastructure support is not there, + * invoke the auxiliary_driver shutdown function. 
+ */ +#ifndef HAVE_AUXILIARY_DRIVER +void bnxt_ulp_shutdown(struct bnxt *bp) +{ + struct bnxt_aux_priv *bnxt_aux = bp->aux_priv; + struct bnxt_en_dev *edev = bp->edev; + + if (!edev) + return; + + if (bnxt_aux) { + struct auxiliary_device *adev; + + adev = &bnxt_aux->aux_dev; + if (adev->dev.driver) { + struct auxiliary_driver *adrv; + + adrv = to_auxiliary_drv(adev->dev.driver); + if (adrv->shutdown) + adrv->shutdown(adev); + } + } +} +#endif + +void bnxt_ulp_irq_stop(struct bnxt *bp) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + bool reset = false; + + ASSERT_RTNL(); + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev)) { + struct bnxt_ulp *ulp = edev->ulp_tbl; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_stop) + return; + if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) + reset = true; + edev->en_state = bp->state; + ops->ulp_irq_stop(ulp->handle, reset); + } +} + +void bnxt_ulp_irq_restart(struct bnxt *bp, int err) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + + ASSERT_RTNL(); + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev)) { + struct bnxt_ulp *ulp = edev->ulp_tbl; + struct bnxt_msix_entry *ent = NULL; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_restart) + return; + + if (!err) { + ent = kcalloc(ulp->msix_requested, sizeof(*ent), + GFP_KERNEL); + if (!ent) + return; + bnxt_fill_msix_vecs(bp, ent); + } + edev->en_state = bp->state; + ops->ulp_irq_restart(ulp->handle, ent); + kfree(ent); + } +} + +void bnxt_logger_ulp_live_data(void *d, u32 seg_id) +{ + struct bnxt_en_dev *edev; + struct bnxt *bp; + + bp = d; + edev = bp->edev; + + if (!edev) + return; + + if (bnxt_ulp_registered(edev)) { + struct bnxt_ulp_ops *ops; + struct bnxt_ulp *ulp; + + ulp = edev->ulp_tbl; + 
ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_log_live) + return; + + ops->ulp_log_live(ulp->handle, seg_id); + } +} + +void bnxt_ulp_log_raw(struct bnxt_en_dev *edev, u16 logger_id, + void *data, int len) +{ + bnxt_log_raw(netdev_priv(edev->net), logger_id, data, len); +} +EXPORT_SYMBOL(bnxt_ulp_log_raw); + +void bnxt_ulp_log_live(struct bnxt_en_dev *edev, u16 logger_id, + const char *format, ...) +{ + bnxt_log_live(netdev_priv(edev->net), logger_id, format); +} +EXPORT_SYMBOL(bnxt_ulp_log_live); + +void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) +{ + u16 event_id = le16_to_cpu(cmpl->event_id); + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + struct bnxt_ulp *ulp; + + if (!bnxt_ulp_registered(edev)) + return; + ulp = edev->ulp_tbl; + + rcu_read_lock(); + + ops = rcu_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_async_notifier) + goto exit_unlock_rcu; + if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id) + goto exit_unlock_rcu; + + /* Read max_async_event_id first before testing the bitmap. 
*/ + smp_rmb(); + if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED) + goto exit_unlock_rcu; + + if (test_bit(event_id, ulp->async_events_bmap)) + ops->ulp_async_notifier(ulp->handle, cmpl); +exit_unlock_rcu: + rcu_read_unlock(); +} +EXPORT_SYMBOL(bnxt_ulp_async_events); + +int bnxt_register_async_events(struct bnxt_en_dev *edev, + unsigned long *events_bmap, u16 max_id) +{ + struct net_device *dev = edev->net; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ulp *ulp; + + ulp = edev->ulp_tbl; + + ulp->async_events_bmap = events_bmap; + /* Make sure bnxt_ulp_async_events() sees this order */ + smp_wmb(); + ulp->max_async_event_id = max_id; + bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true); + return 0; +} +EXPORT_SYMBOL(bnxt_register_async_events); + +int bnxt_dbr_complete(struct bnxt_en_dev *edev, u32 epoch) +{ + struct net_device *dev = edev->net; + struct bnxt *bp = netdev_priv(dev); + + bnxt_dbr_recovery_done(bp, epoch, BNXT_ROCE_ULP); + + return 0; +} +EXPORT_SYMBOL(bnxt_dbr_complete); + +void bnxt_rdma_aux_device_uninit(struct bnxt *bp) +{ + struct bnxt_aux_priv *aux_priv; + struct auxiliary_device *adev; + + /* Skip if no auxiliary device init was done. 
*/ + if (!bp->aux_priv) + return; + + bnxt_unregister_logger(bp, BNXT_LOGGER_ROCE); + aux_priv = bp->aux_priv; + adev = &aux_priv->aux_dev; + auxiliary_device_uninit(adev); +} + +static void bnxt_aux_dev_release(struct device *dev) +{ + struct bnxt_aux_priv *aux_priv = + container_of(dev, struct bnxt_aux_priv, aux_dev.dev); + struct bnxt *bp = netdev_priv(aux_priv->edev->net); + + ida_free(&bnxt_aux_dev_ids, aux_priv->id); + kfree(aux_priv->edev->ulp_tbl); + kfree(aux_priv->edev); + bp->edev = NULL; + kfree(bp->aux_priv); + bp->aux_priv = NULL; +} + +void bnxt_rdma_aux_device_del(struct bnxt *bp) +{ + if (!bp->edev) + return; + + auxiliary_device_delete(&bp->aux_priv->aux_dev); +} + +static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp) +{ + edev->net = bp->dev; + edev->pdev = bp->pdev; + edev->l2_db_size = bp->db_size; + edev->l2_db_size_nc = bp->db_size_nc; + edev->l2_db_offset = bp->db_offset; + mutex_init(&edev->en_dev_lock); + + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; + if (bp->is_asym_q) + edev->flags |= BNXT_EN_FLAG_ASYM_Q; + if (bp->flags & BNXT_FLAG_MULTI_HOST) + edev->flags |= BNXT_EN_FLAG_MULTI_HOST; + if (bp->flags & BNXT_FLAG_MULTI_ROOT) + edev->flags |= BNXT_EN_FLAG_MULTI_ROOT; + if (BNXT_VF(bp)) + edev->flags |= BNXT_EN_FLAG_VF; + if (bp->fw_cap & BNXT_FW_CAP_HW_LAG_SUPPORTED) + edev->flags |= BNXT_EN_FLAG_HW_LAG; + if (BNXT_ROCE_VF_RESC_CAP(bp)) + edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT; + if (BNXT_SW_RES_LMT(bp)) + edev->flags |= BNXT_EN_FLAG_SW_RES_LMT; + edev->bar0 = bp->bar0; + edev->port_partition_type = bp->port_partition_type; + edev->port_count = bp->port_count; + edev->pf_port_id = bp->pf.port_id; + edev->hw_ring_stats_size = bp->hw_ring_stats_size; + edev->ulp_version = BNXT_ULP_VERSION; + edev->en_dbr = &bp->dbr; + edev->hdbr_info = &bp->hdbr_info; + /* Update chip type used for roce 
pre-init purposes */ + edev->chip_num = bp->chip_num; +} + +void bnxt_rdma_aux_device_add(struct bnxt *bp) +{ + struct auxiliary_device *aux_dev; + int rc; + + if (!bp->edev) + return; + + aux_dev = &bp->aux_priv->aux_dev; + rc = auxiliary_device_add(aux_dev); + if (rc) { + netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n"); + auxiliary_device_uninit(aux_dev); + bp->flags &= ~BNXT_FLAG_ROCE_CAP; + } +} + +void bnxt_rdma_aux_device_init(struct bnxt *bp) +{ + struct auxiliary_device *aux_dev; + struct bnxt_aux_priv *aux_priv; + struct bnxt_en_dev *edev; + struct bnxt_ulp *ulp; + int rc; + + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + return; + + aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL); + if (!aux_priv) + goto exit; + + aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL); + if (aux_priv->id < 0) { + netdev_warn(bp->dev, "ida alloc failed for ROCE auxiliary device\n"); + kfree(aux_priv); + goto exit; + } + + aux_dev = &aux_priv->aux_dev; + aux_dev->id = aux_priv->id; + aux_dev->name = "rdma"; + aux_dev->dev.parent = &bp->pdev->dev; + aux_dev->dev.release = bnxt_aux_dev_release; + + rc = auxiliary_device_init(aux_dev); + if (rc) { + ida_free(&bnxt_aux_dev_ids, aux_priv->id); + kfree(aux_priv); + goto exit; + } + bp->aux_priv = aux_priv; + + /* From this point, all cleanup will happen via the .release callback & + * any error unwinding will need to include a call to + * auxiliary_device_uninit. 
+ */ + edev = kzalloc(sizeof(*edev), GFP_KERNEL); + if (!edev) + goto aux_dev_uninit; + + aux_priv->edev = edev; + + ulp = kzalloc(sizeof(*ulp), GFP_KERNEL); + if (!ulp) + goto aux_dev_uninit; + + edev->ulp_tbl = ulp; + bp->edev = edev; + bnxt_set_edev_info(edev, bp); + bnxt_register_logger(bp, BNXT_LOGGER_ROCE, + BNXT_ULP_MAX_LOG_BUFFERS, + bnxt_logger_ulp_live_data, + BNXT_ULP_MAX_LIVE_LOG_SIZE); + bp->ulp_num_msix_want = bnxt_num_ulp_msix_requested(bp, BNXT_MAX_ROCE_MSIX); + + return; + +aux_dev_uninit: + auxiliary_device_uninit(aux_dev); +exit: + bp->flags &= ~BNXT_FLAG_ROCE_CAP; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.h new file mode 100644 index 000000000000..4466f38415d0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_ulp.h @@ -0,0 +1,162 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */ + +#ifndef BNXT_ULP_H +#define BNXT_ULP_H + +#define BNXT_ROCE_ULP 0 +#define BNXT_OTHER_ULP 1 +#define BNXT_MAX_ULP 2 + +#define BNXT_MIN_ROCE_CP_RINGS 2 +#define BNXT_MIN_ROCE_STAT_CTXS 1 + +#define BNXT_MAX_ROCE_MSIX_VF 2 +#define BNXT_MAX_ROCE_MSIX_PF 9 +#define BNXT_MAX_ROCE_MSIX_NPAR_PF 5 +#define BNXT_MAX_ROCE_MSIX 64 +#define BNXT_MAX_ROCE_MSIX_GEN_P5_PF BNXT_MAX_ROCE_MSIX + +#define BNXT_ULP_MAX_LOG_BUFFERS 1024 +#define BNXT_ULP_MAX_LIVE_LOG_SIZE (32 << 20) + +struct hwrm_async_event_cmpl; +struct bnxt; + +struct bnxt_msix_entry { + u32 vector; + u32 ring_idx; + u32 db_offset; +}; + +struct bnxt_ulp_ops { + /* async_notifier() cannot sleep (in BH context) */ + void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *); + void (*ulp_irq_stop)(void *, bool); + void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *); + void (*ulp_log_live)(void *handle, u32 seg_id); +}; + +struct bnxt_fw_msg { + void *msg; + int msg_len; + void *resp; + int resp_max_len; + int timeout; +}; + +struct bnxt_ulp { + void *handle; + struct bnxt_ulp_ops __rcu *ulp_ops; + unsigned long *async_events_bmap; + u16 max_async_event_id; + u16 msix_requested; +}; + +struct bnxt_en_dev { + struct net_device *net; + struct pci_dev *pdev; + struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX]; + u32 flags; + #define BNXT_EN_FLAG_ROCEV1_CAP 0x1 + #define BNXT_EN_FLAG_ROCEV2_CAP 0x2 + #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ + BNXT_EN_FLAG_ROCEV2_CAP) + #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 + #define BNXT_EN_FLAG_ULP_STOPPED 0x8 + #define BNXT_EN_FLAG_ASYM_Q 0x10 + #define BNXT_EN_FLAG_MULTI_HOST 0x20 + #define BNXT_EN_FLAG_VF 0x40 + #define BNXT_EN_FLAG_HW_LAG 0x80 + #define BNXT_EN_FLAG_ROCE_VF_RES_MGMT 0x100 + #define BNXT_EN_FLAG_MULTI_ROOT 0x200 + #define BNXT_EN_FLAG_SW_RES_LMT 0x400 +#define BNXT_EN_ASYM_Q(edev) ((edev)->flags & BNXT_EN_FLAG_ASYM_Q) +#define BNXT_EN_MH(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_HOST) +#define BNXT_EN_VF(edev) 
((edev)->flags & BNXT_EN_FLAG_VF) +#define BNXT_EN_HW_LAG(edev) ((edev)->flags & BNXT_EN_FLAG_HW_LAG) +#define BNXT_EN_MR(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_ROOT) +#define BNXT_EN_SW_RES_LMT(edev) ((edev)->flags & BNXT_EN_FLAG_SW_RES_LMT) + struct bnxt_ulp *ulp_tbl; + int l2_db_size; /* Doorbell BAR size in + * bytes mapped by L2 + * driver. + */ + int l2_db_size_nc; /* Doorbell BAR size in + * bytes mapped as non- + * cacheable. + */ + u32 ulp_version; /* bnxt_re checks the + * ulp_version is correct + * to ensure compatibility + * with bnxt_en. + */ + #define BNXT_ULP_VERSION 0x695a000f /* Change this when any interface + * structure or API changes + * between bnxt_en and bnxt_re. + */ + unsigned long en_state; + void __iomem *bar0; + u16 hw_ring_stats_size; + u16 pf_port_id; + u8 port_partition_type; +#define BNXT_EN_NPAR(edev) ((edev)->port_partition_type) + u8 port_count; + struct bnxt_dbr *en_dbr; + + struct bnxt_hdbr_info *hdbr_info; + u16 chip_num; + int l2_db_offset; /* Doorbell BAR offset + * of non-cacheable. 
+ */ + + u16 ulp_num_msix_vec; + u16 ulp_num_ctxs; + struct mutex en_dev_lock; /* serialize ulp operations */ +}; + +static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev) +{ + if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops)) + return true; + return false; +} + +int bnxt_get_ulp_msix_num(struct bnxt *bp); +int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp); +void bnxt_set_ulp_msix_num(struct bnxt *bp, int num); +int bnxt_get_ulp_stat_ctxs(struct bnxt *bp); +int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp); +void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs); +void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp); +void bnxt_ulp_stop(struct bnxt *bp); +void bnxt_ulp_start(struct bnxt *bp, int err); +void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); +#ifndef HAVE_AUXILIARY_DRIVER +void bnxt_ulp_shutdown(struct bnxt *bp); +#endif +void bnxt_ulp_irq_stop(struct bnxt *bp); +void bnxt_ulp_irq_restart(struct bnxt *bp, int err); +void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl); +void bnxt_rdma_aux_device_uninit(struct bnxt *bp); +void bnxt_rdma_aux_device_init(struct bnxt *bp); +void bnxt_rdma_aux_device_add(struct bnxt *bp); +void bnxt_rdma_aux_device_del(struct bnxt *bp); +int bnxt_register_dev(struct bnxt_en_dev *edev, + struct bnxt_ulp_ops *ulp_ops, void *handle); +int bnxt_unregister_dev(struct bnxt_en_dev *edev); +int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg); +int bnxt_register_async_events(struct bnxt_en_dev *edev, + unsigned long *events_bmap, u16 max_id); +int bnxt_dbr_complete(struct bnxt_en_dev *edev, u32 epoch); +void bnxt_ulp_log_live(struct bnxt_en_dev *edev, u16 logger_id, + const char *format, ...); +void bnxt_ulp_log_raw(struct bnxt_en_dev *edev, u16 logger_id, void *data, int len); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.c new file mode 100644 index 000000000000..a489d065c6b8 --- /dev/null 
+++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.c @@ -0,0 +1,1394 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#ifdef HAVE_TC_SETUP_TYPE +#include +#endif + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_sriov.h" +#include "bnxt_vfr.h" +#include "bnxt_devlink.h" +#include "bnxt_tc.h" +#include "bnxt_ulp_flow.h" +#include "bnxt_tf_common.h" +#include "tfc.h" +#include "tfc_debug.h" + +/* Synchronize TF ULP port operations. + * TBD: Revisit this global lock and consider making this a per-adapter lock. + */ +DEFINE_MUTEX(tf_port_lock); + +#if defined(CONFIG_VF_REPS) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* This function removes a FID from the AFM session and designates whether + * it is an endpoint or representor to the firmware based on the type field + * passed into the HWRM message. + */ +int bnxt_hwrm_release_afm_func(struct bnxt *bp, u16 fid, u16 rfid, + u8 type, u32 flags) +{ + struct hwrm_cfa_release_afm_func_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_RELEASE_AFM_FUNC); + + req->fid = cpu_to_le16(fid); + req->rfid = cpu_to_le16(rfid); + req->flags = cpu_to_le16(flags); + req->type = type; + + rc = hwrm_req_send(bp, req); + return rc; +} + +/* This function initializes Truflow feature which enables host based + * flow offloads. The flag argument provides information about the TF + * consumer and a reference to the consumer is set in bp->tf_flags. + * The initialization is done only once when the first consumer calls + * this function. 
+ */ +int bnxt_tf_port_init(struct bnxt *bp, u16 flag) +{ + int rc; + + mutex_lock(&tf_port_lock); + if (bp->tf_flags & BNXT_TF_FLAG_INITIALIZED) { + /* TF already initialized; just set the in-use flag + * for the specific consumer and return success. + */ + rc = 0; + goto exit; + } + + if (!BNXT_TF_RX_NIC_FLOW_CAP(bp) && !BNXT_UDCC_CAP(bp) && BNXT_CHIP_P7(bp)) { + /* Need to release the Fid from AFM control if TF application */ + rc = bnxt_hwrm_release_afm_func(bp, bp->pf.fw_fid, + bp->pf.fw_fid, + CFA_RELEASE_AFM_FUNC_REQ_TYPE_RFID, + 0); + if (rc) { + netdev_dbg(bp->dev, "Failed in hwrm release afm func:%u rc=%d\n", + bp->pf.fw_fid, rc); + goto exit; + } + netdev_dbg(bp->dev, "Released RFID:%d\n", bp->pf.fw_fid); + } + rc = bnxt_ulp_port_init(bp); +exit: + if (!rc) { + bp->tf_flags |= flag; + if (!(bp->tf_flags & BNXT_TF_FLAG_INITIALIZED)) + bp->tf_flags |= BNXT_TF_FLAG_INITIALIZED; + + } else { + netdev_err(bp->dev, "Failed to initialize Truflow feature\n"); + } + mutex_unlock(&tf_port_lock); + return rc; +} + +/* This function allocates Truflow tfo structure */ +int bnxt_tfo_init(struct bnxt *bp) +{ + int rc; + + mutex_lock(&tf_port_lock); + rc = bnxt_ulp_tfo_init(bp); + if (rc) + netdev_err(bp->dev, "Failed to allocate Truflow structure\n"); + mutex_unlock(&tf_port_lock); + return rc; +} + +void bnxt_tfo_deinit(struct bnxt *bp) +{ + mutex_lock(&tf_port_lock); + bnxt_ulp_tfo_deinit(bp); + mutex_unlock(&tf_port_lock); +} + +static bool bnxt_is_tf_busy(struct bnxt *bp) +{ + return (bp->tf_flags & + (BNXT_TF_FLAG_NICFLOW | + BNXT_TF_FLAG_SWITCHDEV | + BNXT_TF_FLAG_DEVLINK)); +} + +/* Uninitialize TF. The flag argument represents the TF consumer + * so that the reference held in bp->tf_flags earlier can be + * released. TF is uninitialized when there are no more active + * consumers. The flag value of NONE(0) overrides this logic and + * uninits regardless of any active consumers (e.g during rmmod). 
+ */
+void bnxt_tf_port_deinit(struct bnxt *bp, u16 flag)
+{
+	mutex_lock(&tf_port_lock);
+
+	/* Not initialized; nothing to do.
+	 * Fix: the INITIALIZED bit lives in bp->tf_flags (set and cleared
+	 * there by bnxt_tf_port_init() and below), not in bp->flags.
+	 */
+	if (!(bp->tf_flags & BNXT_TF_FLAG_INITIALIZED))
+		goto done;
+
+	/* Clear in-use flag for the specific consumer */
+	if (flag)
+		bp->tf_flags &= ~flag;
+
+	/* Are there other TF consumers? */
+	if (bnxt_is_tf_busy(bp) && flag)
+		goto done;
+
+	/* Ok to deinit */
+	bnxt_ulp_port_deinit(bp);
+	bp->tf_flags &= ~BNXT_TF_FLAG_INITIALIZED;
+
+done:
+	mutex_unlock(&tf_port_lock);
+}
+
+void bnxt_custom_tf_port_init(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD
+	if (bnxt_tc_is_switchdev_mode(bp))
+		return;
+	if (BNXT_PF(bp) && BNXT_TRUFLOW_EN(bp))
+		bnxt_tf_port_init(bp, BNXT_TF_FLAG_NONE);
+#endif
+	return;
+}
+
+void bnxt_custom_tf_port_deinit(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD
+	if (bnxt_tc_is_switchdev_mode(bp))
+		return;
+	if (BNXT_PF(bp) && BNXT_TRUFLOW_EN(bp))
+		bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_NONE);
+#endif
+	return;
+}
+
+int bnxt_devlink_tf_port_init(struct bnxt *bp)
+{
+	if (bp->dl_param_truflow)
+		return 0;
+
+	if (BNXT_PF(bp) && BNXT_TRUFLOW_EN(bp))
+		return bnxt_tf_port_init(bp, BNXT_TF_FLAG_DEVLINK);
+
+	return -EOPNOTSUPP;
+}
+
+void bnxt_devlink_tf_port_deinit(struct bnxt *bp)
+{
+	if (!bp->dl_param_truflow)
+		return;
+
+	if (BNXT_PF(bp) && BNXT_TRUFLOW_EN(bp))
+		bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_DEVLINK);
+}
+
+#endif
+
+#ifdef CONFIG_VF_REPS
+
+#define CFA_HANDLE_INVALID	0xffff
+#define VF_IDX_INVALID		0xffff
+
+static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
+			      u32 *tx_cfa_action, u16 *rx_cfa_code)
+{
+	struct hwrm_cfa_vfr_alloc_output *resp;
+	struct hwrm_cfa_vfr_alloc_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC);
+	if (!rc) {
+		req->vf_id = cpu_to_le16(vf_idx);
+		sprintf(req->vfr_name, "vfr%d", vf_idx);
+
+		resp = hwrm_req_hold(bp, req);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
+
*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } + hwrm_req_drop(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); + return rc; +} + +static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) +{ + struct hwrm_cfa_vfr_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); + if (!rc) { + sprintf(req->vfr_name, "vfr%d", vf_idx); + rc = hwrm_req_send(bp, req); + } + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); + return rc; +} + +static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + u16 *max_mtu) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + struct bnxt_vf_info *vf; + u16 mtu; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + rcu_read_lock(); + vf = rcu_dereference(bp->pf.vf); + if (!vf) { + rcu_read_unlock(); + return -EINVAL; + } + req->fid = vf[vf_rep->vf_idx].fw_fid; + rcu_read_unlock(); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + mtu = le16_to_cpu(resp->max_mtu_configured); + if (!mtu) + *max_mtu = BNXT_MAX_MTU; + else + *max_mtu = mtu; + } + hwrm_req_drop(bp, req); + + return rc; +} + +static int bnxt_vf_rep_open(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + /* Enable link and TX only if the parent PF is open. 
*/ + if (netif_running(bp->dev)) { + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + } + return 0; +} + +static int bnxt_vf_rep_close(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_tx_disable(dev); + + return 0; +} + +static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc, len = skb->len; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)vf_rep->dst); + skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); + skb->dev = vf_rep->dst->u.port_info.lower_dev; + + rc = dev_queue_xmit(skb); + if (!rc) { + vf_rep->tx_stats.packets++; + vf_rep->tx_stats.bytes += len; + } + return rc; +} + +static void bnxt_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + if (!vf_rep || !vf_rep->bp) + return; + + stats->rx_packets = vf_rep->rx_stats.packets; + stats->rx_bytes = vf_rep->rx_stats.bytes; + stats->tx_packets = vf_rep->tx_stats.packets; + stats->tx_bytes = vf_rep->tx_stats.bytes; +} + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_TC_SETUP_TYPE +#ifdef HAVE_TC_SETUP_BLOCK +static LIST_HEAD(bnxt_vf_block_cb_list); + +static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + struct bnxt_vf_rep *vf_rep = cb_priv; + struct bnxt *bp = vf_rep->bp; + u16 vf_fid; + + vf_fid = bnxt_vf_target_id(&bp->pf, vf_rep->vf_idx); + if (vf_fid == INVALID_HW_RING_ID) + return -EINVAL; + + if (!bnxt_tc_flower_enabled(vf_rep->bp)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, vf_fid, type_data); +#endif + +#if defined(HAVE_TC_MATCHALL_FLOW_RULE) && defined(HAVE_FLOW_ACTION_POLICE) + case TC_SETUP_CLSMATCHALL: + return bnxt_tc_setup_matchall(bp, vf_fid, type_data); +#endif + default: + return 
-EOPNOTSUPP; + } +} + +#endif /* HAVE_TC_SETUP_BLOCK */ + +static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + switch (type) { +#ifdef HAVE_TC_SETUP_BLOCK + case TC_SETUP_BLOCK: + return flow_block_cb_setup_simple(type_data, + &bnxt_vf_block_cb_list, + bnxt_vf_rep_setup_tc_block_cb, vf_rep, vf_rep, true); +#else + case TC_SETUP_CLSFLOWER: { + struct bnxt *bp = vf_rep->bp; + u16 vf_fid; + + vf_fid = bnxt_vf_target_id(&bp->pf, vf_rep->vf_idx); + if (vf_fid == INVALID_HW_RING_ID) + return -EINVAL; +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(bp, vf_fid, type_data); +#endif + } +#endif + default: + return -EOPNOTSUPP; + } +} + +#else /* HAVE_TC_SETUP_TYPE */ + +#ifdef HAVE_CHAIN_INDEX +static int bnxt_vf_rep_setup_tc(struct net_device *dev, u32 handle, + u32 chain_index, __be16 proto, + struct tc_to_netdev *ntc) +#else +static int bnxt_vf_rep_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *ntc) +#endif +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + u16 vf_fid; + + vf_fid = bnxt_vf_target_id(&vf_rep->bp->pf, vf_rep->vf_idx); + if (vf_fid == INVALID_HW_RING_ID) + return -EINVAL; + + if (!bnxt_tc_flower_enabled(vf_rep->bp)) + return -EOPNOTSUPP; + + switch (ntc->type) { + case TC_SETUP_CLSFLOWER: +#ifdef HAVE_TC_CB_EGDEV + return bnxt_tc_setup_flower(vf_rep->bp, + vf_fid, + ntc->cls_flower, + BNXT_TC_DEV_INGRESS); +#else + return bnxt_tc_setup_flower(vf_rep->bp, + vf_fid, + ntc->cls_flower); +#endif + default: + return -EOPNOTSUPP; + } +} +#endif /* HAVE_TC_SETUP_TYPE */ + +#ifdef HAVE_TC_CB_EGDEV +static int bnxt_vf_rep_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct bnxt_vf_rep *vf_rep = cb_priv; + struct bnxt *bp = vf_rep->bp; + u16 vf_fid; + + vf_fid = bnxt_vf_target_id(&bp->pf, vf_rep->vf_idx); + 
if (vf_fid == INVALID_HW_RING_ID) + return -EINVAL; + + if (!bnxt_tc_flower_enabled(vf_rep->bp)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, vf_fid, type_data, + BNXT_TC_DEV_EGRESS); + default: + return -EOPNOTSUPP; + } +} +#else + +#ifdef HAVE_TC_SETUP_TYPE +static int bnxt_vf_rep_tc_cb_egdev(enum tc_setup_type type, void *type_data, + void *cb_priv) +#else +static int bnxt_vf_rep_tc_cb_egdev(int type, void *type_data, void *cb_priv) +#endif /* HAVE_TC_SETUP_TYPE */ +{ + return 0; +} +#endif /* HAVE_TC_CB_EGDEV */ + +#define bnxt_cb_egdev ((void *)bnxt_vf_rep_tc_cb_egdev) + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + u16 vf_idx; + + if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { + vf_idx = bp->cfa_code_map[cfa_code]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + return NULL; +} + +struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info) +{ + u32 mark_id = 0; + u16 vf_idx; + int rc; + + if (bp->cfa_code_map && BNXT_PF(bp)) { + if (BNXT_CHIP_P7(bp)) + rc = bnxt_ulp_get_mark_from_cfacode_p7(bp, rxcmp1, tpa_info, + &mark_id); + else + rc = bnxt_ulp_get_mark_from_cfacode(bp, rxcmp1, tpa_info, + &mark_id); + if (rc) + return NULL; + /* mark_id is endpoint vf's fw fid */ + vf_idx = bp->cfa_code_map[mark_id]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + + return NULL; +} + +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); + + vf_rep->rx_stats.bytes += skb->len; + vf_rep->rx_stats.packets++; + + netif_receive_skb(skb); +} + +static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc; + + if (!vf_rep || !vf_rep->bp || !vf_rep->bp->pdev) + return -EINVAL; + + rc = 
snprintf(buf, len, "pf%dvf%d", vf_rep->bp->pf.fw_fid - 1, + vf_rep->vf_idx); + if (rc >= len) + return -EOPNOTSUPP; + + return 0; +} + +static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strscpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +#ifdef HAVE_NDO_GET_PORT_PARENT_ID +static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. + */ + return bnxt_get_port_parent_id(vf_rep->bp->dev, ppid); +} + +#else + +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. + */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; +#endif + +static const char *const bnxt_vf_rep_stats_str[] = { + "vport_rx_packets", + "vport_rx_bytes", + "vport_tx_packets", + "vport_tx_bytes", + "vport_rx_errors", + "vport_rx_discards", + "vport_tx_discards", + "vport_rx_tpa_pkt", + "vport_rx_tpa_bytes", + "vport_rx_tpa_errors" +}; + +#define BNXT_VF_REP_NUM_COUNTERS ARRAY_SIZE(bnxt_vf_rep_stats_str) +static int bnxt_get_vf_rep_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return BNXT_VF_REP_NUM_COUNTERS; + default: + return -EOPNOTSUPP; + } +} + +static void bnxt_get_vf_rep_strings(struct net_device *dev, u32 stringset, u8 *buf) +{ + u32 i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BNXT_VF_REP_NUM_COUNTERS; i++) { + sprintf(buf, "%s", bnxt_vf_rep_stats_str[i]); + buf += ETH_GSTRING_LEN; + } + 
break; + default: + netdev_err(dev, "%s invalid request %x\n", __func__, stringset); + break; + } +} + +static void bnxt_get_vf_rep_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *buf) +{ + int buf_size = BNXT_VF_REP_NUM_COUNTERS * sizeof(u64); + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt_vf_info *vf; + u64 *sw; + + if (!vf_rep || !vf_rep->bp) + return; + + memset(buf, 0, buf_size); + + rcu_read_lock(); + vf = rcu_dereference(vf_rep->bp->pf.vf); + if (!vf) { + rcu_read_unlock(); + return; + } + sw = vf[vf_rep->vf_idx].stats.sw_stats; + + buf[0] = bnxt_add_ring_rx_pkts(sw); + buf[1] = bnxt_add_ring_rx_bytes(sw); + buf[2] = bnxt_add_ring_tx_pkts(sw); + buf[3] = bnxt_add_ring_tx_bytes(sw); + buf[4] = BNXT_GET_RING_STATS64(sw, rx_error_pkts); + buf[5] = BNXT_GET_RING_STATS64(sw, rx_discard_pkts); + buf[6] = BNXT_GET_RING_STATS64(sw, tx_error_pkts) + + BNXT_GET_RING_STATS64(sw, tx_discard_pkts); + buf[7] = BNXT_GET_RING_STATS64(sw, tpa_pkts); + buf[8] = BNXT_GET_RING_STATS64(sw, tpa_bytes); + buf[9] = BNXT_GET_RING_STATS64(sw, tpa_aborts); + rcu_read_unlock(); +} + +static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { + .get_drvinfo = bnxt_vf_rep_get_drvinfo, + .get_ethtool_stats = bnxt_get_vf_rep_ethtool_stats, + .get_strings = bnxt_get_vf_rep_strings, + .get_sset_count = bnxt_get_vf_rep_sset_count, +}; + +static const struct net_device_ops bnxt_vf_rep_netdev_ops = { +#ifdef HAVE_NDO_SETUP_TC_RH + .ndo_size = sizeof(const struct net_device_ops), +#endif + .ndo_open = bnxt_vf_rep_open, + .ndo_stop = bnxt_vf_rep_close, + .ndo_start_xmit = bnxt_vf_rep_xmit, + .ndo_get_stats64 = bnxt_vf_rep_get_stats64, +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#ifdef HAVE_NDO_SETUP_TC_RH + .extended.ndo_setup_tc_rh = bnxt_vf_rep_setup_tc, +#else + .ndo_setup_tc = bnxt_vf_rep_setup_tc, +#endif +#endif +#ifdef HAVE_NDO_GET_PORT_PARENT_ID + .ndo_get_port_parent_id = bnxt_vf_rep_get_port_parent_id, +#endif +#ifdef HAVE_EXT_GET_PHYS_PORT_NAME + 
.extended.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +#else + .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name +#endif +}; + +bool bnxt_dev_is_vf_rep(struct net_device *dev) +{ + return dev->netdev_ops == &bnxt_vf_rep_netdev_ops; +} + +int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr) +{ + struct hwrm_cfa_pair_info_output *resp; + struct hwrm_cfa_pair_info_input *req; + struct bnxt_vf_rep *rep_bp = vfr; + int rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + netdev_dbg(bp->dev, + "Not a PF or trusted VF. Command not supported\n"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(bp, req, HWRM_CFA_PAIR_INFO); + if (rc) + return rc; + + rc = snprintf(req->pair_name, sizeof(req->pair_name), "%svfr%d", + dev_name(rep_bp->bp->dev->dev.parent), rep_bp->vf_idx); + + if (rc >= sizeof(req->pair_name) || rc < 0) + return -EINVAL; + + req->flags = cpu_to_le32(CFA_PAIR_INFO_REQ_FLAGS_LOOKUP_TYPE); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + /* CFA_PAIR_EXISTS command will succeed even though there is no + * CFA_PAIR, the proper check to see if CFA_PAIR exists or not + * is to look at the resp->pair_name. + */ + if (!rc && !strlen(resp->pair_name)) + rc = -EINVAL; + hwrm_req_drop(bp, req); + + return rc; +} + +int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr) +{ + struct hwrm_cfa_pair_free_input *req; + struct bnxt_vf_rep *rep_bp = vfr; + int rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + netdev_dbg(bp->dev, + "Not a PF or trusted VF. 
Command not supported\n"); + return 0; + } + + rc = hwrm_req_init(bp, req, HWRM_CFA_PAIR_FREE); + if (rc) + return rc; + + rc = snprintf(req->pair_name, sizeof(req->pair_name), "%svfr%d", + dev_name(rep_bp->bp->dev->dev.parent), rep_bp->vf_idx); + + if (rc >= sizeof(req->pair_name) || rc < 0) + return -EINVAL; + + req->pair_mode = cpu_to_le16(CFA_PAIR_FREE_REQ_PAIR_MODE_REP2FN_TRUFLOW); + req->pf_b_id = rep_bp->bp->pf.fw_fid - 1; + req->vf_id = cpu_to_le16(rep_bp->vf_idx); + + rc = hwrm_req_send(bp, req); + if (rc) + return rc; + + netdev_dbg(bp->dev, "VFR %d freed\n", rep_bp->vf_idx); + return 0; +} + +static void __bnxt_tf_free_one_vf_rep(struct bnxt *bp, + struct bnxt_vf_rep *vf_rep) +{ + if (BNXT_CHIP_P7(bp)) + bnxt_ulp_free_vf_rep_p7(bp, vf_rep); + else + bnxt_ulp_free_vf_rep(bp, vf_rep); +} + +/* Called when the parent PF interface is closed. */ +void bnxt_vf_reps_close(struct bnxt *bp) +{ + struct bnxt_vf_rep *vf_rep; + u16 num_vfs, i; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return; + + if (!bp->cfa_code_map) + return; + + num_vfs = pci_num_vf(bp->pdev); + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (netif_running(vf_rep->dev)) + bnxt_vf_rep_close(vf_rep->dev); + } +} + +/* Called when the parent PF interface is opened (re-opened) */ +void bnxt_vf_reps_open(struct bnxt *bp) +{ + int i; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return; + + if (!bp->cfa_code_map) + return; + + for (i = 0; i < pci_num_vf(bp->pdev); i++) { + /* Open the VF-Rep only if it is allocated in the FW */ + if (bp->vf_reps[i]->tx_cfa_action != CFA_HANDLE_INVALID) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); + } +} + +static void __bnxt_free_one_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep) +{ + if (!vf_rep) + return; + + if (vf_rep->dst) { + dst_release((struct dst_entry *)vf_rep->dst); + vf_rep->dst = NULL; + } + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) { + if (BNXT_TRUFLOW_EN(bp)) + __bnxt_tf_free_one_vf_rep(bp, vf_rep); + else + hwrm_cfa_vfr_free(bp, 
vf_rep->vf_idx); + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + } +} + +static void __bnxt_vf_reps_destroy(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int i; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (vf_rep) { + __bnxt_free_one_vf_rep(bp, vf_rep); + if (vf_rep->dev) { + /* if register_netdev failed, then netdev_ops + * would have been set to NULL + */ + if (vf_rep->dev->netdev_ops) { +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + bnxt_unreg_egdev(vf_rep->dev, + bnxt_cb_egdev, + (void *)vf_rep); +#endif + unregister_netdev(vf_rep->dev); + } + free_netdev(vf_rep->dev); + } + bp->vf_reps[i] = NULL; + } + } + + kfree(bp->vf_reps); + bp->vf_reps = NULL; +} + +void bnxt_vf_reps_destroy(struct bnxt *bp) +{ + bool closed = false; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return; + + if (!bp->vf_reps) + return; + + /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced + * before proceeding with VF-rep cleanup. + */ + rtnl_lock(); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + closed = true; + } + /* un-publish cfa_code_map so that RX path can't see it anymore */ + kfree(bp->cfa_code_map); + bp->cfa_code_map = NULL; + + if (closed) { + /* Temporarily set legacy mode to avoid re-opening + * representors and restore switchdev mode after that. + */ + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + bnxt_open_nic(bp, false, false); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + } + rtnl_unlock(); + + /* Need to call vf_reps_destroy() outside of rntl_lock + * as unregister_netdev takes rtnl_lock + */ + __bnxt_vf_reps_destroy(bp); +} + +/* Free the VF-Reps in firmware, during firmware hot-reset processing. + * Note that the VF-Rep netdevs are still active (not unregistered) during + * this process. 
+ */
+void bnxt_vf_reps_free(struct bnxt *bp)
+{
+	u16 num_vfs = pci_num_vf(bp->pdev);
+	int i;
+
+	if (!bnxt_tc_is_switchdev_mode(bp))
+		return;
+
+	for (i = 0; i < num_vfs; i++)
+		__bnxt_free_one_vf_rep(bp, bp->vf_reps[i]);
+}
+
+int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr)
+{
+	struct hwrm_cfa_pair_alloc_input *req;
+	struct bnxt_vf_rep *rep_bp = vfr;
+	int rc;
+
+	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
+		netdev_dbg(bp->dev,
+			   "Not a PF or trusted VF. Command not supported\n");
+		return -EINVAL;
+	}
+
+	rc = hwrm_req_init(bp, req, HWRM_CFA_PAIR_ALLOC);
+	if (rc)
+		return rc;
+
+	req->pair_mode = cpu_to_le16(CFA_PAIR_ALLOC_REQ_PAIR_MODE_REP2FN_TRUFLOW);
+	rc = snprintf(req->pair_name, sizeof(req->pair_name), "%svfr%d",
+		      dev_name(rep_bp->bp->dev->dev.parent), rep_bp->vf_idx);
+
+	if (rc >= sizeof(req->pair_name) || rc < 0) {
+		/* Fix: drop the initialized request before bailing out,
+		 * otherwise it is leaked on this error path.
+		 */
+		hwrm_req_drop(bp, req);
+		return -EINVAL;
+	}
+
+	req->pf_b_id = rep_bp->bp->pf.fw_fid - 1;
+	req->vf_b_id = cpu_to_le16(rep_bp->vf_idx);
+	req->vf_a_id = cpu_to_le16(rep_bp->bp->pf.fw_fid);
+	req->host_b_id = 1;	/* TBD - Confirm if this is OK */
+
+	rc = hwrm_req_send(bp, req);
+	if (rc)
+		return rc;
+
+	netdev_dbg(bp->dev, "VFR %d allocated\n", rep_bp->vf_idx);
+	return rc;
+}
+
+static int bnxt_alloc_tf_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+				u16 *cfa_code_map)
+{
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	if (!BNXT_CHIP_P7(bp)) {
+		rc = bnxt_ulp_alloc_vf_rep(bp, vf_rep);
+		if (rc)
+			return rc;
+	}
+
+	rcu_read_lock();
+	vf = rcu_dereference(bp->pf.vf);
+	if (vf)
+		cfa_code_map[vf[vf_rep->vf_idx].fw_fid] = vf_rep->vf_idx;
+	rcu_read_unlock();
+
+	/* ulp_mapper_bd_act_set requires cfa_code_map to be set up
+	 * so it can locate the vfr. So the allocation for vf reps for
+	 * P7 is called after the vf idx is set up in the code map.
+ */ + if (BNXT_CHIP_P7(bp)) { + rc = bnxt_ulp_alloc_vf_rep_p7(bp, vf_rep); + if (rc) + return rc; + } + + return 0; +} + +static int bnxt_vfrep_cfact_update(struct bnxt *bp, struct bnxt_vf_rep *vf_rep) +{ + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); + if (!vf_rep->dst) + return -ENOMEM; + + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + return 0; +} + +static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + u16 *cfa_code_map) +{ + int rc; + + if (!BNXT_TRUFLOW_EN(bp)) { + /* get cfa handles from FW */ + if (hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code)) + return -ENOLINK; + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + } else { + rc = bnxt_alloc_tf_vf_rep(bp, vf_rep, cfa_code_map); + if (rc) + return rc; + } + + if (!BNXT_CHIP_P7(bp)) + return bnxt_vfrep_cfact_update(bp, vf_rep); + + return 0; +} + +/* Allocate the VF-Reps in firmware, during firmware hot-reset processing. + * Note that the VF-Rep netdevs are still active (not unregistered) during + * this process. 
+ */ +int bnxt_vf_reps_alloc(struct bnxt *bp) +{ + u16 *cfa_code_map = bp->cfa_code_map, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int rc, i; + + if (!bnxt_tc_is_switchdev_mode(bp)) + return -EINVAL; + + if (!cfa_code_map) + return -EINVAL; + + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + vf_rep->vf_idx = i; + + rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); + if (rc) + goto err; + } + + return 0; + +err: + netdev_info(bp->dev, "%s error=%d\n", __func__, rc); + bnxt_vf_reps_free(bp); + return rc; +} + +/* Use the OUI of the PF's perm addr and report the same mac addr + * for the same VF-rep each time + */ +static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac) +{ + u32 addr; + + ether_addr_copy(mac, src_mac); + + addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx; + mac[3] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[5] = (u8)((addr >> 16) & 0xFF); +} + +static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + struct net_device *dev) +{ + struct net_device *pf_dev = bp->dev; + u16 max_mtu; + + dev->netdev_ops = &bnxt_vf_rep_netdev_ops; + dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; +#ifndef HAVE_NDO_GET_PORT_PARENT_ID + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); +#endif + /* Just inherit all the featues of the parent PF as the VF-R + * uses the RX/TX rings of the parent PF + */ + dev->hw_features = pf_dev->hw_features; + dev->gso_partial_features = pf_dev->gso_partial_features; + dev->vlan_features = pf_dev->vlan_features; + dev->hw_enc_features = pf_dev->hw_enc_features; + dev->features |= pf_dev->features; + bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, + dev->perm_addr); + eth_hw_addr_set(dev, dev->perm_addr); + /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */ + if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) +#ifdef HAVE_NET_DEVICE_EXT + dev->extended->max_mtu = 
max_mtu; + dev->extended->min_mtu = ETH_ZLEN; +#else + dev->max_mtu = max_mtu; + dev->min_mtu = ETH_ZLEN; +#endif +} + +int bnxt_vf_reps_create(struct bnxt *bp) +{ + u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; + int rc, i; + + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) + return -ENODEV; + + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); + if (!bp->vf_reps) + return -ENOMEM; + + /* storage for cfa_code to vf-idx mapping */ + cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map), + GFP_KERNEL); + if (!cfa_code_map) { + rc = -ENOMEM; + goto err; + } + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + + if (BNXT_CHIP_P7(bp)) { + /* ONLY for THOR2, publish cfa_code_map before all VFs are + * initialized, so default rules can run and use it when required. + * Note: code maps are inited to "invalid" by default. + */ + bp->cfa_code_map = cfa_code_map; + } + + for (i = 0; i < num_vfs; i++) { + dev = alloc_etherdev(sizeof(*vf_rep)); + if (!dev) { + rc = -ENOMEM; + goto err; + } + + vf_rep = netdev_priv(dev); + bp->vf_reps[i] = vf_rep; + vf_rep->dev = dev; + vf_rep->bp = bp; + vf_rep->vf_idx = i; + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + + if (BNXT_TRUFLOW_EN(bp)) + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + + rc = bnxt_alloc_vf_rep(bp, vf_rep, cfa_code_map); + if (rc) { + if (BNXT_TRUFLOW_EN(bp)) + vf_rep->dev->netdev_ops = NULL; + goto err; + } + + if (!BNXT_TRUFLOW_EN(bp)) + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + + rc = register_netdev(dev); + if (rc) { + /* no need for unregister_netdev in cleanup */ + dev->netdev_ops = NULL; + goto err; + } +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + bnxt_reg_egdev(vf_rep->dev, bnxt_cb_egdev, (void *)vf_rep, + vf_rep->vf_idx); +#endif + } + + /* publish cfa_code_map only after all VF-reps have been initialized */ + bp->cfa_code_map = cfa_code_map; + netif_keep_dst(bp->dev); + return 0; + +err: + netdev_err(bp->dev, 
"Failed to initialize SWITCHDEV mode, rc[%d]\n", rc); + kfree(cfa_code_map); + __bnxt_vf_reps_destroy(bp); + return rc; +} + +/* Devlink related routines */ +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + + *mode = bp->eswitch_mode; + return 0; +} + +#ifdef HAVE_ESWITCH_MODE_SET_EXTACK +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +#else +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +#endif +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + int rc = 0; + + if (BNXT_TF_RX_NIC_FLOW_CAP(bp) && (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)) { + /* + * Switchdev mode unsupported if NIC flow capable. Currently NIC flow + * is only available on Thor2 with special UDCC build + */ + netdev_dbg(bp->dev, + "Switchdev mode not supported when NIC flows are enabled\n"); + return -EOPNOTSUPP; + } + + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) { + rtnl_lock(); + if (!netif_running(bp->dev)) { + netdev_info(bp->dev, + "Bring up the interface before setting switchdev mode\n"); + rtnl_unlock(); + return -EINVAL; + } + if (bp->sriov_cfg) { + netdev_info(bp->dev, + "SRIOV is being configured, cannot set switchdev mode\n"); + rtnl_unlock(); + return -EBUSY; + } + rtnl_unlock(); + } + + mutex_lock(&bp->vf_rep_lock); + if (bp->eswitch_mode == mode) { + netdev_info(bp->dev, "already in %s eswitch mode\n", + mode == DEVLINK_ESWITCH_MODE_LEGACY ? 
+ "legacy" : "switchdev"); + rc = -EINVAL; + goto done; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + bnxt_vf_reps_destroy(bp); + if (BNXT_TRUFLOW_EN(bp)) + bnxt_tf_port_deinit(bp, BNXT_TF_FLAG_SWITCHDEV); + break; + + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (bp->hwrm_spec_code < 0x10803) { + netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n"); + rc = -ENOTSUPP; + goto done; + } + if (bp->eswitch_disabled) { /* PCI remove in progress */ + netdev_warn(bp->dev, "SWITCHDEV mode transition is disabled\n"); + rc = -EOPNOTSUPP; + goto done; + } + + if (BNXT_TRUFLOW_EN(bp)) { + rc = bnxt_tf_port_init(bp, BNXT_TF_FLAG_SWITCHDEV); + if (rc) + goto done; + } + + /* Create representors for existing VFs */ + if (pci_num_vf(bp->pdev) > 0) + rc = bnxt_vf_reps_create(bp); + break; + + default: + rc = -EINVAL; + goto done; + } +done: + if (!rc) + bp->eswitch_mode = mode; + mutex_unlock(&bp->vf_rep_lock); + return rc; +} + +#endif /* CONFIG_VF_REPS */ + +#if defined(CONFIG_VF_REPS) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, u16 fid, + u16 *vnic_id, u16 *svif) +{ + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; + u16 svif_info; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + req->fid = cpu_to_le16(fid); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto err; + + svif_info = le16_to_cpu(resp->svif_info); + if (svif && (svif_info & FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID)) { + *svif = svif_info & FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK; + /* When the VF corresponding to the VFR is down at the time of + * VFR conduit creation, the VFR rule will be programmed with + * invalid vnic id because FW will return default vnic id as + * INVALID when queried through FUNC_QCFG. As a result, when + * the VF is brought up, VF won't receive packets because + * INVALID vnic id is already programmed. 
+ * + * Hence, use svif value as vnic id during VFR conduit creation + * as both svif and default vnic id values are same and will + * never change. + */ + if (vnic_id) + *vnic_id = *svif; + } + + netdev_dbg(bp->dev, "FID %d SVIF %d VNIC ID %d\n", req->fid, *svif, *vnic_id); +err: + hwrm_req_drop(bp, req); + return rc; +} +#endif /* CONFIG_VF_REPS || CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD */ + +#ifdef CONFIG_DEBUG_FS +static char *dir_str[] = {"rx", "tx"}; + +static int bs_show(struct seq_file *m, void *unused) +{ + struct bnxt *bp = dev_get_drvdata(m->private); + char dir_str_req[32]; + int tsid; + int dir; + int rc; + + rc = sscanf(m->file->f_path.dentry->d_name.name, + "%d-%s", &tsid, dir_str_req); + if (rc < 0) { + seq_puts(m, "Failed to scan file name\n"); + return 0; + } + + if (strcmp(dir_str[0], dir_str_req) == 0) + dir = CFA_DIR_RX; + else + dir = CFA_DIR_TX; + + seq_printf(m, "ts:%d(%d) dir:%d(%d)\n", + tsid, bp->bs_data[dir].tsid, + dir, + bp->bs_data[dir].dir); + tfc_em_show(m, bp->tfp, tsid, dir); + return 0; +} + +void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir) +{ + char name[32]; + int dir; + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + /* Format is: tablescope-dir */ + sprintf(name, "%d-%s", + tsid, dir_str[dir]); + bp->bs_data[dir].tsid = tsid; + bp->bs_data[dir].dir = dir; + dev_set_drvdata(&bp->dev->dev, bp); + if (!debugfs_lookup(name, port_dir)) + debugfs_create_devm_seqfile(&bp->dev->dev, + name, + port_dir, + bs_show); + } +} +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.h new file mode 100644 index 000000000000..2c0a827d08ba --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_vfr.h @@ -0,0 +1,266 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_VFR_H +#define BNXT_VFR_H + +#include + +#ifdef CONFIG_VF_REPS + +#define MAX_CFA_CODE 65536 + +int bnxt_hwrm_release_afm_func(struct bnxt *bp, u16 fid, u16 rfid, + u8 type, u32 flags); +int bnxt_vf_reps_create(struct bnxt *bp); +void bnxt_vf_reps_destroy(struct bnxt *bp); +void bnxt_vf_reps_close(struct bnxt *bp); +void bnxt_vf_reps_open(struct bnxt *bp); +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); +struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info); +int bnxt_vf_reps_alloc(struct bnxt *bp); +void bnxt_vf_reps_free(struct bnxt *bp); +int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr); +int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr); +int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr); + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + return bnxt_vf_target_id(&bp->pf, vf_rep->vf_idx); +} + +static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp) +{ + return bp->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV; +} + +bool bnxt_dev_is_vf_rep(struct net_device *dev); +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); +#ifdef HAVE_ESWITCH_MODE_SET_EXTACK +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack); +#else +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); +#endif + +int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, u16 fid, + u16 *vnic_id, u16 *svif); +int bnxt_tf_port_init(struct bnxt *bp, u16 flag); +int bnxt_tfo_init(struct bnxt *bp); +void bnxt_tfo_deinit(struct bnxt *bp); +void bnxt_tf_port_deinit(struct 
bnxt *bp, u16 flag); +void bnxt_custom_tf_port_init(struct bnxt *bp); +void bnxt_custom_tf_port_deinit(struct bnxt *bp); +int bnxt_devlink_tf_port_init(struct bnxt *bp); +void bnxt_devlink_tf_port_deinit(struct bnxt *bp); +#ifdef CONFIG_DEBUG_FS +void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir); +#endif /* CONFIG_DEBUG_FS */ + +#elif defined CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD +static inline void bnxt_vf_reps_destroy(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} + +static inline struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info + *tpa_info) +{ + return NULL; +} + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + return 0; +} + +static inline bool bnxt_dev_is_vf_rep(struct net_device *dev) +{ + return false; +} + +static inline int bnxt_vf_reps_alloc(struct bnxt *bp) +{ + return -EINVAL; +} + +static inline void bnxt_vf_reps_free(struct bnxt *bp) +{ +} + +static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp) +{ + return false; +} + +static inline int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +int bnxt_tf_port_init(struct bnxt *bp, u16 flag); +int bnxt_tf_port_init_p7(struct bnxt *bp); +void bnxt_tf_port_deinit(struct bnxt *bp, u16 flag); +int bnxt_tfo_init(struct bnxt *bp); +void bnxt_tfo_deinit(struct bnxt *bp); +void bnxt_custom_tf_port_init(struct bnxt *bp); +void bnxt_custom_tf_port_deinit(struct bnxt *bp); +int 
bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, u16 fid, u16 *vnic_id, u16 *svif); +#ifdef CONFIG_DEBUG_FS +void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir); +#endif /* CONFIG_DEBUG_FS */ + +#else +static inline int bnxt_vf_reps_create(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_vf_reps_destroy(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} + +static inline struct net_device *bnxt_tf_get_vf_rep(struct bnxt *bp, + struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info + *tpa_info) +{ + return NULL; +} + +static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) +{ + return 0; +} + +static inline bool bnxt_dev_is_vf_rep(struct net_device *dev) +{ + return false; +} + +static inline int bnxt_vf_reps_alloc(struct bnxt *bp) +{ + return -EINVAL; +} + +static inline void bnxt_vf_reps_free(struct bnxt *bp) +{ +} + +static inline bool bnxt_tc_is_switchdev_mode(struct bnxt *bp) +{ + return false; +} + +static inline int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_hwrm_cfa_pair_exists(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_tf_port_init(struct bnxt *bp) +{ + return 0; +} + +static inline int bnxt_tf_port_init_p7(struct bnxt *bp) +{ + return 0; +} + +static inline int bnxt_tfo_init(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_tfo_deinit(struct bnxt *bp) +{ +} + +static inline void bnxt_tf_port_deinit(struct bnxt *bp) +{ +} + +static inline void bnxt_custom_tf_port_init(struct bnxt *bp) +{ +} + +static inline void 
bnxt_custom_tf_port_deinit(struct bnxt *bp) +{ +} + +#ifdef CONFIG_DEBUG_FS + +void bnxt_tf_debugfs_create_files(struct bnxt *bp, u8 tsid, struct dentry *port_dir) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_VF_REPS */ +#endif /* BNXT_VFR_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.c new file mode 100644 index 000000000000..2893f438434d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.c @@ -0,0 +1,659 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2023 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#ifdef HAVE_NDO_XDP +#include +#ifdef HAVE_BPF_TRACE +#include +#endif +#include +#endif +#ifdef CONFIG_PAGE_POOL +#ifdef HAVE_PAGE_POOL_HELPERS_H +#include +#else +#include +#endif +#endif +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_xdp.h" +#include "bnxt_xsk.h" + +DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + +struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp) +{ + struct bnxt_sw_tx_bd *tx_buf; + struct tx_bd *txbd; + int num_frags = 0; + u32 flags; + u16 prod; + struct skb_shared_info *sinfo; +#ifdef HAVE_XDP_MULTI_BUFF + int i; +#endif + + if (xdp && xdp_buff_has_frags(xdp)) { + sinfo = xdp_get_shared_info_from_buff(xdp); + num_frags = sinfo->nr_frags; + } + + /* fill up the first buffer */ + prod = txr->tx_prod; + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + + tx_buf->nr_frags = num_frags; + if (xdp) + tx_buf->page = virt_to_head_page(xdp->data); + + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + flags = (len << TX_BD_LEN_SHIFT) | 
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + bnxt_lhint_arr[len >> 9]; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_haddr = cpu_to_le64(mapping); + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags); +#ifdef HAVE_XDP_MULTI_BUFF + /* now let us fill up the frags into the next buffers */ + for (i = 0; i < num_frags ; i++) { + skb_frag_t *frag = &sinfo->frags[i]; + struct bnxt_sw_tx_bd *frag_tx_buf; + dma_addr_t frag_mapping; + int frag_len; + + prod = NEXT_TX(prod); + WRITE_ONCE(txr->tx_prod, prod); + + /* first fill up the first buffer */ + frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + frag_tx_buf->page = skb_frag_page(frag); + + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + + frag_len = skb_frag_size(frag); + flags = frag_len << TX_BD_LEN_SHIFT; + txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) + + skb_frag_off(frag); + txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); + + len = frag_len; + } + +#endif + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | + TX_BD_FLAGS_PACKET_END); + prod = NEXT_TX(prod); + WRITE_ONCE(txr->tx_prod, prod); + + /* Sync TX BD */ + wmb(); + return tx_buf; +} + +#ifdef HAVE_NDO_XDP +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + + return !!xdp_prog; +} + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 *data_ptr, unsigned int len, + struct xdp_buff *xdp) +{ + struct bnxt_sw_rx_bd *rx_buf; + u32 buflen = BNXT_RX_PAGE_SIZE; + struct pci_dev *pdev; + dma_addr_t mapping; + u32 offset; + + pdev = bp->pdev; + rx_buf = &rxr->rx_buf_ring[cons]; + offset = bp->rx_offset; + + mapping = rx_buf->mapping - bp->rx_dma_offset; + dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir); + + xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); + 
xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true); +} + +void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, u16 rx_prod, + struct xdp_buff *xdp) +{ + struct bnxt_sw_tx_bd *tx_buf; + + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); + tx_buf->rx_prod = rx_prod; + tx_buf->action = XDP_TX; + txr->xdp_tx_pending++; +} + +#ifdef HAVE_XDP_FRAME +static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, + struct xdp_frame *xdpf) +{ + struct bnxt_sw_tx_bd *tx_buf; + + tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); + tx_buf->action = XDP_REDIRECT; + tx_buf->xdpf = xdpf; + dma_unmap_addr_set(tx_buf, mapping, mapping); + dma_unmap_len_set(tx_buf, len, 0); +} +#endif + +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; +#ifdef HAVE_XSK_SUPPORT + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; +#endif + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + bool rx_doorbell_needed = false; + u16 tx_hw_cons = txr->tx_hw_cons; + struct bnxt_sw_tx_bd *tx_buf; + u16 tx_cons = txr->tx_cons; + u16 last_tx_cons = tx_cons; + int i, frags, xsk_tx = 0; + + if (!budget) + return; + + while (RING_TX(bp, tx_cons) != tx_hw_cons) { + tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)]; + + if (tx_buf->action == XDP_REDIRECT) { + struct pci_dev *pdev = bp->pdev; + + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); +#ifdef HAVE_XDP_FRAME + xdp_return_frame(tx_buf->xdpf); +#endif + tx_buf->action = 0; + tx_buf->xdpf = NULL; + } else if (tx_buf->action == XDP_TX) { + tx_buf->action = 0; + rx_doorbell_needed = true; + last_tx_cons = tx_cons; + + frags = tx_buf->nr_frags; + for (i = 0; i < frags; i++) { + tx_cons = NEXT_TX(tx_cons); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)]; +#ifdef CONFIG_PAGE_POOL + 
page_pool_recycle_direct(rxr->page_pool, tx_buf->page); +#else + __free_page(tx_buf->page); +#endif + } + txr->xdp_tx_pending--; + } else if (tx_buf->action == BNXT_XSK_TX) { + rx_doorbell_needed = false; + xsk_tx++; + } else { + bnxt_sched_reset_txr(bp, txr, tx_cons); + return; + } + tx_cons = NEXT_TX(tx_cons); + } + bnapi->events &= ~BNXT_TX_CMP_EVENT; + WRITE_ONCE(txr->tx_cons, tx_cons); + +#ifdef HAVE_XSK_SUPPORT + if (txr->xsk_pool && xsk_tx) { + xsk_tx_completed(txr->xsk_pool, xsk_tx); + cpr->sw_stats->xsk_stats.xsk_tx_completed += xsk_tx; + } + if (xsk_uses_need_wakeup(txr->xsk_pool)) + xsk_set_tx_need_wakeup(txr->xsk_pool); +#endif + if (rx_doorbell_needed) { + if (!txr->xdp_tx_pending) { + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + } else { + tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)]; + bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); + } + } +} + +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp) +{ + struct skb_shared_info *shinfo; + int i; + + if (!xdp || !xdp_buff_has_frags(xdp)) + return; + shinfo = xdp_get_shared_info_from_buff(xdp); + if (!shinfo) + return; + + for (i = 0; i < shinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&shinfo->frags[i]); + +#ifdef CONFIG_PAGE_POOL + page_pool_recycle_direct(rxr->page_pool, page); +#else + __free_page(page); +#endif + } + shinfo->nr_frags = 0; +} + +/* returns the following: + * true - packet consumed by XDP and new buffer is allocated. + * false - packet should be passed to the stack. 
+ */ +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, struct page *page, u8 **data_ptr, + unsigned int *len, u8 *event) +{ + struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_rx_bd *rx_buf; + struct pci_dev *pdev; + dma_addr_t mapping; + u32 tx_needed = 1; + void *orig_data; + u32 tx_avail; + u32 offset; + u32 act; + + if (!xdp_prog) + return false; + + pdev = bp->pdev; + offset = bp->rx_offset; + + txr = rxr->bnapi->tx_ring[0]; + /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */ + orig_data = xdp->data; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + + tx_avail = bnxt_tx_avail(bp, txr); + /* If there are pending XDP_TX packets, we must not update the rx + * producer yet because some RX buffers may still be on the TX ring. + */ + if (txr->xdp_tx_pending) + *event &= ~BNXT_RX_EVENT; + +#if XDP_PACKET_HEADROOM + *len = xdp->data_end - xdp->data; + if (orig_data != xdp->data) { + offset = xdp->data - xdp->data_hard_start; + *data_ptr = xdp->data_hard_start + offset; + } +#endif + + switch (act) { + case XDP_PASS: + return false; + + case XDP_TX: + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; + *event = 0; + + if (unlikely(xdp_buff_has_frags(xdp))) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + + tx_needed += sinfo->nr_frags; + *event = BNXT_AGG_EVENT; + } + + if (tx_avail < tx_needed) { + trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, xdp); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + } + + dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, + bp->rx_dir); + + *event &= ~BNXT_RX_EVENT; + *event |= BNXT_TX_EVENT; + __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, + NEXT_RX(rxr->rx_prod), xdp); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + case XDP_REDIRECT: + /* if we are calling this here then we know that the + * redirect is coming from a frame 
received by the + * bnxt_en driver. + */ + rx_buf = &rxr->rx_buf_ring[cons]; + mapping = rx_buf->mapping - bp->rx_dma_offset; + dma_unmap_page_attrs(&pdev->dev, mapping, + BNXT_RX_PAGE_SIZE, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + + /* if we are unable to allocate a new buffer, abort and reuse */ + if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { + trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_xdp_buff_frags_free(rxr, xdp); + bnxt_reuse_rx_data(rxr, cons, page); + return true; + } + + if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) { + trace_xdp_exception(bp->dev, xdp_prog, act); +#ifdef CONFIG_PAGE_POOL + page_pool_recycle_direct(rxr->page_pool, page); +#else + __free_page(page); +#endif + return true; + } + + *event |= BNXT_REDIRECT_EVENT; + break; + default: + bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(bp->dev, xdp_prog, act); + fallthrough; + case XDP_DROP: + bnxt_xdp_buff_frags_free(rxr, xdp); + bnxt_reuse_rx_data(rxr, cons, page); + break; + } + return true; +} + +#ifdef HAVE_XDP_FRAME +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,13,0) +int bnxt_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); + struct pci_dev *pdev = bp->pdev; + struct bnxt_tx_ring_info *txr; + dma_addr_t mapping; + int nxmit = 0; + int ring; + int i; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + !bp->tx_nr_rings_xdp || + !xdp_prog) + return -EINVAL; + + ring = smp_processor_id() % bp->tx_nr_rings_xdp; + txr = &bp->tx_ring[ring]; + + if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) + return -EINVAL; + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_lock(&txr->tx_lock); + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdp = frames[i]; + + if (!bnxt_tx_avail(bp, txr)) + break; + + mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, + 
DMA_TO_DEVICE); + + if (dma_mapping_error(&pdev->dev, mapping)) + break; + + __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); + nxmit++; + } + + if (flags & XDP_XMIT_FLUSH) { + /* Sync BD data before updating doorbell */ + wmb(); + bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); + } + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_unlock(&txr->tx_lock); + + return nxmit; +} +#else +int bnxt_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); + struct pci_dev *pdev = bp->pdev; + struct bnxt_tx_ring_info *txr; + dma_addr_t mapping; + int drops = 0; + int ring; + int i; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state) || + !bp->tx_nr_rings_xdp || + !xdp_prog) + return -EINVAL; + + ring = smp_processor_id() % bp->tx_nr_rings_xdp; + txr = &bp->tx_ring[ring]; + + if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) + return -EINVAL; + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_lock(&txr->tx_lock); + + for (i = 0; i < num_frames; i++) { + struct xdp_frame *xdp = frames[i]; + + if (!bnxt_tx_avail(bp, txr)) { + xdp_return_frame_rx_napi(xdp); + drops++; + continue; + } + + mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, + DMA_TO_DEVICE); + + if (dma_mapping_error(&pdev->dev, mapping)) { + xdp_return_frame_rx_napi(xdp); + drops++; + continue; + } + __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); + } + + if (flags & XDP_XMIT_FLUSH) { + /* Sync BD data before updating doorbell */ + wmb(); + bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); + } + + if (static_branch_unlikely(&bnxt_xdp_locking_key)) + spin_unlock(&txr->tx_lock); + + return num_frames - drops; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5,13,0) */ +#endif + +/* Under rtnl_lock */ +static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) +{ + struct net_device *dev = bp->dev; + int tx_xdp = 0, tx_cp, rc, tc; + 
struct bpf_prog *old; + +#ifndef HAVE_XDP_MULTI_BUFF + if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) { + netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n", +#else + if (prog && !prog->aux->xdp_has_frags && + bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU(bp)) { + netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n", +#endif + bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU(bp)); + return -EOPNOTSUPP; + } + if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { + netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n"); + return -EOPNOTSUPP; + } + if (prog) + tx_xdp = bp->rx_nr_rings; + + tc = bp->num_tc; + if (!tc) + tc = 1; + rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, + true, tc, tx_xdp); + if (rc) { + netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); + return rc; + } + if (netif_running(dev)) + bnxt_close_nic(bp, true, false); + + old = xchg(&bp->xdp_prog, prog); + if (old) + bpf_prog_put(old); + + if (prog) { + bnxt_set_rx_skb_mode(bp, true); + xdp_features_set_redirect_target(dev, true); + } else { + int rx, tx; + + xdp_features_clear_redirect_target(dev); + bnxt_set_rx_skb_mode(bp, false); + bnxt_get_max_rings(bp, &rx, &tx, true); + if (rx > 1) { + bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS; + bp->dev->hw_features |= NETIF_F_LRO; + /* Re-enable TPA if necessary */ + netdev_update_features(dev); + } + } + bp->tx_nr_rings_xdp = tx_xdp; + bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; + tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); + bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings); + bnxt_set_tpa_flags(bp); + bnxt_set_ring_params(bp); + + if (netif_running(dev)) + return bnxt_open_nic(bp, true, false); + + return 0; +} + +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + switch (xdp->command) { + case XDP_SETUP_PROG: + rc = bnxt_xdp_set(bp, xdp->prog); + break; +#ifdef HAVE_XDP_QUERY_PROG + case 
XDP_QUERY_PROG: +#ifdef HAVE_PROG_ATTACHED + xdp->prog_attached = !!bp->xdp_prog; +#endif +#ifdef HAVE_IFLA_XDP_PROG_ID + xdp->prog_id = bp->xdp_prog ? bp->xdp_prog->aux->id : 0; +#endif + rc = 0; + break; +#endif /* HAVE_XDP_QUERY_PROG */ +#ifdef HAVE_XSK_SUPPORT + case XDP_SETUP_XSK_POOL: + netdev_info(bp->dev, "%s(): XDP_SETUP_XSK_POOL on queue_id: %d\n", + __func__, xdp->xsk.queue_id); + return bnxt_xdp_setup_pool(bp, xdp->xsk.pool, xdp->xsk.queue_id); +#endif + default: + rc = -EINVAL; + break; + } + return rc; +} + +#ifdef HAVE_XDP_MULTI_BUFF +struct sk_buff * +bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, + struct page_pool *pool, struct xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + + if (!skb || !sinfo) + return NULL; + skb_checksum_none_assert(skb); + if (RX_CMP_L4_CS_OK(rxcmp1)) { + if (bp->dev->features & NETIF_F_RXCSUM) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = RX_CMP_ENCAP(rxcmp1); + } + } + xdp_update_skb_shared_info(skb, num_frags, + sinfo->xdp_frags_size, + BNXT_RX_PAGE_SIZE * sinfo->nr_frags, + xdp_buff_is_frag_pfmemalloc(xdp)); + return skb; +} +#endif +#else +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ +} + +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, void *page, u8 **data_ptr, + unsigned int *len, u8 *event) +{ + return false; +} + +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + return false; +} + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 *data_ptr, unsigned int len, + struct xdp_buff *xdp) +{ +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.h new file mode 100644 index 000000000000..5d08a150b91a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_xdp.h @@ -0,0 +1,57 @@ +/* 
Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2018 Broadcom Limited + * Copyright (c) 2018-2022 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_XDP_H +#define BNXT_XDP_H + +DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key); + +struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, + struct xdp_buff *xdp); +void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + dma_addr_t mapping, u32 len, u16 rx_prod, + struct xdp_buff *xdp); +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget); +#ifdef HAVE_NDO_XDP +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, struct page *page, u8 **data_ptr, + unsigned int *len, u8 *event); +bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, u8 **data_ptr, + unsigned int *len, u8 *event); +#else +bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, void *page, u8 **data_ptr, + unsigned int *len, u8 *event); +bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, u8 **data_ptr, + unsigned int *len, u8 *event); +#endif +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); +#ifdef HAVE_XDP_FRAME +int bnxt_xdp_xmit(struct net_device *dev, int num_frames, + struct xdp_frame **frames, u32 flags); +#endif +bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr); + +void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, + u16 cons, u8 *data_ptr, unsigned int len, + struct xdp_buff *xdp); +#ifdef HAVE_XDP_MULTI_BUFF +struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, + u8 num_frags, struct page_pool *pool, + struct 
xdp_buff *xdp, + struct rx_cmp_ext *rxcmp1); +void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, + struct xdp_buff *xdp); +#endif +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.c b/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.c new file mode 100644 index 000000000000..1fbdd83cf5fd --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.c @@ -0,0 +1,490 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2024 Broadcom Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#ifdef HAVE_NDO_XDP +#include +#ifdef HAVE_BPF_TRACE +#include +#endif +#include +#endif +#ifdef CONFIG_PAGE_POOL +#ifdef HAVE_PAGE_POOL_HELPERS_H +#include +#else +#include +#endif +#endif +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_xdp.h" +#include "bnxt_xsk.h" + +#if defined(CONFIG_XDP_SOCKETS) && defined(HAVE_NDO_BPF) && defined(HAVE_XSK_SUPPORT) +int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + struct bnxt_napi *bnapi; + + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) + return -ENETDOWN; + + if (queue_id >= bp->rx_nr_rings || queue_id >= bp->tx_nr_rings_xdp) + return -EINVAL; + + rxr = &bp->rx_ring[queue_id]; + txr = &bp->tx_ring[queue_id]; + + if (!rxr->xsk_pool && !txr->xsk_pool) + return -ENXIO; + + bnapi = bp->bnapi[queue_id]; + cpr = &bnapi->cp_ring; + if (!napi_if_scheduled_mark_missed(&bnapi->napi)) { + cpr->sw_stats->xsk_stats.xsk_wakeup++; + napi_schedule(&bnapi->napi); + } + + return 0; +} + +static void bnxt_xsk_disable_rx_ring(struct bnxt *bp, u16 queue_id) +{ + struct bnxt_rx_ring_info *rxr; + struct bnxt_vnic_info *vnic; + struct bnxt_napi 
*bnapi; + + rxr = &bp->rx_ring[queue_id]; + bnapi = rxr->bnapi; + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + +#ifdef HAVE_XDP_RXQ_INFO + if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) + xdp_rxq_info_unreg(&rxr->xdp_rxq); +#endif + vnic->mru = 0; + bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + napi_disable(&bnapi->napi); + bnxt_free_one_rx_buf_ring(bp, rxr); + bnxt_hwrm_rx_ring_free(bp, rxr, 0); +} + +static int bnxt_xsk_enable_rx_ring(struct bnxt *bp, u16 queue_id) +{ + struct bnxt_rx_ring_info *rxr; + struct bnxt_vnic_info *vnic; + struct bnxt_napi *bnapi; + int rc, i; + u32 prod; + + rxr = &bp->rx_ring[queue_id]; + bnapi = rxr->bnapi; + vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE]; + +#ifdef HAVE_XDP_RXQ_INFO + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, queue_id, 0); + if (rc < 0) + return rc; + + rxr->xsk_pool = xsk_get_pool_from_qid(bp->dev, queue_id); + if (BNXT_RING_RX_ZC_MODE(rxr) && rxr->xsk_pool) { + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, NULL); + xsk_pool_set_rxq_info(rxr->xsk_pool, &rxr->xdp_rxq); + netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag set for rxring:%d\n", __func__, queue_id); + } else { + rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq, + MEM_TYPE_PAGE_POOL, rxr->page_pool); + netdev_dbg(bp->dev, "%s(): AF_XDP_ZC flag RESET for rxring:%d\n", + __func__, queue_id); + } +#endif + rxr->rx_next_cons = 0; + bnxt_hwrm_rx_ring_alloc(bp, rxr, queue_id); + + rxr->rx_prod = 0; + prod = rxr->rx_prod; + for (i = 0; i < bp->rx_ring_size; i++) { + if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", + queue_id, i, bp->rx_ring_size); + break; + } + prod = NEXT_RX(prod); + } + rxr->rx_prod = prod; + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + napi_enable(&bnapi->napi); + vnic->mru = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; + bnxt_hwrm_vnic_update(bp, vnic, VNIC_UPDATE_REQ_ENABLES_MRU_VALID); + + return rc; +} + +static bool 
bnxt_check_xsk_q_in_dflt_vnic(struct bnxt *bp, u16 queue_id) +{ + u16 tbl_size, i; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < tbl_size; i++) { + if (queue_id == bp->rss_indir_tbl[i]) { + netdev_err(bp->dev, + "queue_id: %d is in default RSS context, not supported\n", + queue_id); + return true; + } + } + return false; +} + +static int bnxt_validate_xsk(struct bnxt *bp, u16 queue_id) +{ + if (!(bp->flags & BNXT_FLAG_RFS)) { + netdev_err(bp->dev, + "nTUPLE feature needs to be on for AF_XDP support\n"); + return -EOPNOTSUPP; + } + + if (bp->num_rss_ctx) { + netdev_err(bp->dev, + "AF_XDP not supported with additional RSS contexts\n"); + return -EOPNOTSUPP; + } + + if (bnxt_check_xsk_q_in_dflt_vnic(bp, queue_id)) + return -EOPNOTSUPP; + + return 0; +} + +static int bnxt_xdp_enable_pool(struct bnxt *bp, struct xsk_buff_pool *pool, + u16 queue_id) +{ + struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); + struct device *dev = &bp->pdev->dev; + struct bnxt_rx_ring_info *rxr; + bool needs_reset; + int rc; + + rc = bnxt_validate_xsk(bp, queue_id); + if (rc) + return rc; + + rxr = &bp->rx_ring[queue_id]; + rc = xsk_pool_dma_map(pool, dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (rc) { + netdev_err(bp->dev, "Failed to map xsk pool\n"); + return rc; + } + + set_bit(queue_id, bp->af_xdp_zc_qs); + /* Check if XDP program is already attached, in which case + * need to explicitly quiesce traffic, free the regular path + * resources and reallocate AF_XDP resources for the rings. 
+ * Otherwise, in the normal case, resources for AF_XDP will + * get created anyway as part of the XDP program attach + */ + needs_reset = netif_running(bp->dev) && xdp_prog; + + if (needs_reset) { + /* Check to differentiate b/n Tx/Rx only modes */ + if (xsk_buff_can_alloc(pool, bp->rx_ring_size)) { + bnxt_xsk_disable_rx_ring(bp, queue_id); + rxr->flags |= BNXT_RING_FLAG_AF_XDP_ZC; + bnxt_xsk_enable_rx_ring(bp, queue_id); + } else { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[queue_id]; + struct bnxt_napi *bnapi; + + bnapi = bp->bnapi[queue_id]; + bnxt_lock_napi(bnapi); + txr->xsk_pool = xsk_get_pool_from_qid(bp->dev, queue_id); + bnxt_unlock_napi(bnapi); + } + } + + return rc; +} + +static int bnxt_xdp_disable_pool(struct bnxt *bp, u16 queue_id) +{ + struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); + struct bnxt_rx_ring_info *rxr; + struct bnxt_tx_ring_info *txr; + struct xsk_buff_pool *pool; + struct bnxt_napi *bnapi; + bool needs_reset; + + pool = xsk_get_pool_from_qid(bp->dev, queue_id); + if (!pool) + return -EINVAL; + + if (!bp->bnapi || + test_bit(BNXT_STATE_NAPI_DISABLED, &bp->state)) { + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + return 0; + } + + rxr = &bp->rx_ring[queue_id]; + txr = &bp->tx_ring[queue_id]; + + bnapi = bp->bnapi[queue_id]; + + bnxt_lock_napi(bnapi); + clear_bit(queue_id, bp->af_xdp_zc_qs); + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + + needs_reset = netif_running(bp->dev) && xdp_prog; + + if (needs_reset) { + if (xsk_buff_can_alloc(pool, bp->rx_ring_size)) { + bnxt_xsk_disable_rx_ring(bp, queue_id); + rxr->flags &= ~BNXT_RING_FLAG_AF_XDP_ZC; + bnxt_xsk_enable_rx_ring(bp, queue_id); + } + } + txr->xsk_pool = NULL; + + bnxt_unlock_napi(bnapi); + return 0; +} + +int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool, + u16 queue_id) +{ + if (queue_id >= bp->rx_nr_rings) + return -EINVAL; + + return pool ? 
bnxt_xdp_enable_pool(bp, pool, queue_id) :
+		      bnxt_xdp_disable_pool(bp, queue_id);
+}
+
+/* returns the following:
+ * true - packet consumed by XDP and new buffer is allocated.
+ * false - packet should be passed to the stack.
+ */
+bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+		 struct xdp_buff *xdp, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct bnxt_napi *bnapi;
+	struct pci_dev *pdev;
+	dma_addr_t mapping;
+	u32 tx_needed = 1;
+	void *orig_data;
+	u32 tx_avail;
+	u32 offset;
+	u32 act;
+
+	if (!xdp_prog)
+		return false;
+
+	pdev = bp->pdev;
+	offset = bp->rx_offset;
+
+	txr = rxr->bnapi->tx_ring[0];
+	xdp->data_end = xdp->data + *len;
+
+	orig_data = xdp->data;
+
+	xsk_buff_dma_sync_for_cpu(xdp, rxr->xsk_pool);
+
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
+
+	tx_avail = bnxt_tx_avail(bp, txr);
+	/* If there are pending XDP_TX packets, we must not update the rx
+	 * producer yet because some RX buffers may still be on the TX ring.
+	 */
+	if (txr->xdp_tx_pending)
+		*event &= ~BNXT_RX_EVENT;
+
+#if XDP_PACKET_HEADROOM
+	*len = xdp->data_end - xdp->data;
+	if (orig_data != xdp->data) {
+		offset = xdp->data - xdp->data_hard_start;
+		*data_ptr = xdp->data_hard_start + offset;
+	}
+#endif
+	bnapi = rxr->bnapi;
+	cpr = &bnapi->cp_ring;
+
+	switch (act) {
+	case XDP_PASS:
+		return false;
+
+	case XDP_TX:
+		rx_buf = &rxr->rx_buf_ring[cons];
+		mapping = rx_buf->mapping - bp->rx_dma_offset;
+		*event = 0;
+
+		if (tx_avail < tx_needed) {
+			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_reuse_rx_data(rxr, cons, xdp);
+			return true;
+		}
+
+		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+					   bp->rx_dir);
+
+		*event &= ~BNXT_RX_EVENT;
+		*event |= BNXT_TX_EVENT;
+		/* Pass NULL as xdp->data here is buffer from the XSK pool i.e userspace */
+		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+				NEXT_RX(rxr->rx_prod), NULL);
+		bnxt_reuse_rx_data(rxr, cons, xdp);
+		return true;
+
+	case XDP_REDIRECT:
+		/* if we are calling this here then we know that the
+		 * redirect is coming from a frame received by the
+		 * bnxt_en driver.
+ */ + + /* if we are unable to allocate a new buffer, abort and reuse */ + if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { + trace_xdp_exception(bp->dev, xdp_prog, act); + bnxt_reuse_rx_data(rxr, cons, xdp); + cpr->sw_stats->xsk_stats.xsk_rx_alloc_fail++; + return true; + } + + if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) { + trace_xdp_exception(bp->dev, xdp_prog, act); + cpr->sw_stats->xsk_stats.xsk_rx_redirect_fail++; + bnxt_reuse_rx_data(rxr, cons, xdp); + return true; + } + + *event |= BNXT_REDIRECT_EVENT; + cpr->sw_stats->xsk_stats.xsk_rx_success++; + break; + default: + bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(bp->dev, xdp_prog, act); + fallthrough; + case XDP_DROP: + break; + } + return true; +} + +bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; + struct bnxt_cp_ring_info *cpr; + int cpu = smp_processor_id(); + struct bnxt_sw_tx_bd *tx_buf; + struct netdev_queue *txq; + u16 prod = txr->tx_prod; + bool xsk_more = true; + struct tx_bd *txbd; + dma_addr_t mapping; + int i, xsk_tx = 0; + int num_frags = 0; + u32 len, flags; + + cpr = &bnapi->cp_ring; + txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + + __netif_tx_lock(txq, cpu); + + for (i = 0; i < budget; i++) { + struct xdp_desc desc; + + if (bnxt_tx_avail(bp, txr) < 2) { + cpr->sw_stats->xsk_stats.xsk_tx_ring_full++; + xsk_more = false; + break; + } + + if (!xsk_tx_peek_desc(txr->xsk_pool, &desc)) { + xsk_more = false; + break; + } + + mapping = xsk_buff_raw_get_dma(txr->xsk_pool, desc.addr); + len = desc.len; + + xsk_buff_raw_dma_sync_for_device(txr->xsk_pool, mapping, desc.len); + + tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; + tx_buf->action = BNXT_XSK_TX; + + txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; + flags = (len << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + bnxt_lhint_arr[len >> 9]; + 
txbd->tx_bd_len_flags_type = cpu_to_le32(flags); + txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags); + txbd->tx_bd_haddr = cpu_to_le64(mapping); + + dma_unmap_addr_set(tx_buf, mapping, mapping); + dma_unmap_len_set(tx_buf, len, len); + + flags &= ~TX_BD_LEN; + txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | + flags | TX_BD_FLAGS_PACKET_END); + prod = NEXT_TX(prod); + txr->tx_prod = prod; + xsk_tx++; + } + + if (xsk_tx) { + /* write the doorbell */ + wmb(); + xsk_tx_release(txr->xsk_pool); + bnxt_db_write(bp, &txr->tx_db, prod); + cpr->sw_stats->xsk_stats.xsk_tx_sent_pkts += xsk_tx; + } + + __netif_tx_unlock(txq); + return xsk_more; +} +#else +bool bnxt_rx_xsk(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, + struct xdp_buff *xdp, u8 **data_ptr, unsigned int *len, u8 *event) +{ + return false; +} + +int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + return 0; +} + +int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool, + u16 queue_id) +{ + return 0; +} + +bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) +{ + return false; +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.h b/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.h new file mode 100644 index 000000000000..5a30299317ee --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/bnxt_xsk.h @@ -0,0 +1,21 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2024 Broadcom Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ */
+
+#ifndef __BNXT_XSK_H__
+#define __BNXT_XSK_H__
+
+#ifdef HAVE_XSK_SUPPORT
+#include <net/xdp_sock_drv.h>
+#endif
+
+int bnxt_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+int bnxt_xdp_setup_pool(struct bnxt *bp, struct xsk_buff_pool *pool,
+			u16 queue_id);
+bool bnxt_xsk_xmit(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
+#endif
diff --git a/drivers/thirdparty/release-drivers/bnxt/find_src.awk b/drivers/thirdparty/release-drivers/bnxt/find_src.awk
new file mode 100755
index 000000000000..c283c31b47f1
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/find_src.awk
@@ -0,0 +1,37 @@
+#!/usr/bin/awk -f
+
+BEGIN{
+	if (struct) {
+		start="struct " struct " {"
+	} else if (enum) {
+		start="enum " enum " {"
+	} else if (define) {
+		pattern="#define " define
+		open=1
+	} else {
+		print "Usage: find_src.awk -v struct=<name>|enum=<name>|define=<name> [-v pattern=<regex>]"
+		print "\nPrints lines associated with matched elements and optionally further constrains matching within such elements by an additional regex pattern."
+		exit 1
+	}
+}
+$0~/{/{
+	open && open++
+}
+$0~start{
+	open=1;
+}
+{
+	if (line_cont) {
+		print $0
+		line_cont=match($0, /\\$/)
+	}
+}
+$0~pattern{
+	if (open) {
+		print $0
+		line_cont=match($0, /\\$/)
+	}
+}
+$0~/}/{
+	open && open--
+}
diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.c
new file mode 100644
index 000000000000..93b798aa9be4
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */ +#include +#include +#include +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bitalloc.h" + +/** + * bnxt_ba_init - allocate memory for bitmap + * @pool: Pointer to struct bitalloc + * @free: free=true, sets all bits to 1 + * In a bitmap, bit 1 means index is free and + * 0 means index in-use, because we need to allocate + * from reverse also and in those cases we can search + * via find_last_set(), dont have find_last_zero() api. + * + * Returns: 0 on success, -ve otherwise + */ +int bnxt_ba_init(struct bitalloc *pool, int size, bool free) +{ + if (unlikely(!pool || size < 1 || size > BITALLOC_MAX_SIZE)) + return -EINVAL; + + pool->bitmap = bitmap_zalloc(size, GFP_KERNEL); + if (unlikely(!pool->bitmap)) + return -ENOMEM; + + if (free) { + pool->size = size; + pool->free_count = size; + bitmap_set(pool->bitmap, 0, size); + } else { + pool->size = size; + pool->free_count = 0; + } + + return 0; +} + +/** + * bnxt_ba_deinit - Free the malloced memory for the bitmap + * @pool: Pointer to struct bitalloc + * + * Returns: void + */ +void bnxt_ba_deinit(struct bitalloc *pool) +{ + if (unlikely(!pool || !pool->bitmap)) + return; + + bitmap_free(pool->bitmap); + pool->size = 0; + pool->free_count = 0; +} + +/** + * bnxt_ba_alloc - Allocate a lowest free index + * @pool: Pointer to struct bitalloc + * + * Returns: -1 on failure, index on success + */ +int bnxt_ba_alloc(struct bitalloc *pool) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || !pool->free_count)) + return r; + + r = find_first_bit(pool->bitmap, pool->size); + if (likely(r < pool->size)) { + clear_bit(r, pool->bitmap); + --pool->free_count; + } + return r; +} + +/** + * bnxt_ba_alloc_reverse - Allocate a highest free index + * @pool: Pointer to struct bitalloc + * + * Returns: -1 on failure, index on success + */ +int bnxt_ba_alloc_reverse(struct bitalloc *pool) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || !pool->free_count)) + return r; + + r = 
find_last_bit(pool->bitmap, pool->size); + if (likely(r < pool->size)) { + clear_bit(r, pool->bitmap); + --pool->free_count; + } + return r; +} + +/** + * bnxt_ba_alloc_index - Allocate the requested index + * @pool: Pointer to struct bitalloc + * @index: Index to allocate + * + * Returns: -1 on failure, index on success + */ +int bnxt_ba_alloc_index(struct bitalloc *pool, int index) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size || + !pool->free_count)) + return r; + + if (likely(test_bit(index, pool->bitmap))) { + clear_bit(index, pool->bitmap); + --pool->free_count; + r = index; + } + + return r; +} + +/** + * bnxt_ba_free - Free the requested index if allocated + * @pool: Pointer to struct bitalloc + * @index: Index to free + * + * Returns: -1 on failure, 0 on success + */ +int bnxt_ba_free(struct bitalloc *pool, int index) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size)) + return r; + + if (unlikely(test_bit(index, pool->bitmap))) + return r; + + set_bit(index, pool->bitmap); + pool->free_count++; + return 0; +} + +/** + * bnxt_ba_inuse - Check if the requested index is already allocated + * @pool: Pointer to struct bitalloc + * @index: Index to check availability + * + * Returns: -1 on failure, 0 if it is free, 1 if it is allocated + */ +int bnxt_ba_inuse(struct bitalloc *pool, int index) +{ + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size)) + return -1; + + return !test_bit(index, pool->bitmap); +} + +/** + * bnxt_ba_inuse_free - Free the index if it was allocated + * @pool: Pointer to struct bitalloc + * @index: Index to be freed if it was allocated + * + * Returns: -1 on failure, 0 if it is free, 1 if it is in use + */ +int bnxt_ba_inuse_free(struct bitalloc *pool, int index) +{ + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size)) + return -1; + + if (bnxt_ba_free(pool, index) == 0) + return 
1; + + return 0; +} + +/** + * bnxt_ba_find_next_inuse - Find the next index allocated + * @pool: Pointer to struct bitalloc + * @index: Index from where to search for the next inuse index + * + * Returns: -1 on failure or if not found, else next index + */ +int bnxt_ba_find_next_inuse(struct bitalloc *pool, int index) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size)) + return r; + + r = find_next_zero_bit(pool->bitmap, pool->size, ++index); + if (unlikely(r == pool->size)) + return -1; + + return r; +} + +/** + * bnxt_ba_find_next_inuse_free - Free the next allocated index + * @pool: Pointer to struct bitalloc + * @index: Index from where to search for the next inuse index + * + * Returns: -1 on failure, else next inuse index that was freed + */ +int bnxt_ba_find_next_inuse_free(struct bitalloc *pool, int index) +{ + int r = -1; + + if (unlikely(!pool || !pool->bitmap || + index < 0 || index >= (int)pool->size)) + return r; + + r = find_next_zero_bit(pool->bitmap, pool->size, ++index); + if (unlikely(r == pool->size)) + return -1; + + if (likely(bnxt_ba_free(pool, r) == 0)) + return r; + + return -1; +} + +/** + * bnxt_ba_free_count - Available indexes that can be allocated. + * @pool: Pointer to struct bitalloc + * + * Returns: 0 - size, -ve on error + */ +int bnxt_ba_free_count(struct bitalloc *pool) +{ + if (unlikely(!pool)) + return -EINVAL; + + return (int)pool->free_count; +} + +/** + * bnxt_ba_inuse_count - Number of already allocated indexes. 
+ * @pool: Pointer to struct bitalloc + * + * Returns: 0 - size, -ve on error + */ +int bnxt_ba_inuse_count(struct bitalloc *pool) +{ + if (unlikely(!pool)) + return -EINVAL; + + return (int)(pool->size) - (int)(pool->free_count); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.h new file mode 100644 index 000000000000..dd6dac3fc467 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/bitalloc.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _BITALLOC_H_ +#define _BITALLOC_H_ + +#include +#include + +struct bitalloc { + u32 size; + u32 free_count; + unsigned long *bitmap; +}; + +#define BITALLOC_SIZEOF(size) (sizeof(struct bitalloc) + ((size) + 31) / 32) +#define BITALLOC_MAX_SIZE (32 * 32 * 32 * 32 * 32 * 32) + +/* Initialize the struct bitalloc and alloc bitmap memory */ +int bnxt_ba_init(struct bitalloc *pool, int size, bool free); + +/* Deinitialize the struct bitalloc and free bitmap memory */ +void bnxt_ba_deinit(struct bitalloc *pool); + +/* Allocate a lowest free index */ +int bnxt_ba_alloc(struct bitalloc *pool); + +/* Allocate the given index */ +int bnxt_ba_alloc_index(struct bitalloc *pool, int index); + +/* Allocate a highest free index */ +int bnxt_ba_alloc_reverse(struct bitalloc *pool); + +/* Test if index is in use */ +int bnxt_ba_inuse(struct bitalloc *pool, int index); + +/* Test if index is in use, but also free the index */ +int bnxt_ba_inuse_free(struct bitalloc *pool, int index); + +/* Find the next index is in use from a given index */ +int bnxt_ba_find_next_inuse(struct bitalloc *pool, int index); + +/* Find the next index is in use from a given index, and also free it */ +int bnxt_ba_find_next_inuse_free(struct bitalloc *pool, int index); + +/* Free the index */ +int bnxt_ba_free(struct bitalloc *pool, int index); + +/* Available number of indexes for 
allocation */ +int bnxt_ba_free_count(struct bitalloc *pool); + +/* Number of indexes that are allocated */ +int bnxt_ba_inuse_count(struct bitalloc *pool); + +#endif /* _BITALLOC_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p40_hw.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p40_hw.h new file mode 100644 index 000000000000..1f9e847f6096 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p40_hw.h @@ -0,0 +1,652 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ +/* Name: cfa_p40_hw.h + * + * Description: header for SWE based on Truflow + * + * Date: taken from 12/16/19 17:18:12 + * + * Note: This file was first generated using tflib_decode.py. + * + * Changes have been made due to lack of availability of xml for + * additional tables at this time (EEM Record and union table fields) + * Changes not autogenerated are noted in comments. + */ + +#ifndef _CFA_P40_HW_H_ +#define _CFA_P40_HW_H_ + +/* Valid TCAM entry. (for idx 5 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_BITPOS 166 +#define CFA_P40_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1 + +/* Key type (pass). (for idx 5 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_BITPOS 164 +#define CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_NUM_BITS 2 + +/* Tunnel HDR type. (for idx 5 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_BITPOS 160 +#define CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_NUM_BITS 4 + +/* Number of VLAN tags in tunnel l2 header. (for idx 4 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_BITPOS 158 +#define CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_NUM_BITS 2 + +/* Number of VLAN tags in l2 header. (for idx 4 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_BITPOS 156 +#define CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_NUM_BITS 2 + +/* Tunnel/Inner Source/Dest. MAC Address. 
*/ +#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_BITPOS 108 +#define CFA_P40_PROF_L2_CTXT_TCAM_MAC1_NUM_BITS 48 + +/* Tunnel Outer VLAN Tag ID. (for idx 3 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_BITPOS 96 +#define CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_NUM_BITS 12 + +/* Tunnel Inner VLAN Tag ID. (for idx 2 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_BITPOS 84 +#define CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_NUM_BITS 12 + +/* Source Partition. (for idx 2 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 80 +#define CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4 + +/* Source Virtual I/F. (for idx 2 ...) */ +#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72 +#define CFA_P40_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 8 + +/* Tunnel/Inner Source/Dest. MAC Address. */ +#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_BITPOS 24 +#define CFA_P40_PROF_L2_CTXT_TCAM_MAC0_NUM_BITS 48 + +/* Outer VLAN Tag ID. */ +#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_BITPOS 12 +#define CFA_P40_PROF_L2_CTXT_TCAM_OVID_NUM_BITS 12 + +/* Inner VLAN Tag ID. */ +#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_BITPOS 0 +#define CFA_P40_PROF_L2_CTXT_TCAM_IVID_NUM_BITS 12 + +enum cfa_p40_prof_l2_ctxt_tcam_flds { + CFA_P40_PROF_L2_CTXT_TCAM_VALID_FLD = 0, + CFA_P40_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 1, + CFA_P40_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 2, + CFA_P40_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 3, + CFA_P40_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 4, + CFA_P40_PROF_L2_CTXT_TCAM_MAC1_FLD = 5, + CFA_P40_PROF_L2_CTXT_TCAM_T_OVID_FLD = 6, + CFA_P40_PROF_L2_CTXT_TCAM_T_IVID_FLD = 7, + CFA_P40_PROF_L2_CTXT_TCAM_SPARIF_FLD = 8, + CFA_P40_PROF_L2_CTXT_TCAM_SVIF_FLD = 9, + CFA_P40_PROF_L2_CTXT_TCAM_MAC0_FLD = 10, + CFA_P40_PROF_L2_CTXT_TCAM_OVID_FLD = 11, + CFA_P40_PROF_L2_CTXT_TCAM_IVID_FLD = 12, + CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD +}; + +#define CFA_P40_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 167 + +/* Valid entry. (for idx 2 ...) 
*/ +#define CFA_P40_ACT_VEB_TCAM_VALID_BITPOS 79 +#define CFA_P40_ACT_VEB_TCAM_VALID_NUM_BITS 1 + +/* reserved program to 0. (for idx 2 ...) */ +#define CFA_P40_ACT_VEB_TCAM_RESERVED_BITPOS 78 +#define CFA_P40_ACT_VEB_TCAM_RESERVED_NUM_BITS 1 + +/* PF Parif Number. (for idx 2 ...) */ +#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_BITPOS 74 +#define CFA_P40_ACT_VEB_TCAM_PARIF_IN_NUM_BITS 4 + +/* Number of VLAN Tags. (for idx 2 ...) */ +#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_BITPOS 72 +#define CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_NUM_BITS 2 + +/* Dest. MAC Address. */ +#define CFA_P40_ACT_VEB_TCAM_MAC_BITPOS 24 +#define CFA_P40_ACT_VEB_TCAM_MAC_NUM_BITS 48 + +/* Outer VLAN Tag ID. */ +#define CFA_P40_ACT_VEB_TCAM_OVID_BITPOS 12 +#define CFA_P40_ACT_VEB_TCAM_OVID_NUM_BITS 12 + +/* Inner VLAN Tag ID. */ +#define CFA_P40_ACT_VEB_TCAM_IVID_BITPOS 0 +#define CFA_P40_ACT_VEB_TCAM_IVID_NUM_BITS 12 + +enum cfa_p40_act_veb_tcam_flds { + CFA_P40_ACT_VEB_TCAM_VALID_FLD = 0, + CFA_P40_ACT_VEB_TCAM_RESERVED_FLD = 1, + CFA_P40_ACT_VEB_TCAM_PARIF_IN_FLD = 2, + CFA_P40_ACT_VEB_TCAM_NUM_VTAGS_FLD = 3, + CFA_P40_ACT_VEB_TCAM_MAC_FLD = 4, + CFA_P40_ACT_VEB_TCAM_OVID_FLD = 5, + CFA_P40_ACT_VEB_TCAM_IVID_FLD = 6, + CFA_P40_ACT_VEB_TCAM_MAX_FLD +}; + +#define CFA_P40_ACT_VEB_TCAM_TOTAL_NUM_BITS 80 + +/* Entry is valid. 
*/ +#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_BITPOS 18 +#define CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_NUM_BITS 1 + +/* Action Record Pointer */ +#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_BITPOS 2 +#define CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_NUM_BITS 16 + +/* for resolving TCAM/EM conflicts */ +#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_BITPOS 0 +#define CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_NUM_BITS 2 + +enum cfa_p40_lkup_tcam_record_mem_flds { + CFA_P40_LKUP_TCAM_RECORD_MEM_VALID_FLD = 0, + CFA_P40_LKUP_TCAM_RECORD_MEM_ACT_REC_PTR_FLD = 1, + CFA_P40_LKUP_TCAM_RECORD_MEM_STRENGTH_FLD = 2, + CFA_P40_LKUP_TCAM_RECORD_MEM_MAX_FLD +}; + +#define CFA_P40_LKUP_TCAM_RECORD_MEM_TOTAL_NUM_BITS 19 + +/* (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_BITPOS 62 +#define CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_NUM_BITS 2 +enum cfa_p40_prof_ctxt_remap_mem_tpid_anti_spoof_ctl { + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_IGNORE = 0x0UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DROP = 0x1UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_DEFAULT = 0x2UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_SPIF = 0x3UL, + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_MAX = 0x3UL +}; + +/* (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_BITPOS 60 +#define CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_NUM_BITS 2 +enum cfa_p40_prof_ctxt_remap_mem_pri_anti_spoof_ctl { + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_IGNORE = 0x0UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DROP = 0x1UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_DEFAULT = 0x2UL, + + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_SPIF = 0x3UL, + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_MAX = 0x3UL +}; + +/* Bypass Source Properties Lookup. (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_BITPOS 59 +#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_NUM_BITS 1 + +/* SP Record Pointer. (for idx 1 ...) 
*/ +#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_BITPOS 43 +#define CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_NUM_BITS 16 + +/* BD Action pointer passing enable. (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_BITPOS 42 +#define CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_NUM_BITS 1 + +/* Default VLAN TPID. (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_BITPOS 39 +#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_NUM_BITS 3 + +/* Allowed VLAN TPIDs. (for idx 1 ...) */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_BITPOS 33 +#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_NUM_BITS 6 + +/* Default VLAN PRI. */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_BITPOS 30 +#define CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_NUM_BITS 3 + +/* Allowed VLAN PRIs. */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_BITPOS 22 +#define CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_NUM_BITS 8 + +/* Partition. */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_BITPOS 18 +#define CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_NUM_BITS 4 + +/* Bypass Lookup. */ +#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_BITPOS 17 +#define CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_NUM_BITS 1 + +/* L2 Context Remap Data. Action bypass mode (1) {7'd0,prof_vnic[9:0]} Note: + * should also set byp_lkup_en. 
Action bypass mode (0) byp_lkup_en(0) - + * {prof_func[6:0],l2_context[9:0]} byp_lkup_en(1) - {1'b0,act_rec_ptr[15:0]} + */ + +#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_BITPOS 0 +#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_NUM_BITS 12 + +#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_BITPOS 10 +#define CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_NUM_BITS 7 + +#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_BITPOS 0 +#define CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_NUM_BITS 10 + +#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_BITPOS 0 +#define CFA_P40_PROF_CTXT_REMAP_MEM_ARP_NUM_BITS 16 + +enum cfa_p40_prof_ctxt_remap_mem_flds { + CFA_P40_PROF_CTXT_REMAP_MEM_TPID_ANTI_SPOOF_CTL_FLD = 0, + CFA_P40_PROF_CTXT_REMAP_MEM_PRI_ANTI_SPOOF_CTL_FLD = 1, + CFA_P40_PROF_CTXT_REMAP_MEM_BYP_SP_LKUP_FLD = 2, + CFA_P40_PROF_CTXT_REMAP_MEM_SP_REC_PTR_FLD = 3, + CFA_P40_PROF_CTXT_REMAP_MEM_BD_ACT_EN_FLD = 4, + CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_TPID_FLD = 5, + CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_TPID_FLD = 6, + CFA_P40_PROF_CTXT_REMAP_MEM_DEFAULT_PRI_FLD = 7, + CFA_P40_PROF_CTXT_REMAP_MEM_ALLOWED_PRI_FLD = 8, + CFA_P40_PROF_CTXT_REMAP_MEM_PARIF_FLD = 9, + CFA_P40_PROF_CTXT_REMAP_MEM_BYP_LKUP_EN_FLD = 10, + CFA_P40_PROF_CTXT_REMAP_MEM_PROF_VNIC_FLD = 11, + CFA_P40_PROF_CTXT_REMAP_MEM_PROF_FUNC_FLD = 12, + CFA_P40_PROF_CTXT_REMAP_MEM_L2_CTXT_FLD = 13, + CFA_P40_PROF_CTXT_REMAP_MEM_ARP_FLD = 14, + CFA_P40_PROF_CTXT_REMAP_MEM_MAX_FLD +}; + +#define CFA_P40_PROF_CTXT_REMAP_MEM_TOTAL_NUM_BITS 64 + +/* Bypass action pointer look up (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_BITPOS 37 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_NUM_BITS 1 + +/* Exact match search enable (for idx 1 ...) 
*/ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_BITPOS 36 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_NUM_BITS 1 + +/* Exact match profile */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_BITPOS 28 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_NUM_BITS 8 + +/* Exact match key format */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_BITPOS 23 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_NUM_BITS 5 + +/* Exact match key mask */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_BITPOS 13 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_NUM_BITS 10 + +/* TCAM search enable */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_BITPOS 12 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_NUM_BITS 1 + +/* TCAM profile */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_BITPOS 4 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_NUM_BITS 8 + +/* TCAM key format */ +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_BITPOS 0 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_NUM_BITS 4 + +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_BITPOS 16 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_NUM_BITS 2 + +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_BITPOS 0 +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_NUM_BITS 16 + +enum cfa_p40_prof_profile_tcam_remap_mem_flds { + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_FLD = 0, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_ENB_FLD = 1, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_FLD = 2, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_FLD = 3, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_MASK_FLD = 4, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_ENB_FLD = 5, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_FLD = 6, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_FLD = 7, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OPT_FLD = 8, 
+ CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_ACT_REC_PTR_FLD = 9, + CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_MAX_FLD +}; + +#define CFA_P40_PROF_PROFILE_TCAM_REMAP_MEM_TOTAL_NUM_BITS 38 + +/* Valid TCAM entry (for idx 2 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_VALID_BITPOS 80 +#define CFA_P40_PROF_PROFILE_TCAM_VALID_NUM_BITS 1 + +/* Packet type (for idx 2 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_BITPOS 76 +#define CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_NUM_BITS 4 + +/* Pass through CFA (for idx 2 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_BITPOS 74 +#define CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_NUM_BITS 2 + +/* Aggregate error (for idx 2 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_BITPOS 73 +#define CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_NUM_BITS 1 + +/* Profile function (for idx 2 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_BITPOS 66 +#define CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_NUM_BITS 7 + +/* Reserved for future use. Set to 0. */ +#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_BITPOS 57 +#define CFA_P40_PROF_PROFILE_TCAM_RESERVED_NUM_BITS 9 + +/* non-tunnel(0)/tunneled(1) packet (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_BITPOS 56 +#define CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_NUM_BITS 1 + +/* Tunnel L2 tunnel valid (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_BITPOS 55 +#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_NUM_BITS 1 + +/* Tunnel L2 header type (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_BITPOS 53 +#define CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_NUM_BITS 2 + +/* Remapped tunnel L2 dest_type UC(0)/MC(2)/BC(3) (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_BITPOS 51 +#define CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_NUM_BITS 2 + +/* Tunnel L2 1+ VLAN tags present (for idx 1 ...) 
*/ +#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_BITPOS 50 +#define CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_NUM_BITS 1 + +/* Tunnel L2 2 VLAN tags present (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_BITPOS 49 +#define CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_NUM_BITS 1 + +/* Tunnel L3 valid (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_BITPOS 48 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_NUM_BITS 1 + +/* Tunnel L3 error (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_BITPOS 47 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_NUM_BITS 1 + +/* Tunnel L3 header type (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_BITPOS 43 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_NUM_BITS 4 + +/* Tunnel L3 header is IPV4 or IPV6. (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_BITPOS 42 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_NUM_BITS 1 + +/* Tunnel L3 IPV6 src address is compressed (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_BITPOS 41 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_NUM_BITS 1 + +/* Tunnel L3 IPV6 dest address is compressed (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_BITPOS 40 +#define CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_NUM_BITS 1 + +/* Tunnel L4 valid (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_BITPOS 39 +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_NUM_BITS 1 + +/* Tunnel L4 error (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_BITPOS 38 +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_NUM_BITS 1 + +/* Tunnel L4 header type (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_BITPOS 34 +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_NUM_BITS 4 + +/* Tunnel L4 header is UDP or TCP (for idx 1 ...) 
*/ +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_BITPOS 33 +#define CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_NUM_BITS 1 + +/* Tunnel valid (for idx 1 ...) */ +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_BITPOS 32 +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_NUM_BITS 1 + +/* Tunnel error */ +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_BITPOS 31 +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_NUM_BITS 1 + +/* Tunnel header type */ +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_BITPOS 27 +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_NUM_BITS 4 + +/* Tunnel header flags */ +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_BITPOS 24 +#define CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_NUM_BITS 3 + +/* L2 header valid */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_BITPOS 23 +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_NUM_BITS 1 + +/* L2 header error */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_BITPOS 22 +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_NUM_BITS 1 + +/* L2 header type */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_BITPOS 20 +#define CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_NUM_BITS 2 + +/* Remapped L2 dest_type UC(0)/MC(2)/BC(3) */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_BITPOS 18 +#define CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_NUM_BITS 2 + +/* L2 header 1+ VLAN tags present */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_BITPOS 17 +#define CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_NUM_BITS 1 + +/* L2 header 2 VLAN tags present */ +#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_BITPOS 16 +#define CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_NUM_BITS 1 + +/* L3 header valid */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_BITPOS 15 +#define CFA_P40_PROF_PROFILE_TCAM_L3_VALID_NUM_BITS 1 + +/* L3 header error */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_BITPOS 14 +#define CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_NUM_BITS 1 + +/* L3 header type */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_BITPOS 10 +#define 
CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_NUM_BITS 4 + +/* L3 header is IPV4 or IPV6. */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS 9 +#define CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS 1 + +/* L3 header IPV6 src address is compressed */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_BITPOS 8 +#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_NUM_BITS 1 + +/* L3 header IPV6 dest address is compressed */ +#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_BITPOS 7 +#define CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_NUM_BITS 1 + +/* L4 header valid */ +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_BITPOS 6 +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_NUM_BITS 1 + +/* L4 header error */ +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_BITPOS 5 +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_NUM_BITS 1 + +/* L4 header type */ +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_BITPOS 1 +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_NUM_BITS 4 + +/* L4 header is UDP or TCP */ +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_BITPOS 0 +#define CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_NUM_BITS 1 + +enum cfa_p40_prof_profile_tcam_flds { + CFA_P40_PROF_PROFILE_TCAM_VALID_FLD = 0, + CFA_P40_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 1, + CFA_P40_PROF_PROFILE_TCAM_RECYCLE_CNT_FLD = 2, + CFA_P40_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 3, + CFA_P40_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 4, + CFA_P40_PROF_PROFILE_TCAM_RESERVED_FLD = 5, + CFA_P40_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 6, + CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 7, + CFA_P40_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 8, + CFA_P40_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 9, + CFA_P40_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 10, + CFA_P40_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 11, + CFA_P40_PROF_PROFILE_TCAM_TL3_VALID_FLD = 12, + CFA_P40_PROF_PROFILE_TCAM_TL3_ERROR_FLD = 13, + CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 14, + CFA_P40_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 15, + CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_FLD = 
16, + CFA_P40_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DEST_FLD = 17, + CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 18, + CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 19, + CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_TYPE_FLD = 20, + CFA_P40_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 21, + CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 22, + CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_ERR_FLD = 23, + CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 24, + CFA_P40_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 25, + CFA_P40_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 26, + CFA_P40_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 27, + CFA_P40_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 28, + CFA_P40_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 29, + CFA_P40_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 30, + CFA_P40_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 31, + CFA_P40_PROF_PROFILE_TCAM_L3_VALID_FLD = 32, + CFA_P40_PROF_PROFILE_TCAM_L3_ERROR_FLD = 33, + CFA_P40_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 34, + CFA_P40_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 35, + CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_FLD = 36, + CFA_P40_PROF_PROFILE_TCAM_L3_IPV6_CMP_DEST_FLD = 37, + CFA_P40_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 38, + CFA_P40_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 39, + CFA_P40_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 40, + CFA_P40_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 41, + CFA_P40_PROF_PROFILE_TCAM_MAX_FLD +}; + +#define CFA_P40_PROF_PROFILE_TCAM_TOTAL_NUM_BITS 81 + +/* CFA flexible key layout definition */ +enum cfa_p40_key_fld_id { + CFA_P40_KEY_FLD_ID_MAX +}; + +/**************************************************************************/ + +/* Non-autogenerated fields */ + +/* Valid */ +#define CFA_P40_EEM_KEY_TBL_VALID_BITPOS 0 +#define CFA_P40_EEM_KEY_TBL_VALID_NUM_BITS 1 + +/* L1 Cacheable */ +#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_BITPOS 1 +#define CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_NUM_BITS 1 + +/* Strength */ +#define CFA_P40_EEM_KEY_TBL_STRENGTH_BITPOS 2 +#define CFA_P40_EEM_KEY_TBL_STRENGTH_NUM_BITS 2 + +/* Key Size */ +#define 
CFA_P40_EEM_KEY_TBL_KEY_SZ_BITPOS 15 +#define CFA_P40_EEM_KEY_TBL_KEY_SZ_NUM_BITS 9 + +/* Record Size */ +#define CFA_P40_EEM_KEY_TBL_REC_SZ_BITPOS 24 +#define CFA_P40_EEM_KEY_TBL_REC_SZ_NUM_BITS 5 + +/* Action Record Internal */ +#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_BITPOS 29 +#define CFA_P40_EEM_KEY_TBL_ACT_REC_INT_NUM_BITS 1 + +/* External Flow Counter */ +#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_BITPOS 30 +#define CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_NUM_BITS 1 + +/* Action Record Pointer */ +#define CFA_P40_EEM_KEY_TBL_AR_PTR_BITPOS 31 +#define CFA_P40_EEM_KEY_TBL_AR_PTR_NUM_BITS 33 + +/* EEM Key omitted - create using keybuilder + * Fields here cannot be larger than a u64 + */ + +#define CFA_P40_EEM_KEY_TBL_TOTAL_NUM_BITS 64 + +enum cfa_p40_eem_key_tbl_flds { + CFA_P40_EEM_KEY_TBL_VALID_FLD = 0, + CFA_P40_EEM_KEY_TBL_L1_CACHEABLE_FLD = 1, + CFA_P40_EEM_KEY_TBL_STRENGTH_FLD = 2, + CFA_P40_EEM_KEY_TBL_KEY_SZ_FLD = 3, + CFA_P40_EEM_KEY_TBL_REC_SZ_FLD = 4, + CFA_P40_EEM_KEY_TBL_ACT_REC_INT_FLD = 5, + CFA_P40_EEM_KEY_TBL_EXT_FLOW_CTR_FLD = 6, + CFA_P40_EEM_KEY_TBL_AR_PTR_FLD = 7, + CFA_P40_EEM_KEY_TBL_MAX_FLD +}; + +/* Mirror Destination 0 Source Property Record Pointer */ +#define CFA_P40_MIRROR_TBL_SP_PTR_BITPOS 0 +#define CFA_P40_MIRROR_TBL_SP_PTR_NUM_BITS 11 + +/* ignore or honor drop */ +#define CFA_P40_MIRROR_TBL_IGN_DROP_BITPOS 13 +#define CFA_P40_MIRROR_TBL_IGN_DROP_NUM_BITS 1 + +/* ingress or egress copy */ +#define CFA_P40_MIRROR_TBL_COPY_BITPOS 14 +#define CFA_P40_MIRROR_TBL_COPY_NUM_BITS 1 + +/* Mirror Destination enable. 
*/ +#define CFA_P40_MIRROR_TBL_EN_BITPOS 15 +#define CFA_P40_MIRROR_TBL_EN_NUM_BITS 1 + +/* Action Record Pointer */ +#define CFA_P40_MIRROR_TBL_AR_PTR_BITPOS 16 +#define CFA_P40_MIRROR_TBL_AR_PTR_NUM_BITS 16 + +#define CFA_P40_MIRROR_TBL_TOTAL_NUM_BITS 32 + +enum cfa_p40_mirror_tbl_flds { + CFA_P40_MIRROR_TBL_SP_PTR_FLD = 0, + CFA_P40_MIRROR_TBL_IGN_DROP_FLD = 1, + CFA_P40_MIRROR_TBL_COPY_FLD = 2, + CFA_P40_MIRROR_TBL_EN_FLD = 3, + CFA_P40_MIRROR_TBL_AR_PTR_FLD = 4, + CFA_P40_MIRROR_TBL_MAX_FLD +}; + +/* P45 Specific Updates (SR) - Non-autogenerated */ + +/* Valid TCAM entry. */ +#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_BITPOS 170 +#define CFA_P45_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1 + +/* Source Partition. */ +#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_BITPOS 166 +#define CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_NUM_BITS 4 + +/* Source Virtual I/F. */ +#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_BITPOS 72 +#define CFA_P45_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 12 + +/* The SR layout of the l2 ctxt key is different from the Wh+. Switch to + * cfa_p45_hw.h definition when available. 
+ */ +enum cfa_p45_prof_l2_ctxt_tcam_flds { + CFA_P45_PROF_L2_CTXT_TCAM_VALID_FLD = 0, + CFA_P45_PROF_L2_CTXT_TCAM_SPARIF_FLD = 1, + CFA_P45_PROF_L2_CTXT_TCAM_KEY_TYPE_FLD = 2, + CFA_P45_PROF_L2_CTXT_TCAM_TUN_HDR_TYPE_FLD = 3, + CFA_P45_PROF_L2_CTXT_TCAM_T_L2_NUMTAGS_FLD = 4, + CFA_P45_PROF_L2_CTXT_TCAM_L2_NUMTAGS_FLD = 5, + CFA_P45_PROF_L2_CTXT_TCAM_MAC1_FLD = 6, + CFA_P45_PROF_L2_CTXT_TCAM_T_OVID_FLD = 7, + CFA_P45_PROF_L2_CTXT_TCAM_T_IVID_FLD = 8, + CFA_P45_PROF_L2_CTXT_TCAM_SVIF_FLD = 9, + CFA_P45_PROF_L2_CTXT_TCAM_MAC0_FLD = 10, + CFA_P45_PROF_L2_CTXT_TCAM_OVID_FLD = 11, + CFA_P45_PROF_L2_CTXT_TCAM_IVID_FLD = 12, + CFA_P45_PROF_L2_CTXT_TCAM_MAX_FLD +}; + +#define CFA_P45_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 171 + +#endif /* _CFA_P40_HW_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p58_hw.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p58_hw.h new file mode 100644 index 000000000000..c103f7067794 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/cfa_p58_hw.h @@ -0,0 +1,1236 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ +#ifndef _CFA_P58_HW_H_ +#define _CFA_P58_HW_H_ + +/* Definitions of the base FKB fields */ +#define CFA_P58_BASE_FKB_L2_CTXT_FLD (0) +#define CFA_P58_BASE_FKB_PARIF_FLD (1) +#define CFA_P58_BASE_FKB_SPIF_FLD (2) +#define CFA_P58_BASE_FKB_SVIF_FLD (3) +#define CFA_P58_BASE_FKB_LCOS_FLD (4) +#define CFA_P58_BASE_FKB_META_FLD (5) +#define CFA_P58_BASE_FKB_RCYC_CNT_FLD (6) +#define CFA_P58_BASE_FKB_LOOPBACK_FLD (7) + +#define CFA_P58_BASE_FKB_OT_L2_TYPE_FLD (8) +#define CFA_P58_BASE_FKB_OT_DMAC_FLD (9) +#define CFA_P58_BASE_FKB_OT_SMAC_FLD (10) +#define CFA_P58_BASE_FKB_OT_DEST_TYPE_FLD (11) +#define CFA_P58_BASE_FKB_OT_SA_FLD (12) +#define CFA_P58_BASE_FKB_OT_NVT_FLD (13) +#define CFA_P58_BASE_FKB_OT_OVP_FLD (14) +#define CFA_P58_BASE_FKB_OT_OVD_FLD (15) +#define CFA_P58_BASE_FKB_OT_OVV_FLD (16) +#define CFA_P58_BASE_FKB_OT_OVT_FLD (17) +#define CFA_P58_BASE_FKB_OT_IVP_FLD (18) +#define CFA_P58_BASE_FKB_OT_IVD_FLD (19) +#define CFA_P58_BASE_FKB_OT_IVV_FLD (20) +#define CFA_P58_BASE_FKB_OT_IVT_FLD (21) +#define CFA_P58_BASE_FKB_OT_ETYPE_FLD (22) + +#define CFA_P58_BASE_FKB_OT_L3_TYPE_FLD (23) +#define CFA_P58_BASE_FKB_OT_SIP_FLD (24) +#define CFA_P58_BASE_FKB_OT_SIP_SELCMP_FLD (25) +#define CFA_P58_BASE_FKB_OT_DIP_FLD (26) +#define CFA_P58_BASE_FKB_OT_DIP_SELCMP_FLD (27) +#define CFA_P58_BASE_FKB_OT_TTL_FLD (28) +#define CFA_P58_BASE_FKB_OT_L3_PROT_FLD (29) +#define CFA_P58_BASE_FKB_OT_FLOW_ID_FLD (30) +#define CFA_P58_BASE_FKB_OT_QOS_FLD (31) +#define CFA_P58_BASE_FKB_OT_IEH_NO_NEXT_FLD (32) +#define CFA_P58_BASE_FKB_OT_IEH_ESP_FLD (33) +#define CFA_P58_BASE_FKB_OT_IEH_AUTH_FLD (34) +#define CFA_P58_BASE_FKB_OT_IEH_DEST_FLD (35) +#define CFA_P58_BASE_FKB_OT_IEH_FRAG_FLD (36) +#define CFA_P58_BASE_FKB_OT_IEH_RTHDR_FLD (37) +#define CFA_P58_BASE_FKB_OT_IEH_HOP_FLD (38) +#define CFA_P58_BASE_FKB_OT_IEH_1FRAG_FLD (39) +#define CFA_P58_BASE_FKB_OT_IEH_DF_FLD (40) +#define CFA_P58_BASE_FKB_OT_L3_ERRORS_FLD (41) + +#define CFA_P58_BASE_FKB_OT_L4_TYPE_FLD (42) +#define 
CFA_P58_BASE_FKB_OT_SRC_PORT_FLD (43) +#define CFA_P58_BASE_FKB_OT_DEST_PORT_FLD (44) +#define CFA_P58_BASE_FKB_OT_L4_FLAGS_FLD (45) +#define CFA_P58_BASE_FKB_OT_L4_SEQ_FLD (46) +#define CFA_P58_BASE_FKB_OT_L4_PA_FLD (47) +#define CFA_P58_BASE_FKB_OT_L4_OPT_FLD (48) +#define CFA_P58_BASE_FKB_OT_L4_TCPTS_FLD (49) +#define CFA_P58_BASE_FKB_OT_L4_ERRORS_FLD (50) + +#define CFA_P58_BASE_FKB_OT_T_TYPE_FLD (51) +#define CFA_P58_BASE_FKB_OT_T_FLAGS_FLD (52) +#define CFA_P58_BASE_FKB_OT_TIDS_FLD (53) +#define CFA_P58_BASE_FKB_OT_TID_FLD (54) +#define CFA_P58_BASE_FKB_OT_TCTXTS_FLD (55) +#define CFA_P58_BASE_FKB_OT_TCTXT_FLD (56) +#define CFA_P58_BASE_FKB_OT_TQOS_FLD (57) +#define CFA_P58_BASE_FKB_OT_TERRORS_FLD (58) + +#define CFA_P58_BASE_FKB_T_L2_TYPE_FLD (59) +#define CFA_P58_BASE_FKB_T_DMAC_FLD (60) +#define CFA_P58_BASE_FKB_T_SMAC_FLD (61) +#define CFA_P58_BASE_FKB_T_DEST_TYPE_FLD (62) +#define CFA_P58_BASE_FKB_T_SA_FLD (63) +#define CFA_P58_BASE_FKB_T_NVT_FLD (64) +#define CFA_P58_BASE_FKB_T_OVP_FLD (65) +#define CFA_P58_BASE_FKB_T_OVD_FLD (66) +#define CFA_P58_BASE_FKB_T_OVV_FLD (67) +#define CFA_P58_BASE_FKB_T_OVT_FLD (68) +#define CFA_P58_BASE_FKB_T_IVP_FLD (69) +#define CFA_P58_BASE_FKB_T_IVD_FLD (70) +#define CFA_P58_BASE_FKB_T_IVV_FLD (71) +#define CFA_P58_BASE_FKB_T_IVT_FLD (72) +#define CFA_P58_BASE_FKB_T_ETYPE_FLD (73) + +#define CFA_P58_BASE_FKB_T_L3_TYPE_FLD (74) +#define CFA_P58_BASE_FKB_T_SIP_FLD (75) +#define CFA_P58_BASE_FKB_T_SIP_SELCMP_FLD (76) +#define CFA_P58_BASE_FKB_T_DIP_FLD (77) +#define CFA_P58_BASE_FKB_T_DIP_SELCMP_FLD (78) +#define CFA_P58_BASE_FKB_T_TTL_FLD (79) +#define CFA_P58_BASE_FKB_T_L3_PROT_FLD (80) +#define CFA_P58_BASE_FKB_T_FLOW_ID_FLD (81) +#define CFA_P58_BASE_FKB_T_QOS_FLD (82) +#define CFA_P58_BASE_FKB_T_IEH_NO_NEXT_FLD (83) +#define CFA_P58_BASE_FKB_T_IEH_ESP_FLD (84) +#define CFA_P58_BASE_FKB_T_IEH_AUTH_FLD (85) +#define CFA_P58_BASE_FKB_T_IEH_DEST_FLD (86) +#define CFA_P58_BASE_FKB_T_IEH_FRAG_FLD (87) +#define 
CFA_P58_BASE_FKB_T_IEH_RTHDR_FLD (88) +#define CFA_P58_BASE_FKB_T_IEH_HOP_FLD (89) +#define CFA_P58_BASE_FKB_T_IEH_1FRAG_FLD (90) +#define CFA_P58_BASE_FKB_T_IEH_DF_FLD (91) +#define CFA_P58_BASE_FKB_T_L3_ERRORS_FLD (92) + +#define CFA_P58_BASE_FKB_T_L4_TYPE_FLD (93) +#define CFA_P58_BASE_FKB_T_SRC_PORT_FLD (94) +#define CFA_P58_BASE_FKB_T_DEST_PORT_FLD (95) +#define CFA_P58_BASE_FKB_T_L4_FLAGS_FLD (96) +#define CFA_P58_BASE_FKB_T_L4_SEQ_FLD (97) +#define CFA_P58_BASE_FKB_T_L4_ACK_FLD (98) +#define CFA_P58_BASE_FKB_T_L4_WIN_FLD (99) +#define CFA_P58_BASE_FKB_T_L4_PA_FLD (100) +#define CFA_P58_BASE_FKB_T_L4_OPT_FLD (101) +#define CFA_P58_BASE_FKB_T_L4_TCPTS_FLD (102) +#define CFA_P58_BASE_FKB_T_L4_TSVAL_FLD (103) +#define CFA_P58_BASE_FKB_T_L4_TXECR_FLD (104) +#define CFA_P58_BASE_FKB_T_L4_ERRORS_FLD (105) + +#define CFA_P58_BASE_FKB_MAX_FLD (CFA_P58_BASE_FKB_T_L4_ERRORS_FLD + 1) + +#define CFA_P58_EM_FKB_MAX_FLD CFA_P58_BASE_FKB_MAX_FLD +#define CFA_P58_WC_TCAM_FKB_MAX_FLD CFA_P58_BASE_FKB_MAX_FLD + +/* TCAM entry is valid (for idx 6 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_VALID_BITPOS 212 +#define CFA_P58_PROF_L2_CTXT_TCAM_VALID_NUM_BITS 1 + +/* Multi-pass cycle count. (for idx 6 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_MPASS_CNT_BITPOS 210 +#define CFA_P58_PROF_L2_CTXT_TCAM_MPASS_CNT_NUM_BITS 2 + +/* Recycle count from prof_in (for idx 6 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_RCYC_BITPOS 208 +#define CFA_P58_PROF_L2_CTXT_TCAM_RCYC_NUM_BITS 2 + +/* loopback input from prof_in (for idx 6 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_LOOPBACK_BITPOS 207 +#define CFA_P58_PROF_L2_CTXT_TCAM_LOOPBACK_NUM_BITS 1 + +/* Source network port from prof_in (for idx 6 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_SPIF_BITPOS 205 +#define CFA_P58_PROF_L2_CTXT_TCAM_SPIF_NUM_BITS 2 + +/* Partition provided by input block (for idx 6 ...)
*/ +#define CFA_P58_PROF_L2_CTXT_TCAM_PARIF_BITPOS 201 +#define CFA_P58_PROF_L2_CTXT_TCAM_PARIF_NUM_BITS 4 + +/* Source network port or vnic */ +#define CFA_P58_PROF_L2_CTXT_TCAM_SVIF_BITPOS 190 +#define CFA_P58_PROF_L2_CTXT_TCAM_SVIF_NUM_BITS 11 + +/* Metadata provided by Input block (for idx 5 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_METADATA_BITPOS 174 +#define CFA_P58_PROF_L2_CTXT_TCAM_METADATA_NUM_BITS 16 + +/* ROCE Packet detected by the Parser (for idx 5 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_ROCE_BITPOS 173 +#define CFA_P58_PROF_L2_CTXT_TCAM_ROCE_NUM_BITS 1 + +/* LLC Packet detected by the Parser. (for idx 5 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_LLC_BITPOS 172 +#define CFA_P58_PROF_L2_CTXT_TCAM_LLC_NUM_BITS 1 + +/* 4b encoding for Tunnel Type (for idx 5 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_T_HDR_TYPE_BITPOS 168 +#define CFA_P58_PROF_L2_CTXT_TCAM_T_HDR_TYPE_NUM_BITS 4 + +/* FLDS Tunnel Status ID. */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TUNNEL_ID_BITPOS 144 +#define CFA_P58_PROF_L2_CTXT_TCAM_TUNNEL_ID_NUM_BITS 24 + +/* Selected tunnel/inner DMAC/SMAC */ +#define CFA_P58_PROF_L2_CTXT_TCAM_MAC0_BITPOS 96 +#define CFA_P58_PROF_L2_CTXT_TCAM_MAC0_NUM_BITS 48 + +/* Selected tunnel/inner DMAC/SMAC Each of these fields are from the selected + * tunnel/inner L2 header + */ +#define CFA_P58_PROF_L2_CTXT_TCAM_MAC1_BITPOS 48 +#define CFA_P58_PROF_L2_CTXT_TCAM_MAC1_NUM_BITS 48 + +/* 1+ VLAN tags present (for idx 1 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_VTAG_PRESENT_BITPOS 47 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_VTAG_PRESENT_NUM_BITS 1 + +/* 2 VLAN tags present (for idx 1 ...) */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_TWO_VTAGS_BITPOS 46 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_TWO_VTAGS_NUM_BITS 1 + +/* Outer VLAN tag VID (for idx 1 ...) 
*/ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_VID_BITPOS 34 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_VID_NUM_BITS 12 + +/* Outer VLAN tag 3b encoded TPID */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_TPID_SEL_BITPOS 31 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_TPID_SEL_NUM_BITS 3 + +/* Inner VLAN tag VID */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_VID_BITPOS 19 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_VID_NUM_BITS 12 + +/* Inner VLAN tag 3b encoded TPID */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_TPID_SEL_BITPOS 16 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_TPID_SEL_NUM_BITS 3 + +/* Ethertype. */ +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_ETYPE_BITPOS 0 +#define CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_ETYPE_NUM_BITS 16 + +enum cfa_p58_prof_l2_ctxt_tcam_flds { + CFA_P58_PROF_L2_CTXT_TCAM_VALID_FLD = 0, + CFA_P58_PROF_L2_CTXT_TCAM_MPASS_CNT_FLD = 1, + CFA_P58_PROF_L2_CTXT_TCAM_RCYC_FLD = 2, + CFA_P58_PROF_L2_CTXT_TCAM_LOOPBACK_FLD = 3, + CFA_P58_PROF_L2_CTXT_TCAM_SPIF_FLD = 4, + CFA_P58_PROF_L2_CTXT_TCAM_PARIF_FLD = 5, + CFA_P58_PROF_L2_CTXT_TCAM_SVIF_FLD = 6, + CFA_P58_PROF_L2_CTXT_TCAM_METADATA_FLD = 7, + CFA_P58_PROF_L2_CTXT_TCAM_ROCE_FLD = 8, + CFA_P58_PROF_L2_CTXT_TCAM_LLC_FLD = 9, + CFA_P58_PROF_L2_CTXT_TCAM_T_HDR_TYPE_FLD = 10, + CFA_P58_PROF_L2_CTXT_TCAM_TUNNEL_ID_FLD = 11, + CFA_P58_PROF_L2_CTXT_TCAM_MAC0_FLD = 12, + CFA_P58_PROF_L2_CTXT_TCAM_MAC1_FLD = 13, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_VTAG_PRESENT_FLD = 14, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_TWO_VTAGS_FLD = 15, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_VID_FLD = 16, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_OVLAN_TPID_SEL_FLD = 17, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_VID_FLD = 18, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_IVLAN_TPID_SEL_FLD = 19, + CFA_P58_PROF_L2_CTXT_TCAM_TL2_L2_ETYPE_FLD = 20, + CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD +}; + +#define CFA_P58_PROF_L2_CTXT_TCAM_TOTAL_NUM_BITS 213 + +/* Valid(1)/Invalid(0) TCAM entry. (for idx 2 ...) 
*/ +#define CFA_P58_PROF_PROFILE_TCAM_VALID_BITPOS 93 +#define CFA_P58_PROF_PROFILE_TCAM_VALID_NUM_BITS 1 + +/* Packet type directly from prof_in. (for idx 2 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_PKT_TYPE_BITPOS 89 +#define CFA_P58_PROF_PROFILE_TCAM_PKT_TYPE_NUM_BITS 4 + +/* From L2 Context Lookup stage. (for idx 2 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_METADATA_BITPOS 73 +#define CFA_P58_PROF_PROFILE_TCAM_METADATA_NUM_BITS 16 + +/* Aggregate error flag from Input stage. (for idx 2 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_AGG_ERROR_BITPOS 72 +#define CFA_P58_PROF_PROFILE_TCAM_AGG_ERROR_NUM_BITS 1 + +/* Profile function from L2 Context Lookup stage. (for idx 2 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_PROF_FUNC_BITPOS 65 +#define CFA_P58_PROF_PROFILE_TCAM_PROF_FUNC_NUM_BITS 7 + +/* From FLDS Input General Status tunnel(1)/no tunnel(0) (for idx 2 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_HREC_NEXT_BITPOS 64 +#define CFA_P58_PROF_PROFILE_TCAM_HREC_NEXT_NUM_BITS 1 + +/* resolved flds_tl2_hdr_valid. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_VALID_BITPOS 63 +#define CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_VALID_NUM_BITS 1 + +/* Tunnel L2 header type directly from FLDS. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_TYPE_BITPOS 61 +#define CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_TYPE_NUM_BITS 2 + +/* flds_tl2_dst_type remapped: UC(0)/MC(2)/BC(3) (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL2_UC_MC_BC_BITPOS 59 +#define CFA_P58_PROF_PROFILE_TCAM_TL2_UC_MC_BC_NUM_BITS 2 + +/* 1+ VLAN tags present in Tunnel L2 header (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_BITPOS 58 +#define CFA_P58_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_NUM_BITS 1 + +/* 2 VLAN tags present in Tunnel L2 header (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_BITPOS 57 +#define CFA_P58_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_NUM_BITS 1 + +/* resolved flds_tl3_hdr_valid. (for idx 1 ...) 
*/ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_VALID_BITPOS 56 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_VALID_NUM_BITS 1 + +/* flds_tl3_hdr_valid is stop_w_error. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ERROR_BITPOS 55 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ERROR_NUM_BITS 1 + +/* Tunnel L3 header type directly from FLDS. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_TYPE_BITPOS 51 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_TYPE_NUM_BITS 4 + +/* Tunnel L3 header is IPV4 or IPV6. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ISIP_BITPOS 50 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ISIP_NUM_BITS 1 + +/* Tunnel L3 IPV6 source address is compressed. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_BITPOS 49 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_NUM_BITS 1 + +/* Tunnel L3 IPV6 destination address is compressed. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DST_BITPOS 48 +#define CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DST_NUM_BITS 1 + +/* resolved flds_tl4_hdr_valid. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_VALID_BITPOS 47 +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_VALID_NUM_BITS 1 + +/* flds_tl4_hdr_valid is stop_w_error. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_ERROR_BITPOS 46 +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_ERROR_NUM_BITS 1 + +/* Tunnel L4 header type directly from FLDS. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_TYPEP_BITPOS 42 +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_TYPEP_NUM_BITS 4 + +/* TL4 header is UDP or TCP. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_BITPOS 41 +#define CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_NUM_BITS 1 + +/* resolved flds_tun_hdr_valid. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_VALID_BITPOS 40 +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_VALID_NUM_BITS 1 + +/* flds_tun_hdr_valid is stop_w_error. 
(for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_ERROR_BITPOS 39 +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_ERROR_NUM_BITS 1 + +/* Tunnel header type directly from FLDS. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_TYPE_BITPOS 35 +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_TYPE_NUM_BITS 4 + +/* Tunnel header flags directly from FLDS. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_BITPOS 32 +#define CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_NUM_BITS 3 + +/* resolved flds_l2_hdr_valid. */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_VALID_BITPOS 31 +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_VALID_NUM_BITS 1 + +/* flds_l2_hdr_valid is stop_w_error. */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_ERROR_BITPOS 30 +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_ERROR_NUM_BITS 1 + +/* L2 header type directly from FLDS. */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_TYPE_BITPOS 28 +#define CFA_P58_PROF_PROFILE_TCAM_L2_HDR_TYPE_NUM_BITS 2 + +/* flds_l2_dst_type remapped: UC(0)/MC(2)/BC(3). */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_UC_MC_BC_BITPOS 26 +#define CFA_P58_PROF_PROFILE_TCAM_L2_UC_MC_BC_NUM_BITS 2 + +/* 1+ VLAN tags present in inner L2 header. */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_BITPOS 25 +#define CFA_P58_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_NUM_BITS 1 + +/* 2 VLAN tags present in inner L2 header. */ +#define CFA_P58_PROF_PROFILE_TCAM_L2_TWO_VTAGS_BITPOS 24 +#define CFA_P58_PROF_PROFILE_TCAM_L2_TWO_VTAGS_NUM_BITS 1 + +/* resolved flds_l3_hdr_valid. */ +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_VALID_BITPOS 23 +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_VALID_NUM_BITS 1 + +/* flds_l3_hdr_valid is stop_w_error. */ +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ERROR_BITPOS 22 +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ERROR_NUM_BITS 1 + +/* L3 header type directly from FLDS. */ +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_TYPE_BITPOS 18 +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_TYPE_NUM_BITS 4 + +/* L3 header is IPV4 or IPV6. 
*/ +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ISIP_BITPOS 17 +#define CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ISIP_NUM_BITS 1 + +/* L3 header IPV6 source address is compressed. */ +#define CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_BITPOS 16 +#define CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_NUM_BITS 1 + +/* L3 header IPV6 destination address is compressed. */ +#define CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_DST_BITPOS 15 +#define CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_DST_NUM_BITS 1 + +/* IP extension hdr of L3 Status. */ +#define CFA_P58_PROF_PROFILE_TCAM_IEH_BITPOS 7 +#define CFA_P58_PROF_PROFILE_TCAM_IEH_NUM_BITS 8 + +/* resolved flds_l4_hdr_valid. */ +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_VALID_BITPOS 6 +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_VALID_NUM_BITS 1 + +/* flds_l4_hdr_valid is stop_w_error. */ +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_ERROR_BITPOS 5 +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_ERROR_NUM_BITS 1 + +/* L4 header type directly from FLDS. */ +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_TYPE_BITPOS 1 +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_TYPE_NUM_BITS 4 + +/* L4 header is UDP or TCP. */ +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_BITPOS 0 +#define CFA_P58_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_NUM_BITS 1 + +enum cfa_p58_prof_profile_tcam_flds { + CFA_P58_PROF_PROFILE_TCAM_VALID_FLD = 0, + CFA_P58_PROF_PROFILE_TCAM_PKT_TYPE_FLD = 1, + CFA_P58_PROF_PROFILE_TCAM_METADATA_FLD = 2, + CFA_P58_PROF_PROFILE_TCAM_AGG_ERROR_FLD = 3, + CFA_P58_PROF_PROFILE_TCAM_PROF_FUNC_FLD = 4, + CFA_P58_PROF_PROFILE_TCAM_HREC_NEXT_FLD = 5, + CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_VALID_FLD = 6, + CFA_P58_PROF_PROFILE_TCAM_TL2_HDR_TYPE_FLD = 7, + CFA_P58_PROF_PROFILE_TCAM_TL2_UC_MC_BC_FLD = 8, + CFA_P58_PROF_PROFILE_TCAM_TL2_VTAG_PRESENT_FLD = 9, + CFA_P58_PROF_PROFILE_TCAM_TL2_TWO_VTAGS_FLD = 10, + CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_VALID_FLD = 11, + CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ERROR_FLD = 12, + CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_TYPE_FLD = 13, + 
CFA_P58_PROF_PROFILE_TCAM_TL3_HDR_ISIP_FLD = 14, + CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_SRC_FLD = 15, + CFA_P58_PROF_PROFILE_TCAM_TL3_IPV6_CMP_DST_FLD = 16, + CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_VALID_FLD = 17, + CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_ERROR_FLD = 18, + CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_TYPEP_FLD = 19, + CFA_P58_PROF_PROFILE_TCAM_TL4_HDR_IS_UDP_TCP_FLD = 20, + CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_VALID_FLD = 21, + CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_ERROR_FLD = 22, + CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_TYPE_FLD = 23, + CFA_P58_PROF_PROFILE_TCAM_TUN_HDR_FLAGS_FLD = 24, + CFA_P58_PROF_PROFILE_TCAM_L2_HDR_VALID_FLD = 25, + CFA_P58_PROF_PROFILE_TCAM_L2_HDR_ERROR_FLD = 26, + CFA_P58_PROF_PROFILE_TCAM_L2_HDR_TYPE_FLD = 27, + CFA_P58_PROF_PROFILE_TCAM_L2_UC_MC_BC_FLD = 28, + CFA_P58_PROF_PROFILE_TCAM_L2_VTAG_PRESENT_FLD = 29, + CFA_P58_PROF_PROFILE_TCAM_L2_TWO_VTAGS_FLD = 30, + CFA_P58_PROF_PROFILE_TCAM_L3_HDR_VALID_FLD = 31, + CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ERROR_FLD = 32, + CFA_P58_PROF_PROFILE_TCAM_L3_HDR_TYPE_FLD = 33, + CFA_P58_PROF_PROFILE_TCAM_L3_HDR_ISIP_FLD = 34, + CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_SRC_FLD = 35, + CFA_P58_PROF_PROFILE_TCAM_L3_IPV6_CMP_DST_FLD = 36, + CFA_P58_PROF_PROFILE_TCAM_IEH_FLD = 37, + CFA_P58_PROF_PROFILE_TCAM_L4_HDR_VALID_FLD = 38, + CFA_P58_PROF_PROFILE_TCAM_L4_HDR_ERROR_FLD = 39, + CFA_P58_PROF_PROFILE_TCAM_L4_HDR_TYPE_FLD = 40, + CFA_P58_PROF_PROFILE_TCAM_L4_HDR_IS_UDP_TCP_FLD = 41, + CFA_P58_PROF_PROFILE_TCAM_MAX_FLD +}; + +#define CFA_P58_PROF_PROFILE_TCAM_TOTAL_NUM_BITS 94 + +/* IPv4 ID field control. + * + * The enumerators are renamed from the autogenerated name + * to avoid redeclaration. 
+ */ +#define CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_BITPOS 16 +#define CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_NUM_BITS 2 +enum cfa_p58_actp5_tect_ipv4_id_ctrl { + /* use IPv4 ID field from encapsulation record */ + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_ENCREC = 0x0UL, + /* reserved */ + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_RSVD = 0x1UL, + /* inherit inner IPv4 header ID field */ + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_INHRT = 0x2UL, + /* use CFA incrementing IPv4 ID counter */ + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_INCR = 0x3UL, + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_MAX = 0x3UL +}; + +/* SMAC override. + * + * The enumerators are renamed from the autogenerated name + * to avoid redeclaration. + */ +#define CFA_P58_ACTP5_TECT_SMAC_OVR_BITPOS 13 +#define CFA_P58_ACTP5_TECT_SMAC_OVR_NUM_BITS 3 +enum cfa_p58_actp5_tect_smac_ovr { + /* use source property record SMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_ENCP = 0x0UL, + /* re-use existing inner L2 header SMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUINSL2 = 0x1UL, + /* re-use existing tunnel L2 header SMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUTNSL2 = 0x2UL, + /* re-use existing outer-most L2 header SMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUOMSL2 = 0x3UL, + /* reserved for future use */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RSVDL2 = 0x4UL, + /* re-use existing inner L2 header DMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUINDL2 = 0x5UL, + /* re-use existing tunnel L2 header DMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUTNDL2 = 0x6UL, + /* re-use existing outer-most L2 header DMAC */ + CFA_P58_ACTP5_TECT_SMAC_OVR_RUOMDL2 = 0x7UL, + CFA_P58_ACTP5_TECT_SMAC_OVR_MAX = 0x7UL +}; + +/* VLAN override. + * + * The enumerators are renamed from the autogenerated name + * to avoid redeclaration. 
+ */ +#define CFA_P58_ACTP5_TECT_VLAN_OVR_BITPOS 10 +#define CFA_P58_ACTP5_TECT_VLAN_OVR_NUM_BITS 3 +enum cfa_p58_actp5_tect_vlan_ovr { + /* use only encap record VLAN tags */ + CFA_P58_ACTP5_TECT_VLAN_OVR_ENCP = 0x0UL, + /* use only existing inner L2 header VLAN tags */ + CFA_P58_ACTP5_TECT_VLAN_OVR_RUINL2 = 0x1UL, + /* use only existing tunnel L2 VLAN tags */ + CFA_P58_ACTP5_TECT_VLAN_OVR_RUTNL2 = 0x2UL, + /* use only existing outer-most L2 VLAN tags */ + CFA_P58_ACTP5_TECT_VLAN_OVR_RUOML2 = 0x3UL, + /* include inner VLAN tag inner L2 header. */ + CFA_P58_ACTP5_TECT_VLAN_OVR_INCIVL2 = 0x4UL, + /* include outer VLAN tag inner L2 header */ + CFA_P58_ACTP5_TECT_VLAN_OVR_INCOVL2 = 0x5UL, + /* include inner VLAN tag outer-most L2 header */ + CFA_P58_ACTP5_TECT_VLAN_OVR_INCIVOM = 0x6UL, + /* include outer VLAN tag outer-most L2 header */ + CFA_P58_ACTP5_TECT_VLAN_OVR_INCOVOM = 0x7UL, + CFA_P58_ACTP5_TECT_VLAN_OVR_MAX = 0x7UL +}; + +/* DMAC override. + * + * The enumerators are renamed from the autogenerated name + * to avoid redeclaration. + */ +#define CFA_P58_ACTP5_TECT_DMAC_OVR_BITPOS 8 +#define CFA_P58_ACTP5_TECT_DMAC_OVR_NUM_BITS 2 +enum cfa_p58_actp5_tect_dmac_ovr { + /* use encap record DMAC */ + CFA_P58_ACTP5_TECT_DMAC_OVR_ENCP = 0x0UL, + /* re-use existing inner L2 header DMAC */ + CFA_P58_ACTP5_TECT_DMAC_OVR_RUINL2 = 0x1UL, + /* re-use existing tunnel L2 header DMAC */ + CFA_P58_ACTP5_TECT_DMAC_OVR_RUTNL2 = 0x2UL, + /* re-use existing outer-most L2 header DMAC */ + CFA_P58_ACTP5_TECT_DMAC_OVR_RUOML2 = 0x3UL, + CFA_P58_ACTP5_TECT_DMAC_OVR_MAX = 0x3UL +}; + +/* When set to 1 any GRE tunnels will include the optional Key field. */ +#define CFA_P58_ACTP5_TECT_GRE_SET_K_BITPOS 7 +#define CFA_P58_ACTP5_TECT_GRE_SET_K_NUM_BITS 1 + +/* Default Tunnel QOS placed in fields bus on tunnel encapsulations. 
*/ +#define CFA_P58_ACTP5_TECT_TUN_QOS_BITPOS 4 +#define CFA_P58_ACTP5_TECT_TUN_QOS_NUM_BITS 3 + +/* When set to 1 the IPV6 Traffic Class (TC) field of the outer header is + * inherited from the inner header (if present) or the fixed value as taken + * from the encap record. + */ +#define CFA_P58_ACTP5_TECT_IPV6_TC_IH_BITPOS 3 +#define CFA_P58_ACTP5_TECT_IPV6_TC_IH_NUM_BITS 1 + +/* When set to 1 the IPV6 Hop Limit field of the outer header is inherited + * from the inner header (if present) or the fixed value as taken from the + * encap record. + */ +#define CFA_P58_ACTP5_TECT_IPV6_HL_IH_BITPOS 2 +#define CFA_P58_ACTP5_TECT_IPV6_HL_IH_NUM_BITS 1 + +/* When set to 1 the IPV4 Type Of Service (TOS) field of the outer header is + * inherited from the inner header (if present) or the fixed value as taken + * from the encap record. + */ +#define CFA_P58_ACTP5_TECT_IPV4_TOS_IH_BITPOS 1 +#define CFA_P58_ACTP5_TECT_IPV4_TOS_IH_NUM_BITS 1 + +/* When set to 1 the IPV4 TTL field of the outer header is inherited from the + * inner header (if present) or the fixed value as taken from the encap + * record. + */ +#define CFA_P58_ACTP5_TECT_IPV4_TTL_IH_BITPOS 0 +#define CFA_P58_ACTP5_TECT_IPV4_TTL_IH_NUM_BITS 1 + +enum cfa_p58_actp5_tect_flds { + CFA_P58_ACTP5_TECT_IPV4_ID_CTRL_FLD = 0, + CFA_P58_ACTP5_TECT_SMAC_OVR_FLD = 1, + CFA_P58_ACTP5_TECT_VLAN_OVR_FLD = 2, + CFA_P58_ACTP5_TECT_DMAC_OVR_FLD = 3, + CFA_P58_ACTP5_TECT_GRE_SET_K_FLD = 4, + CFA_P58_ACTP5_TECT_TUN_QOS_FLD = 5, + CFA_P58_ACTP5_TECT_IPV6_TC_IH_FLD = 6, + CFA_P58_ACTP5_TECT_IPV6_HL_IH_FLD = 7, + CFA_P58_ACTP5_TECT_IPV4_TOS_IH_FLD = 8, + CFA_P58_ACTP5_TECT_IPV4_TTL_IH_FLD = 9, + CFA_P58_ACTP5_TECT_MAX_FLD +}; + +#define CFA_P58_ACTP5_TECT_TOTAL_NUM_BITS 18 + +/* Valid entry (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_TX_VALID_BITPOS 78 +#define CFA_P58_ACTP5_VEB_TCAM_TX_VALID_NUM_BITS 1 + +/* PF Parif Number (for idx 2 ...) 
*/ +#define CFA_P58_ACTP5_VEB_TCAM_TX_PARIF_IN_BITPOS 74 +#define CFA_P58_ACTP5_VEB_TCAM_TX_PARIF_IN_NUM_BITS 4 + +/* Number of VLAN Tags. (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_TX_NUM_VTAGS_BITPOS 72 +#define CFA_P58_ACTP5_VEB_TCAM_TX_NUM_VTAGS_NUM_BITS 2 + +/* Dest. MAC Address */ +#define CFA_P58_ACTP5_VEB_TCAM_TX_DMAC_BITPOS 24 +#define CFA_P58_ACTP5_VEB_TCAM_TX_DMAC_NUM_BITS 48 + +/* Outer VLAN Tag ID */ +#define CFA_P58_ACTP5_VEB_TCAM_TX_OVID_BITPOS 12 +#define CFA_P58_ACTP5_VEB_TCAM_TX_OVID_NUM_BITS 12 + +/* Inner VLAN Tag ID RX Fields (Source Knockout Mode): */ +#define CFA_P58_ACTP5_VEB_TCAM_TX_IVID_BITPOS 0 +#define CFA_P58_ACTP5_VEB_TCAM_TX_IVID_NUM_BITS 12 + +/* Valid entry (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_VALID_BITPOS 78 +#define CFA_P58_ACTP5_VEB_TCAM_RX_VALID_NUM_BITS 1 + +/* program to zero (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_PADDING_BITPOS 67 +#define CFA_P58_ACTP5_VEB_TCAM_RX_PADDING_NUM_BITS 11 + +/* DMAC is unicast address (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_UNICAST_BITPOS 66 +#define CFA_P58_ACTP5_VEB_TCAM_RX_UNICAST_NUM_BITS 1 + +/* DMAC is multicast address (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_MULTICAST_BITPOS 65 +#define CFA_P58_ACTP5_VEB_TCAM_RX_MULTICAST_NUM_BITS 1 + +/* DMAC is broadcast address (for idx 2 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_BROADCAST_BITPOS 64 +#define CFA_P58_ACTP5_VEB_TCAM_RX_BROADCAST_NUM_BITS 1 + +/* pfid (for idx 1 ...) */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_PFID_BITPOS 60 +#define CFA_P58_ACTP5_VEB_TCAM_RX_PFID_NUM_BITS 4 + +/* vfid (for idx 1 ...) 
*/ +#define CFA_P58_ACTP5_VEB_TCAM_RX_VFID_BITPOS 48 +#define CFA_P58_ACTP5_VEB_TCAM_RX_VFID_NUM_BITS 12 + +/* source mac */ +#define CFA_P58_ACTP5_VEB_TCAM_RX_SMAC_BITPOS 0 +#define CFA_P58_ACTP5_VEB_TCAM_RX_SMAC_NUM_BITS 48 + +enum cfa_p58_actp5_veb_tcam_flds { + CFA_P58_ACTP5_VEB_TCAM_TX_VALID_FLD = 0, + CFA_P58_ACTP5_VEB_TCAM_TX_PARIF_IN_FLD = 1, + CFA_P58_ACTP5_VEB_TCAM_TX_NUM_VTAGS_FLD = 2, + CFA_P58_ACTP5_VEB_TCAM_TX_DMAC_FLD = 3, + CFA_P58_ACTP5_VEB_TCAM_TX_OVID_FLD = 4, + CFA_P58_ACTP5_VEB_TCAM_TX_IVID_FLD = 5, + CFA_P58_ACTP5_VEB_TCAM_RX_VALID_FLD = 6, + CFA_P58_ACTP5_VEB_TCAM_RX_PADDING_FLD = 7, + CFA_P58_ACTP5_VEB_TCAM_RX_UNICAST_FLD = 8, + CFA_P58_ACTP5_VEB_TCAM_RX_MULTICAST_FLD = 9, + CFA_P58_ACTP5_VEB_TCAM_RX_BROADCAST_FLD = 10, + CFA_P58_ACTP5_VEB_TCAM_RX_PFID_FLD = 11, + CFA_P58_ACTP5_VEB_TCAM_RX_VFID_FLD = 12, + CFA_P58_ACTP5_VEB_TCAM_RX_SMAC_FLD = 13, + CFA_P58_ACTP5_VEB_TCAM_MAX_FLD +}; + +#define CFA_P58_ACTP5_VEB_TCAM_TOTAL_NUM_BITS 158 + +/* Partition (for idx 1 ...) */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_PARIF_BITPOS 37 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_PARIF_NUM_BITS 4 + +/* When set cfa_meta opcode is allowed (for idx 1 ...) */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_META_BITPOS 36 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_META_NUM_BITS 1 + +/* When set act_rec_ptr is set to cfa_action if it is non-zero. Otherwise + * act_rec_ptr is set to act_rec_ptr from this table. (for idx 1 ...) + */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_ACTION_BITPOS 35 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_ACTION_NUM_BITS 1 + +/* When set destination is set to destination from this table. Otherwise it is + * set to est_dest. (for idx 1 ...) + */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_ILT_DEST_BITPOS 34 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_ILT_DEST_NUM_BITS 1 + +/* ILT opcode (for idx 1 ...) 
*/ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_FWD_OP_BITPOS 32 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_FWD_OP_NUM_BITS 2 +enum cfa_p58_prof_input_lookup_table_mem_fwd_op { + /* cfa is bypassed */ + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_BYPASS_CFA = 0x0UL, + /* cfa is bypassed if packet is ROCE */ + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_BYPASS_CFA_ROCE = 0x1UL, + /* profiler and lookup blocks are bypassed */ + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_BYPASS_LKUP = 0x2UL, + /* packet proceeds to L2 Context Stage */ + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_NORMAL_FLOW = 0x3UL, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_MAX = 0x3UL +}; + +/* used for act_rec_ptr */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_ACT_REC_PTR_BITPOS 16 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_ACT_REC_PTR_NUM_BITS 16 + +/* used for destination */ +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_DESTINATION_BITPOS 0 +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_DESTINATION_NUM_BITS 16 + +enum cfa_p58_prof_input_lookup_table_mem_flds { + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_PARIF_FLD = 0, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_META_FLD = 1, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_BD_ACTION_FLD = 2, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_EN_ILT_DEST_FLD = 3, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_FWD_OP_FLD = 4, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_ACT_REC_PTR_FLD = 5, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_DESTINATION_FLD = 6, + CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_MAX_FLD +}; + +#define CFA_P58_PROF_INPUT_LOOKUP_TABLE_MEM_TOTAL_NUM_BITS 41 + +/* Partition. Replaces parif from Input block (for idx 1 ...) */ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_PARIF_BITPOS 39 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_PARIF_NUM_BITS 4 + +/* L2 logical id which may be used in EM and WC Lookups. */ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_L2_CTXT_BITPOS 29 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_L2_CTXT_NUM_BITS 10 + +/* Context operation code. 
*/ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_OPCODE_BITPOS 26 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_OPCODE_NUM_BITS 3 +enum cfa_p58_prof_l2_ctxt_remap_mem_ctxt_opcode { + /* def_ctxt_data provides destination */ + CFA_P58_PROF_L2_CTXT_REMAP_MEM_BYPASS_CFA = 0x0UL, + /* def_ctxt_data provides act_rec_ptr */ + CFA_P58_PROF_L2_CTXT_REMAP_MEM_BYPASS_LKUP = 0x1UL, + /* metadata modified with def_ctxt_data */ + CFA_P58_PROF_L2_CTXT_REMAP_MEM_META_UPDATE = 0x2UL, + /* continue normal flow */ + CFA_P58_PROF_L2_CTXT_REMAP_MEM_NORMAL_FLOW = 0x3UL, + /* mark packet for drop */ + CFA_P58_PROF_L2_CTXT_REMAP_MEM_DROP = 0x4UL, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_MAX = 0x7UL +}; + +/* data dependent on ctxt_opcode. */ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_DEF_CTXT_DATA_BITPOS 10 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_DEF_CTXT_DATA_NUM_BITS 16 + +/* Selects 1 of 8x 16-bit masks for META_UPDATE only. */ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_META_PROF_BITPOS 7 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_META_PROF_NUM_BITS 3 + +/* Allow Profile TCAM Lookup Table to be logically partitioned. */ +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_PROF_FUNC_BITPOS 0 +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_PROF_FUNC_NUM_BITS 7 + +enum cfa_p58_prof_l2_ctxt_remap_mem_flds { + CFA_P58_PROF_L2_CTXT_REMAP_MEM_PARIF_FLD = 0, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_L2_CTXT_FLD = 1, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_OPCODE_FLD = 2, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_DEF_CTXT_DATA_FLD = 3, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_CTXT_META_PROF_FLD = 4, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_PROF_FUNC_FLD = 5, + CFA_P58_PROF_L2_CTXT_REMAP_MEM_MAX_FLD +}; + +#define CFA_P58_PROF_L2_CTXT_REMAP_MEM_TOTAL_NUM_BITS 43 + +/* Normal operation. (for idx 1 ...) */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_BITPOS 32 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_NUM_BITS 1 + +/* Enable search in EM database. 
*/ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_EN_BITPOS 31 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_EN_NUM_BITS 1 + +/* ID to differentiate common EM keys. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_BITPOS 23 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_NUM_BITS 8 + +/* Exact match key template select. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_BITPOS 17 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_NUM_BITS 6 + +/* Exact Match key type. Specifies normal/LAG EM lookups. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_TYPE_BITPOS 15 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_TYPE_NUM_BITS 2 + +/* Enable search in TCAM database. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_EN_BITPOS 14 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_EN_NUM_BITS 1 + +/* ID to differentiate common TCAM keys. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_BITPOS 6 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_NUM_BITS 8 + +/* TCAM key template select. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_BITPOS 0 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_NUM_BITS 6 + +/* Bypass operation. (for idx 1 ...) */ + +/* Reserved for future use. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_RESERVED_BITPOS 18 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_RESERVED_NUM_BITS 14 + +/* Bypass operations. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OP_BITPOS 16 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OP_NUM_BITS 2 +enum cfa_p58_prof_profile_tcam_remap_mem_bypass_op { + /* set the drop flag. */ + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_DROP = 0x0UL, + /* Bypass lookup use act_record_ptr from this table. 
*/ + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_LKUP = 0x1UL, + /* Bypass lookup use Partition Default Action Record Pointer Table */ + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_DEFAULT = 0x2UL, + /* Bypass lookup use Partition Error Action Record Pointer Table. */ + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_ERROR = 0x3UL, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_MAX = 0x3UL +}; + +/* Used for BYPASS_LKUP. */ +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_ACT_RECORD_PTR_BITPOS 0 +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_ACT_RECORD_PTR_NUM_BITS 16 + +enum cfa_p58_prof_profile_tcam_remap_mem_flds { + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_PL_BYP_LKUP_EN_FLD = 0, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_SEARCH_EN_FLD = 1, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_PROFILE_ID_FLD = 2, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_ID_FLD = 3, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_EM_KEY_TYPE_FLD = 4, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_SEARCH_EN_FLD = 5, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_PROFILE_ID_FLD = 6, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TCAM_KEY_ID_FLD = 7, + /* The enumerators below are from the autogenerated + * enum cfa_p58_prof_profile_tcam_remap_mem_1_flds which + * is deleted + */ + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_RESERVED_FLD = 8, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_BYPASS_OP_FLD = 9, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_ACT_RECORD_PTR_FLD = 10, + CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_MAX_FLD +}; + +#define CFA_P58_PROF_PROFILE_TCAM_REMAP_MEM_TOTAL_NUM_BITS 33 + +/* Reserved for future use. */ +#define CFA_P58_ACT_VSPT_TX_RSVD_BITPOS 40 +#define CFA_P58_ACT_VSPT_TX_RSVD_NUM_BITS 41 + +/* VLAN TPID anti-spoofing control. (for idx 1 ...) 
*/ +#define CFA_P58_ACT_VSPT_TPID_AS_CTL_BITPOS 38 +#define CFA_P58_ACT_VSPT_TPID_AS_CTL_NUM_BITS 2 +enum cfa_p58_act_vspt_tpid_as_ctl { + CFA_P58_ACT_VSPT_TPID_IGNORE = 0x0UL, + CFA_P58_ACT_VSPT_TPID_DEFAULT = 0x1UL, + CFA_P58_ACT_VSPT_TPID_DROP = 0x2UL, + CFA_P58_ACT_VSPT_TPID_MAX = 0x3UL +}; + +/* VLAN allowed TPID bit map. */ +#define CFA_P58_ACT_VSPT_ALWD_TPID_BITPOS 30 +#define CFA_P58_ACT_VSPT_ALWD_TPID_NUM_BITS 8 + +/* VLAN encoded default TPID. */ +#define CFA_P58_ACT_VSPT_DFLT_TPID_BITPOS 27 +#define CFA_P58_ACT_VSPT_DFLT_TPID_NUM_BITS 3 + +/* VLAN PRIority anti-spoofing control. */ +#define CFA_P58_ACT_VSPT_PRI_AS_CTL_BITPOS 25 +#define CFA_P58_ACT_VSPT_PRI_AS_CTL_NUM_BITS 2 +enum cfa_p58_act_vspt_pri_as_ctl { + CFA_P58_ACT_VSPTPRI_IGNORE = 0x0UL, + CFA_P58_ACT_VSPTPRI_DEFAULT = 0x1UL, + CFA_P58_ACT_VSPTPRI_DROP = 0x2UL, + CFA_P58_ACT_VSPTPRI_MAX = 0x3UL +}; + +/* VLAN allowed PRIority bit map. */ +#define CFA_P58_ACT_VSPT_ALWD_PRI_BITPOS 17 +#define CFA_P58_ACT_VSPT_ALWD_PRI_NUM_BITS 8 + +/* VLAN default PRIority. */ +#define CFA_P58_ACT_VSPT_DFLT_PRI_BITPOS 14 +#define CFA_P58_ACT_VSPT_DFLT_PRI_NUM_BITS 3 + +/* Mirror destination (0..15) or 4'bF=NO_MIRROR */ +#define CFA_P58_ACT_VSPT_MIR_BITPOS 10 +#define CFA_P58_ACT_VSPT_MIR_NUM_BITS 4 + +/* Pointer to per svif meter; 0x0 = disabled */ +#define CFA_P58_ACT_VSPT_IFMTR_BITPOS 0 +#define CFA_P58_ACT_VSPT_IFMTR_NUM_BITS 10 + +enum cfa_p58_act_vspt_flds { + CFA_P58_ACT_VSPT_TX_RSVD_FLD = 0, + CFA_P58_ACT_VSPT_TPID_AS_CTL_FLD = 1, + CFA_P58_ACT_VSPT_ALWD_TPID_FLD = 2, + CFA_P58_ACT_VSPT_DFLT_TPID_FLD = 3, + CFA_P58_ACT_VSPT_PRI_AS_CTL_FLD = 4, + CFA_P58_ACT_VSPT_ALWD_PRI_FLD = 5, + CFA_P58_ACT_VSPT_DFLT_PRI_FLD = 6, + CFA_P58_ACT_VSPT_MIR_FLD = 7, + CFA_P58_ACT_VSPT_IFMTR_FLD = 8, + CFA_P58_ACT_VSPT_MAX_FLD +}; + +#define CFA_P58_ACT_VSPT_TOTAL_NUM_BITS 81 + +/* Enable source knockout. (for idx 2 ...) 
*/ +#define CFA_P58_ACT_VSPTABC123_SRC_KO_EN_BITPOS 80 +#define CFA_P58_ACT_VSPTABC123_SRC_KO_EN_NUM_BITS 1 + +/* MAC associated with vnic for sko. */ +#define CFA_P58_ACT_VSPTABC123_MAC_BITPOS 32 +#define CFA_P58_ACT_VSPTABC123_MAC_NUM_BITS 48 + +/* Function ID: 4 bit PF and 12 bit VID (VNIC ID) */ +#define CFA_P58_ACT_VSPTABC123_FID_BITPOS 16 +#define CFA_P58_ACT_VSPTABC123_FID_NUM_BITS 16 + +/* Reserved for Future use. */ +#define CFA_P58_ACT_VSPTABC123_RX_RSVD_BITPOS 14 +#define CFA_P58_ACT_VSPTABC123_RX_RSVD_NUM_BITS 2 + +/* Mirror destination (0..15) or 4'bF=NO_MIRROR */ +#define CFA_P58_ACT_VSPTABC123_RX_MIR_BITPOS 10 +#define CFA_P58_ACT_VSPTABC123_RX_MIR_NUM_BITS 4 + +/* Pointer to per vnic meter; 0x0 = disabled */ +#define CFA_P58_ACT_VSPTABC123_RX_IFMTR_BITPOS 0 +#define CFA_P58_ACT_VSPTABC123_RX_IFMTR_NUM_BITS 10 + +enum cfa_p58_act_vsptabc123_flds { + CFA_P58_ACT_VSPTABC123_SRC_KO_EN_FLD = 0, + CFA_P58_ACT_VSPTABC123_MAC_FLD = 1, + CFA_P58_ACT_VSPTABC123_FID_FLD = 2, + CFA_P58_ACT_VSPTABC123_RX_RSVD_FLD = 3, + CFA_P58_ACT_VSPTABC123_RX_MIR_FLD = 4, + CFA_P58_ACT_VSPTABC123_RX_IFMTR_FLD = 5, + CFA_P58_ACT_VSPTABC123_MAX_FLD +}; + +#define CFA_P58_ACT_VSPTABC123_TOTAL_NUM_BITS 81 + +/* Destination or metadata. (for idx 1 ...) */ +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_STRENGTH_BITPOS 36 +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_STRENGTH_NUM_BITS 2 + +/* Destination or metadata. */ +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_DATA_BITPOS 20 +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_DATA_NUM_BITS 16 + +/* Opcode. 
*/ +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_OPCODE_BITPOS 17 +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_OPCODE_NUM_BITS 3 + +/* Metadata profile */ +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_META_PROF_BITPOS 14 +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_META_PROF_NUM_BITS 3 + +/* Connection pointer or ring table index */ +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_CTXT_DATA_BITPOS 0 +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_CTXT_DATA_NUM_BITS 14 + +enum cfa_p58_lkupp5_tcam_record_mem_flds { + CFA_P58_LKUPP5_TCAM_RECORD_MEM_STRENGTH_FLD = 0, + CFA_P58_LKUPP5_TCAM_RECORD_MEM_DATA_FLD = 1, + CFA_P58_LKUPP5_TCAM_RECORD_MEM_OPCODE_FLD = 2, + CFA_P58_LKUPP5_TCAM_RECORD_MEM_META_PROF_FLD = 3, + CFA_P58_LKUPP5_TCAM_RECORD_MEM_CTXT_DATA_FLD = 4, + CFA_P58_LKUPP5_TCAM_RECORD_MEM_MAX_FLD +}; + +#define CFA_P58_LKUPP5_TCAM_RECORD_MEM_TOTAL_NUM_BITS 38 + +/* If 0 (invalid), connection tracking and filtering is disabled + * for the connection and the output action[1:0] is set to forward + * and copy_enabled=0 + */ +#define CFA_P58_LKUP_CNX_MEM_VALID_BITPOS 10 +#define CFA_P58_LKUP_CNX_MEM_VALID_NUM_BITS 1 + +/* Enable the copy action[1:0]. If not enabled for a connection and the output + * result action[1:0] is copy, it is overridden and set to forward. + */ +#define CFA_P58_LKUP_CNX_MEM_COPY_ENABLED_BITPOS 9 +#define CFA_P58_LKUP_CNX_MEM_COPY_ENABLED_NUM_BITS 1 + +/* Set to 1 if shared=1 and send_state=1 in matching rule and interface flow + * controlled. Will get send on next operation to entry in which the message + * interface to the other CFA is not flow controlled. Sent immediately on host + * write of 1 if not flow controlled (intended for debug). + */ +#define CFA_P58_LKUP_CNX_MEM_SEND_PENDING_BITPOS 8 +#define CFA_P58_LKUP_CNX_MEM_SEND_PENDING_NUM_BITS 1 + +/* If 1, enables notifications of the other CFA for this connection. These + * notifications are sent if a matching state machine rule specifies the + * notification as one of its actions. 
+ */ +#define CFA_P58_LKUP_CNX_MEM_SHARED_BITPOS 7 +#define CFA_P58_LKUP_CNX_MEM_SHARED_NUM_BITS 1 + +/* Current state for connection. */ +#define CFA_P58_LKUP_CNX_MEM_STATE_BITPOS 4 +#define CFA_P58_LKUP_CNX_MEM_STATE_NUM_BITS 3 + +/* Current timer value for connection. */ +#define CFA_P58_LKUP_CNX_MEM_TIMER_VALUE_BITPOS 0 +#define CFA_P58_LKUP_CNX_MEM_TIMER_VALUE_NUM_BITS 4 + +enum cfa_p58_lkup_cnx_mem_flds { + CFA_P58_LKUP_CNX_MEM_VALID_FLD = 0, + CFA_P58_LKUP_CNX_MEM_COPY_ENABLED_FLD = 1, + CFA_P58_LKUP_CNX_MEM_SEND_PENDING_FLD = 2, + CFA_P58_LKUP_CNX_MEM_SHARED_FLD = 3, + CFA_P58_LKUP_CNX_MEM_STATE_FLD = 4, + CFA_P58_LKUP_CNX_MEM_TIMER_VALUE_FLD = 5, + CFA_P58_LKUP_CNX_MEM_MAX_FLD +}; + +#define CFA_P58_LKUP_CNX_MEM_TOTAL_NUM_BITS 11 + +/* Entry is valid. (for idx 3 ...) */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_VALID_BITPOS 99 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_VALID_NUM_BITS 1 + +/* For resolving TCAM/EM conflicts (for idx 3 ...) */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_STRENGTH_BITPOS 97 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_STRENGTH_NUM_BITS 2 + +/* Destination or metadata. */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_DATA_BITPOS 81 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_DATA_NUM_BITS 16 + +/* Opcode. (for idx 2 ...) */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_OPCODE_BITPOS 78 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_OPCODE_NUM_BITS 3 + +/* Metadata profile. (for idx 2 ...) */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_META_PROF_BITPOS 75 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_META_PROF_NUM_BITS 3 + +/* Connection pointer or ring table index. */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_CTXT_DATA_BITPOS 61 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_CTXT_DATA_NUM_BITS 14 + +/* Key and spare bits. Next Block Entry (nblock) */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FIRST_KEY_DATA_BITPOS 0 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FIRST_KEY_DATA_NUM_BITS 61 + +/* Key and spare bits. None-auto-generated. 
*/ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_SECOND_KEY_DATA_LOW_BITPOS 128 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_SECOND_KEY_DATA_LOW_NUM_BITS 64 + +#define CFA_P58_LKUPP5_EM_RECORD_MEM_SECOND_KEY_DATA_HIGH_BITPOS 192 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_SECOND_KEY_DATA_HIGH_NUM_BITS 64 + +#define CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_LOW_BITPOS 256 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_LOW_NUM_BITS 64 + +#define CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_HIGH_BITPOS 320 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_HIGH_NUM_BITS 64 + +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_LOW_BITPOS 384 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_LOW_NUM_BITS 64 + +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_HIGH_BITPOS 448 +#define CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_HIGH_NUM_BITS 64 + +/* None-auto-generated. */ +enum cfa_p58_lkupp5_em_record_mem_flds { + CFA_P58_LKUPP5_EM_RECORD_MEM_VALID_FLD = 0, + CFA_P58_LKUPP5_EM_RECORD_MEM_STRENGTH_FLD = 1, + CFA_P58_LKUPP5_EM_RECORD_MEM_DATA_FLD = 2, + CFA_P58_LKUPP5_EM_RECORD_MEM_OPCODE_FLD = 3, + CFA_P58_LKUPP5_EM_RECORD_MEM_META_PROF_FLD = 4, + CFA_P58_LKUPP5_EM_RECORD_MEM_CTXT_DATA_FLD = 5, + CFA_P58_LKUPP5_EM_RECORD_MEM_FIRST_KEY_DATA_FLD = 6, + CFA_P58_LKUPP5_EM_RECORD_MEM_SECORD_KEY_DATA_LOW_FLD = 7, + CFA_P58_LKUPP5_EM_RECORD_MEM_SECORD_KEY_DATA_HIGH_FLD = 8, + CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_LOW_FLD = 9, + CFA_P58_LKUPP5_EM_RECORD_MEM_THIRD_KEY_DATA_HIGH_FLD = 10, + CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_LOW_FLD = 11, + CFA_P58_LKUPP5_EM_RECORD_MEM_FOURTH_KEY_DATA_HIGH_FLD = 12, + CFA_P58_LKUPP5_EM_RECORD_MEM_MAX_FLD +}; + +/* Auto-generated number is modified. */ +#define CFA_P58_LKUPP5_EM_RECORD_MEM_TOTAL_NUM_BITS 512 + +/* Excess Information Rate. */ +#define CFA_P58_ACT_P5_MPT_EIR_BITPOS 48 +#define CFA_P58_ACT_P5_MPT_EIR_NUM_BITS 17 + +/* Committed Information Rate. 
*/ +#define CFA_P58_ACT_P5_MPT_CIR_BITPOS 31 +#define CFA_P58_ACT_P5_MPT_CIR_NUM_BITS 17 + +/* Excess Burst Size. */ +#define CFA_P58_ACT_P5_MPT_EBS_BITPOS 19 +#define CFA_P58_ACT_P5_MPT_EBS_NUM_BITS 12 + +/* Committed Burst Size. */ +#define CFA_P58_ACT_P5_MPT_CBS_BITPOS 7 +#define CFA_P58_ACT_P5_MPT_CBS_NUM_BITS 12 + +/* Excess Bucket No Decrement. */ +#define CFA_P58_ACT_P5_MPT_EBND_BITPOS 6 +#define CFA_P58_ACT_P5_MPT_EBND_NUM_BITS 1 + +/* Committed Bucket No Decrement. */ +#define CFA_P58_ACT_P5_MPT_CBND_BITPOS 5 +#define CFA_P58_ACT_P5_MPT_CBND_NUM_BITS 1 + +/* Excess Bucket Strict Mode. */ +#define CFA_P58_ACT_P5_MPT_EBSM_BITPOS 4 +#define CFA_P58_ACT_P5_MPT_EBSM_NUM_BITS 1 + +/* Committed Bucket Strict Mode. */ +#define CFA_P58_ACT_P5_MPT_CBSM_BITPOS 3 +#define CFA_P58_ACT_P5_MPT_CBSM_NUM_BITS 1 + +/* RFC2698 Mode. */ +#define CFA_P58_ACT_P5_MPT_RFC2698_BITPOS 2 +#define CFA_P58_ACT_P5_MPT_RFC2698_NUM_BITS 1 + +/* Packet Mode. */ +#define CFA_P58_ACT_P5_MPT_PM_BITPOS 1 +#define CFA_P58_ACT_P5_MPT_PM_NUM_BITS 1 + +/* Coupling Flag. 
*/ +#define CFA_P58_ACT_P5_MPT_CF_BITPOS 0 +#define CFA_P58_ACT_P5_MPT_CF_NUM_BITS 1 + +enum cfa_p58_act_p5_mpt_flds { + CFA_P58_ACT_P5_MPT_EIR_FLD = 0, + CFA_P58_ACT_P5_MPT_CIR_FLD = 1, + CFA_P58_ACT_P5_MPT_EBS_FLD = 2, + CFA_P58_ACT_P5_MPT_CBS_FLD = 3, + CFA_P58_ACT_P5_MPT_EBND_FLD = 4, + CFA_P58_ACT_P5_MPT_CBND_FLD = 5, + CFA_P58_ACT_P5_MPT_EBSM_FLD = 6, + CFA_P58_ACT_P5_MPT_CBSM_FLD = 7, + CFA_P58_ACT_P5_MPT_RFC2698_FLD = 8, + CFA_P58_ACT_P5_MPT_PM_FLD = 9, + CFA_P58_ACT_P5_MPT_CF_FLD = 10, + CFA_P58_ACT_P5_MPT_MAX_FLD +}; + +#define CFA_P58_ACT_P5_MPT_TOTAL_NUM_BITS 65 + +/* CFA flexible key layout definition */ +enum cfa_p58_key_fld_id { CFA_P58_KEY_FLD_ID_MAX }; + +/* Mirror - Non-autogenerated */ +#define CFA_P58_ACTP5_MICR_EN_BITPOS 31 +#define CFA_P58_ACTP5_MICR_EN_NUM_BITS 1 + +#define CFA_P58_ACTP5_MICR_COPY_BITPOS 30 +#define CFA_P58_ACTP5_MICR_COPY_NUM_BITS 1 + +#define CFA_P58_ACTP5_MICR_IGN_DROP_BITPOS 29 +#define CFA_P58_ACTP5_MICR_IGN_DROP_NUM_BITS 1 + +#define CFA_P58_ACTP5_MICR_AR_PTR_BITPOS 0 +#define CFA_P58_ACTP5_MICR_AR_PTR_NUM_BITS 16 + +enum cfa_p58_mirror_flds { + CFA_P58_MIRROR_TBL_EN_FLD, + CFA_P58_MIRROR_TBL_COPY_FLD, + CFA_P58_MIRROR_TBL_IGN_DROP_FLD, + CFA_P58_MIRROR_TBL_AR_PTR_FLD, + CFA_P58_MIRROR_TBL_MAX_FLD +}; + +#define CFA_P58_ACTP5_MICR_TOTAL_NUM_BITS 32 + +/* ABCR- Non-autogenerated */ +#define CFA_P58_ACT_ABCR_VTAG_TPID_BITPOS 16 +#define CFA_P58_ACT_ABCR_VTAG_TPID_NUM_BITS 16 + +#define CFA_P58_ACT_ABCR_DFLTPRI_BITPOS 13 +#define CFA_P58_ACT_ABCR_DFLTPRI_NUM_BITS 3 + +#define CFA_P58_ACT_ABCR_ECNM_EN_BITPOS 12 +#define CFA_P58_ACT_ABCR_ECNM_EN_NUM_BITS 1 + +#define CFA_P58_ACT_ABCR_ECNP_EN_BITPOS 10 +#define CFA_P58_ACT_ABCR_ECNP_EN_NUM_BITS 1 + +#define CFA_P58_ACT_ABCR_VEB_EN_BITPOS 9 +#define CFA_P58_ACT_ABCR_VEB_EN_NUM_BITS 1 + +#define CFA_P58_ACT_ABCR_TX_HW_HDR_BITPOS 7 +#define CFA_P58_ACT_ABCR_TX_HW_HDR_NUM_BITS 2 + +#define CFA_P58_ACT_ABCR_VTAG_EDIT_EN_BITPOS 4 +#define CFA_P58_ACT_ABCR_VTAG_EDIT_EN_NUM_BITS 1 + 
+#define CFA_P58_ACT_ABCR_MCAST_EN_BITPOS 3 +#define CFA_P58_ACT_ABCR_MCAST_EN_NUM_BITS 1 + +#define CFA_P58_ACT_ABCR_QCN_DLT_EN_BITPOS 1 +#define CFA_P58_ACT_ABCR_QCN_DLT_EN_NUM_BITS 1 + +enum cfa_p58_abcr_flds { + CFA_P58_ACT_ABCR_TBL_VTAG_TPID_FLD, + CFA_P58_ACT_ABCR_TBL_DFLTPRI_FLD, + CFA_P58_ACT_ABCR_TBL_ECNM_EN_FLD, + CFA_P58_ACT_ABCR_TBL_ECNP_EN_FLD, + CFA_P58_ACT_ABCR_TBL_VEB_EN_FLD, + CFA_P58_ACT_ABCR_TBL_TX_HW_HDR_FLD, + CFA_P58_ACT_ABCR_TBL_VTAG_EDIT_EN_FLD, + CFA_P58_ACT_ABCR_TBL_MCAST_EN_FLD, + CFA_P58_ACT_ABCR_TBL_QCN_DLT_EN_FLD, + CFA_P58_ACT_ABCR_TBL_MAX_FLD +}; + +#define CFA_P58_ACT_ABCR_TOTAL_NUM_BITS 32 + +#endif /* _CFA_P58_HW_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa.h new file mode 100644 index 000000000000..46997769e605 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +/*! + * \file + * \brief Exported functions for CFA HW programming + */ +#ifndef _HCAPI_CFA_H_ +#define _HCAPI_CFA_H_ + +#include +#include "hcapi_cfa_defs.h" + +struct hcapi_cfa_devops; + +/** + * CFA device information + */ +struct hcapi_cfa_devinfo { + /** [out] CFA hw fix formatted layouts */ + const struct hcapi_cfa_layout_tbl *layouts; + /** [out] CFA device ops function pointer table */ + const struct hcapi_cfa_devops *devops; +}; + +/** + * \defgroup CFA_HCAPI_DEVICE_API + * HCAPI used for writing to the hardware + * @{ + */ + +/** CFA device specific function hooks structure + * + * The following device hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. The pupose of these hooks + * to support CFA device operations for different device variants. 
+ */ +struct hcapi_cfa_devops { + /** calculate a key hash for the provided key_data + * + * This API computes hash for a key. + * + * @param[in] key_data + * A pointer of the key data buffer + * + * @param[in] bitlen + * Number of bits of the key data + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ + u64 (*hcapi_cfa_key_hash)(u8 *key_data, u16 bitlen); +}; + +/*@}*/ + +extern const size_t CFA_RM_HANDLE_DATA_SIZE; + +#if SUPPORT_CFA_HW_ALL +extern const struct hcapi_cfa_devops cfa_p4_devops; +extern const struct hcapi_cfa_devops cfa_p58_devops; +extern const struct hcapi_cfa_devops cfa_p59_devops; +extern const struct hcapi_cfa_layout_tbl cfa_p59_layout_tbl; + +u64 hcapi_cfa_p59_key_hash(u64 *key_data, u16 bitlen); +#elif defined(SUPPORT_CFA_HW_P4) && SUPPORT_CFA_HW_P4 +extern const struct hcapi_cfa_devops cfa_p4_devops; +u64 hcapi_cfa_p4_key_hash(u64 *key_data, u16 bitlen); +/* SUPPORT_CFA_HW_P4 */ +#elif SUPPORT_CFA_HW_P45 +/* Firmware function defines */ +/* SUPPORT_CFA_HW_P45 */ +#elif defined(SUPPORT_CFA_HW_P58) && SUPPORT_CFA_HW_P58 +extern const struct hcapi_cfa_devops cfa_p58_devops; +u64 hcapi_cfa_p58_key_hash(u64 *key_data, u16 bitlen); +/* SUPPORT_CFA_HW_P58 */ +#elif defined(SUPPORT_CFA_HW_P59) && SUPPORT_CFA_HW_P59 +extern const struct hcapi_cfa_devops cfa_p59_devops; +extern const struct hcapi_cfa_layout_tbl cfa_p59_layout_tbl; +u64 hcapi_cfa_p59_key_hash(u64 *key_data, u16 bitlen); +#ifdef CFA_HW_SUPPORT_HOST_IF +#else +#endif +/* SUPPORT_CFA_HW_P59 */ +#endif + +#endif /* HCAPI_CFA_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_defs.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_defs.h new file mode 100644 index 000000000000..eeee7c11c5ce --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_defs.h @@ -0,0 +1,794 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +/* Exported functions for CFA HW programming */ + +#ifndef _HCAPI_CFA_DEFS_H_ +#define _HCAPI_CFA_DEFS_H_ + +#include + +#define CFA_BITS_PER_BYTE (8) +#define CFA_BITS_PER_WORD (sizeof(u32) * CFA_BITS_PER_BYTE) +#define __CFA_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#define CFA_ALIGN(x, a) __CFA_ALIGN_MASK((x), (a) - 1) +#define CFA_ALIGN_256(x) CFA_ALIGN(x, 256) +#define CFA_ALIGN_128(x) CFA_ALIGN(x, 128) +#define CFA_ALIGN_32(x) CFA_ALIGN(x, 32) + +#define NUM_WORDS_ALIGN_32BIT(x) (CFA_ALIGN_32(x) / CFA_BITS_PER_WORD) +#define NUM_WORDS_ALIGN_128BIT(x) (CFA_ALIGN_128(x) / CFA_BITS_PER_WORD) +#define NUM_WORDS_ALIGN_256BIT(x) (CFA_ALIGN_256(x) / CFA_BITS_PER_WORD) + +/* TODO: redefine according to chip variant */ +#define CFA_GLOBAL_CFG_DATA_SZ (100) + +#ifndef SUPPORT_CFA_HW_P4 +#define SUPPORT_CFA_HW_P4 (0) +#endif + +#ifndef SUPPORT_CFA_HW_P45 +#define SUPPORT_CFA_HW_P45 (0) +#endif + +#ifndef SUPPORT_CFA_HW_P58 +#define SUPPORT_CFA_HW_P58 (0) +#endif + +#ifndef SUPPORT_CFA_HW_P59 +#define SUPPORT_CFA_HW_P59 (0) +#endif + +#if SUPPORT_CFA_HW_P4 && SUPPORT_CFA_HW_P45 && SUPPORT_CFA_HW_P58 && \ + SUPPORT_CFA_HW_P59 +#define SUPPORT_CFA_HW_ALL (1) +#endif + +#if SUPPORT_CFA_HW_ALL +#include "hcapi_cfa_p4.h" +#include "hcapi_cfa_p58.h" + +#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD +#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_RMP_DR_MAX_FLD +#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p58_prof_key_cfg) +#define CFA_KEY_MAX_FIELD_CNT CFA_P58_KEY_FLD_ID_MAX +#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p58_action_template) +#else +#if SUPPORT_CFA_HW_P4 || SUPPORT_CFA_HW_P45 +#include "hcapi_cfa_p4.h" +#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD +#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_RMP_DR_MAX_FLD +#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p4_prof_key_cfg) +#define CFA_KEY_MAX_FIELD_CNT CFA_P40_KEY_FLD_ID_MAX +#define 
CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p4_action_template) +#endif +#if SUPPORT_CFA_HW_P58 +#include "hcapi_cfa_p58.h" +#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_TCAM_MAX_FLD +#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P58_PROF_L2_CTXT_RMP_DR_MAX_FLD +#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p5_prof_key_cfg) +#define CFA_KEY_MAX_FIELD_CNT CFA_P58_KEY_FLD_ID_MAX +#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p58_action_template) +#endif +#if SUPPORT_CFA_HW_P59 +#include "hcapi_cfa_p59.h" +#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P59_PROF_L2_CTXT_TCAM_MAX_FLD +#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P59_PROF_L2_CTXT_RMP_DR_MAX_FLD +#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p59_prof_key_cfg) +#define CFA_KEY_MAX_FIELD_CNT CFA_P59_EM_KEY_LAYOUT_MAX_FLD +#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p59_action_template) +#endif +#endif /* SUPPORT_CFA_HW_ALL */ + +/* Hashing defines */ +#define HCAPI_CFA_LKUP_SEED_MEM_SIZE 512 + +/* CRC32i support for Key0 hash */ +extern const u32 crc32tbl[]; +#define ucrc32(ch, crc) (crc32tbl[((crc) ^ (ch)) & 0xff] ^ ((crc) >> 8)) + +/* CFA HW version definition */ +enum hcapi_cfa_ver { + HCAPI_CFA_P40 = 0, /* CFA phase 4.0 */ + HCAPI_CFA_P45 = 1, /* CFA phase 4.5 */ + HCAPI_CFA_P58 = 2, /* CFA phase 5.8 */ + HCAPI_CFA_P59 = 3, /* CFA phase 5.9 */ + HCAPI_CFA_PMAX = 4 +}; + +/* CFA direction definition */ +enum hcapi_cfa_dir { + HCAPI_CFA_DIR_RX = 0, /* Receive */ + HCAPI_CFA_DIR_TX = 1, /* Transmit */ + HCAPI_CFA_DIR_MAX = 2 +}; + +/* CFA HW OPCODE definition */ +enum hcapi_cfa_hwops { + HCAPI_CFA_HWOPS_PUT, /* Write to HW operation */ + HCAPI_CFA_HWOPS_GET, /* Read from HW operation */ + HCAPI_CFA_HWOPS_ADD, /* For operations which require more then + * simple writes to HW, this operation is + * used. 
The distinction with this operation + * when compared to the PUT ops is that this + * operation is used in conjunction with + * the HCAPI_CFA_HWOPS_DEL op to remove + * the operations issued by the ADD OP. + */ + HCAPI_CFA_HWOPS_DEL, /* Beside to delete from the hardware, this + * operation is also undo the add operation + * performed by the HCAPI_CFA_HWOPS_ADD op. + */ + HCAPI_CFA_HWOPS_EVICT, /* This operaton is used to evict entries from + * CFA cache memories. This operation is only + * applicable to tables that use CFA caches. + */ + HCAPI_CFA_HWOPS_MAX +}; + +/* CFA HW KEY CONTROL OPCODE definition */ +enum hcapi_cfa_key_ctrlops { + HCAPI_CFA_KEY_CTRLOPS_INSERT, /* insert control bits */ + HCAPI_CFA_KEY_CTRLOPS_STRIP, /* strip control bits */ + HCAPI_CFA_KEY_CTRLOPS_MAX +}; + +/** + * CFA HW field structure definition + * @bitops: Starting bit position pf the HW field within a HW table + * entry. + * @bitlen: Number of bits for the HW field. + */ +struct hcapi_cfa_field { + u16 bitpos; + u16 bitlen; +}; + +/** + * CFA HW table entry layout structure definition + * @is_msb_order: Bit order of layout + * @total_sz_in_bits: Size in bits of entry + * @field_array: data pointer of the HW layout fields array + * @array_sz: number of HW field entries in the HW layout field array + * @layout_id: layout id associated with the layout + */ +struct hcapi_cfa_layout { + bool is_msb_order; + u32 total_sz_in_bits; + struct hcapi_cfa_field *field_array; + u32 array_sz; + u16 layout_id; +}; + +/** + * CFA HW data object definition + * @field_id: HW field identifier. Used as an index to a HW table layout + * @val: Value of the HW field + */ +struct hcapi_cfa_data_obj { + u16 field_id; + u64 val; +}; + +/** + * CFA HW definition + * @base_addr: HW table base address for the operation with optional device + * handle. For on-chip HW table operation, this is the either + * the TX or RX CFA HW base address. 
For off-chip table, this + * field is the base memory address of the off-chip table. + * @handle: Optional opaque device handle. It is generally used to access + * an GRC register space through PCIE BAR and passed to the BAR + * memory accessor routine. + */ +struct hcapi_cfa_hw { + u64 base_addr; + void *handle; +}; + +/** + * CFA HW operation definition + * @opcode: HW opcode + * @hw: CFA HW information used by accessor routines + */ +struct hcapi_cfa_hwop { + enum hcapi_cfa_hwops opcode; + struct hcapi_cfa_hw hw; +}; + +/** + * CFA HW data structure definition + * @union: physical offset to the HW table for the data to be + * written to. If this is an array of registers, this is the + * index into the array of registers. For writing keys, this + * is the byte pointer into the memory where the key should be + * written. + * @data: HW data buffer pointer + * @data_mask: HW data mask buffer pointer. When the CFA data is a FKB and + * data_mask pointer is NULL, then the default mask to enable + * all bit will be used. 
+ * @data_sz: size of the HW data buffer in bytes + */ +struct hcapi_cfa_data { + union { + u32 index; + u32 byte_offset; + }; + u8 *data; + u8 *data_mask; + u16 data_sz; +}; + +/********************** Truflow start ***************************/ +enum hcapi_cfa_pg_tbl_lvl { + TF_PT_LVL_0, + TF_PT_LVL_1, + TF_PT_LVL_2, + TF_PT_LVL_MAX +}; + +enum hcapi_cfa_em_table_type { + TF_KEY0_TABLE, + TF_KEY1_TABLE, + TF_RECORD_TABLE, + TF_EFC_TABLE, + TF_ACTION_TABLE, + TF_EM_LKUP_TABLE, + TF_MAX_TABLE +}; + +struct hcapi_cfa_em_page_tbl { + u32 pg_count; + u32 pg_size; + void **pg_va_tbl; + u64 *pg_pa_tbl; +}; + +struct hcapi_cfa_em_table { + int type; + u32 num_entries; + u16 ctx_id; + u32 entry_size; + int num_lvl; + u32 page_cnt[TF_PT_LVL_MAX]; + u64 num_data_pages; + void *l0_addr; + u64 l0_dma_addr; + struct hcapi_cfa_em_page_tbl pg_tbl[TF_PT_LVL_MAX]; +}; + +struct hcapi_cfa_em_ctx_mem_info { + struct hcapi_cfa_em_table em_tables[TF_MAX_TABLE]; +}; + +/********************** Truflow end ****************************/ + +/** + * CFA HW key table definition + * Applicable to EEM and off-chip EM table only. + * @base0: For EEM, this is the KEY0 base mem pointer. For off-chip EM, + * this is the base mem pointer of the key table. + * @size: total size of the key table in bytes. For EEM, this size is + * same for both KEY0 and KEY1 table. + * @num_buckets: number of key buckets, applicable for newer chips + * @base1: For EEM, this is KEY1 base mem pointer. For off-chip EM, + * this is the key record memory base pointer within the key + * table, applicable for newer chip + * @bs_db: Optional - If the table is managed by a Backing Store + * database, then this object can be used to configure the EM Key.
+ * @page_size: Page size for EEM tables + */ +struct hcapi_cfa_key_tbl { + u8 *base0; + u32 size; + u32 num_buckets; + u8 *base1; + struct hcapi_cfa_bs_db *bs_db; + u32 page_size; +}; + +/** + * CFA HW key buffer definition + * @data: pointer to the key data buffer + * @len: buffer len in bytes + * @layout: Pointer to the key layout + */ +struct hcapi_cfa_key_obj { + u32 *data; + u32 len; + struct hcapi_cfa_key_layout *layout; +}; + +/** + * CFA HW key data definition + * @offset: For on-chip key table, it is the offset in unit of smallest + * key. For off-chip key table, it is the byte offset relative + * to the key record memory base and adjusted for page and + * entry size. + * @data: HW key data buffer pointer + * @size: size of the key in bytes + * @tbl_scope: optional table scope ID + * @metadata: the fid owner of the key + * stored with the bucket which can be used by + * the caller to retrieve later via the GET HW OP. + */ +struct hcapi_cfa_key_data { + u32 offset; + u8 *data; + u16 size; + u8 tbl_scope; + u64 metadata; +}; + +/** + * CFA HW key location definition + * @bucket_mem_ptr: on-chip EM bucket offset or off-chip EM bucket + * mem pointer + * @mem_ptr: off-chip EM key offset mem pointer + * @bucket_mem_idx: index within the array of the EM buckets + * @bucket_idx: index within the EM bucket + * @mem_idx: index within the EM records + */ +struct hcapi_cfa_key_loc { + u64 bucket_mem_ptr; + u64 mem_ptr; + u32 bucket_mem_idx; + u8 bucket_idx; + u32 mem_idx; +}; + +/** + * CFA HW layout table definition + * @tbl: data pointer to an array of fix formatted layouts supported. + * The index to the array is the CFA HW table ID + * @num_layouts: number of fix formatted layouts in the layout array + */ +struct hcapi_cfa_layout_tbl { + const struct hcapi_cfa_layout *tbl; + u16 num_layouts; +}; + +/** + * Key template consists of key fields that can be enabled/disabled + * individually. 
+ * @field_en: key field enable field array, set 1 to the + * corresponding field enable to make a field valid + * @is_wc_tcam_key: Identify if the key template is for TCAM. If false, + * the key template is for EM. This field is + * mandatory for device that only supports fix key + * formats. + * @is_ipv6_key: Identify if the key template will be used for + * IPv6 Keys. + */ +struct hcapi_cfa_key_template { + u8 field_en[CFA_KEY_MAX_FIELD_CNT]; + bool is_wc_tcam_key; + bool is_ipv6_key; +}; + +/** + * key layout consists of field array, key bitlen, key ID, and other meta data + * pertaining to a key + * @layout: key layout data + * @bitlen: actual key size in number of bits + * @id: key identifier and this field is only valid for device + * that supports fix key formats + * @is_wc_tcam_key: Identify if the key layout is a WC TCAM key + * @is_ipv6_key: Identify if the key template will be used for IPv6 Keys. + * @slices_size: total slices size, valid for WC TCAM key only. It can + * be used by the user to determine the total size of WC + * TCAM key slices in bytes. + */ +struct hcapi_cfa_key_layout { + struct hcapi_cfa_layout *layout; + u16 bitlen; + u16 id; + bool is_wc_tcam_key; + bool is_ipv6_key; + u16 slices_size; +}; + +/** + * key layout memory contents + * @key_layout: key layouts + * @layout: layout + * @field_array: fields + */ +struct hcapi_cfa_key_layout_contents { + struct hcapi_cfa_key_layout key_layout; + struct hcapi_cfa_layout layout; + struct hcapi_cfa_field field_array[CFA_KEY_MAX_FIELD_CNT]; +}; + +/** + * Action template consists of action fields that can be enabled/disabled + * individually.
+ * @hw_ver: CFA version for the action template + * @data: action field enable field array, set 1 to the corresponding + * field enable to make a field valid + */ +struct hcapi_cfa_action_template { + enum hcapi_cfa_ver hw_ver; + u8 data[CFA_ACT_MAX_TEMPLATE_SZ]; +}; + +/** + * Action record info + * @blk_id: action SRAM block ID for on-chip action records or table + * scope of the action backing store + * @offset: offset + */ +struct hcapi_cfa_action_addr { + u16 blk_id; + u32 offset; +}; + +/** + * Action data definition + * @addr: action record addr info for on-chip action records + * @data: pointer to the action data buffer + * @len: action data buffer len in bytes + */ +struct hcapi_cfa_action_data { + struct hcapi_cfa_action_addr addr; + u32 *data; + u32 len; +}; + +/** + * Action object definition + * @data: pointer to the action data buffer + * @len: buffer len in bytes + * @layout: pointer to the action layout + */ +struct hcapi_cfa_action_obj { + u32 *data; + u32 len; + struct hcapi_cfa_action_layout *layout; +}; + +/** + * action layout consists of field array, action wordlen and action format ID + * @id: action identifier + * @layout: action layout data + * @bitlen: actual action record size in number of bits + */ +struct hcapi_cfa_action_layout { + u16 id; + struct hcapi_cfa_layout *layout; + u16 bitlen; +}; + +/* CFA backing store type definition */ +enum hcapi_cfa_bs_type { + HCAPI_CFA_BS_TYPE_LKUP, /* EM LKUP backing store type */ + HCAPI_CFA_BS_TYPE_ACT, /* Action backing store type */ + HCAPI_CFA_BS_TYPE_MAX +}; + +/* CFA backing store configuration data object */ +struct hcapi_cfa_bs_cfg { + enum hcapi_cfa_bs_type type; + u16 tbl_scope; + struct hcapi_cfa_bs_db *bs_db; +}; + +/** + * CFA backing store data base object + * @signature: memory manager database signature + * @mgmt_db: memory manager database base pointer (VA) + * @mgmt_db_sz: memory manager database size in bytes + * @bs_ptr: Backing store memory pool base pointer + * (VA – backed
by IOVA which is DMA accessible) + * @offset: bs_offset - byte offset to the section of the backing + * store memory managed by the backing store memory manager. + * For EM backing store, this is the starting byte offset + * to the EM record memory. For Action backing store, this + * offset is 0. + * @bs_sz: backing store memory pool size in bytes + */ +struct hcapi_cfa_bs_db { + u32 signature; +#define HCAPI_CFA_BS_SIGNATURE 0xCFA0B300 + void *mgmt_db; + u32 mgmt_db_sz; + void *bs_ptr; + u32 offset; + u32 bs_sz; +}; + +/** + * defgroup CFA_HCAPI_PUT_API + * HCAPI used for writing to the hardware + */ + +/** + * This API provides the functionality to program a specified value to a + * HW field based on the provided programming layout. + * + * @data_buf: A data pointer to a CFA HW key/mask data + * @layout: A pointer to CFA HW programming layout + * @field_id: ID of the HW field to be programmed + * @val: Value of the HW field to be programmed + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_put_field(u64 *data_buf, const struct hcapi_cfa_layout *layout, + u16 field_id, u64 val); + +/** + * This API provides the functionality to program an array of field values + * with corresponding field IDs to a number of profiler sub-block fields + * based on the fixed profiler sub-block hardware programming layout. 
+ * + * @obj_data: A pointer to a CFA profiler key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_tbl: A pointer to an array that consists of the object field + * ID/value pairs + * @field_tbl_sz: Number of entries in the table + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_put_fields(u64 *obj_data, const struct hcapi_cfa_layout *layout, + struct hcapi_cfa_data_obj *field_tbl, + u16 field_tbl_sz); +/** + * This API provides the functionality to program an array of field values + * with corresponding field IDs to a number of profiler sub-block fields + * based on the fixed profiler sub-block hardware programming layout. This + * API will swap the n byte blocks before programming the field array. + * + * @obj_data: A pointer to a CFA profiler key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_tbl: A pointer to an array that consists of the object field + * ID/value pairs + * @field_tbl_sz: Number of entries in the table + * @data_size: size of the data in bytes + * @n: block size in bytes + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_put_fields_swap(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + struct hcapi_cfa_data_obj *field_tbl, + u16 field_tbl_sz, u16 data_size, + u16 n); +/** + * This API provides the functionality to write a value to a + * field within the bit position and bit length of a HW data + * object based on a provided programming layout. 
+ * + * @act_obj: A pointer of the action object to be initialized + * @layout: A pointer of the programming layout + * @field_id: Identifier of the HW field + * @bitpos_adj: Bit position adjustment value + * @bitlen_adj: Bit length adjustment value + * @val: HW field value to be programmed + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_put_field_rel(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + u16 field_id, int16_t bitpos_adj, + s16 bitlen_adj, u64 val); + +/** + * defgroup CFA_HCAPI_GET_API + * HCAPI used for reading from the hardware + */ + +/** + * This API provides the functionality to get the word length of + * a layout object. + * + * @layout: A pointer of the HW layout + * @return: + * Word length of the layout object + */ +u16 hcapi_cfa_get_wordlen(const struct hcapi_cfa_layout *layout); + +/** + * The API provides the functionality to get bit offset and bit + * length information of a field from a programming layout. + * + * @layout: A pointer of the action layout + * @slice: A pointer to the action offset info data structure + * + * @return: + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get_slice(const struct hcapi_cfa_layout *layout, + u16 field_id, struct hcapi_cfa_field *slice); + +/** + * This API provides the functionality to read the value of a + * CFA HW field from CFA HW data object based on the hardware + * programming layout. + * + * @obj_data: A pointer to a CFA HW key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_id: ID of the HW field to be programmed + * @val: Value of the HW field + * + * @return: + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get_field(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + u16 field_id, u64 *val); + +/** + * This API provides the functionality to read 128-bit value of + * a CFA HW field from CFA HW data object based on the hardware + * programming layout. 
+ * + * @obj_data: A pointer to a CFA HW key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_id: ID of the HW field to be programmed + * @val_msb: Msb value of the HW field + * @val_lsb: Lsb value of the HW field + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get128_field(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + u16 field_id, u64 *val_msb, + u64 *val_lsb); + +/** + * This API provides the functionality to read a number of + * HW fields from a CFA HW data object based on the hardware + * programming layout. + * + * @obj_data: A pointer to a CFA profiler key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_tbl: A pointer to an array that consists of the object field + * ID/value pairs + * @field_tbl_sz: Number of entries in the table + * + * @return: + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get_fields(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + struct hcapi_cfa_data_obj *field_tbl, + u16 field_tbl_sz); + +/** + * This API provides the functionality to read a number of + * HW fields from a CFA HW data object based on the hardware + * programming layout.This API will swap the n byte blocks before + * retrieving the field array. 
+ * + * @obj_data: A pointer to a CFA profiler key/mask object data + * @layout: A pointer to CFA HW programming layout + * @field_tbl: A pointer to an array that consists of the object field + * ID/value pairs + * @field_tbl_sz: Number of entries in the table + * @data_size: size of the data in bytes + * @n: block size in bytes + * + * @return: + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get_fields_swap(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + struct hcapi_cfa_data_obj *field_tbl, + u16 field_tbl_sz, u16 data_size, + u16 n); + +/** + * Get a value to a specific location relative to a HW field + * + * This API provides the functionality to read HW field from + * a section of a HW data object identified by the bit position + * and bit length from a given programming layout in order to avoid + * reading the entire HW data object. + * + * @obj_data: A pointer of the data object to read from + * @layout: A pointer of the programming layout + * @field_id: Identifier of the HW field + * @bitpos_adj: Bit position adjustment value + * @bitlen_adj: Bit length adjustment value + * @val: Value of the HW field + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_get_field_rel(u64 *obj_data, + const struct hcapi_cfa_layout *layout, + u16 field_id, int16_t bitpos_adj, + s16 bitlen_adj, u64 *val); + +/** + * Get the length of the layout in words + * + * @layout: A pointer to the layout to determine the number of words + * required + * + * @return + * number of words needed for the given layout + */ +u16 cfa_hw_get_wordlen(const struct hcapi_cfa_layout *layout); + +/** + * This function is used to initialize a layout_contents structure + * + * The struct hcapi_cfa_key_layout is complex as there are three + * layers of abstraction. Each of those layer need to be properly + * initialized. 
+ * + * @contents: A pointer of the layout contents to initialize + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_init_key_contents(struct hcapi_cfa_key_layout_contents + *contents); + +/** + * This function is used to validate a key template + * + * The struct hcapi_cfa_key_template is complex as there are three + * layers of abstraction. Each of those layer need to be properly + * validated. + * + * @key_template: A pointer of the key template contents to validate + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_is_valid_key_template(struct hcapi_cfa_key_template + *key_template); + +/** + * This function is used to validate a key layout + * + * The struct hcapi_cfa_key_layout is complex as there are three + * layers of abstraction. Each of those layer need to be properly + * validated. + * + * @key_layout: A pointer of the key layout contents to validate + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_is_valid_key_layout(struct hcapi_cfa_key_layout *key_layout); + +/** + * This function is used to hash E/EM keys + * + * @key_data: A pointer of the key + * @bitlen: Number of bits in the key + * + * @return + * CRC32 and Lookup3 hashes of the input key + */ +u64 hcapi_cfa_key_hash(u8 *key_data, u16 bitlen); + +/** + * This function is used to execute an operation + * + * @op: Operation + * @key_tbl: Table + * @key_obj: Key data + * @key_key_loc: Key location + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int hcapi_cfa_key_hw_op(struct hcapi_cfa_hwop *op, + struct hcapi_cfa_key_tbl *key_tbl, + struct hcapi_cfa_key_data *key_obj, + struct hcapi_cfa_key_loc *key_loc); + +u64 hcapi_get_table_page(struct hcapi_cfa_em_table *mem, u32 page); +u64 hcapi_cfa_p4_key_hash(u8 *key_data, u16 bitlen); +u64 hcapi_cfa_p58_key_hash(u8 *key_data, u16 bitlen); +#endif /* HCAPI_CFA_DEFS_H_ */ diff --git 
a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.c new file mode 100644 index 000000000000..72b8388fbf5c --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include + +#include +#include "rand.h" + +#include "hcapi_cfa.h" +#include "hcapi_cfa_defs.h" + +static u32 hcapi_cfa_lkup_lkup3_init_cfg; +static u32 hcapi_cfa_lkup_em_seed_mem[HCAPI_CFA_LKUP_SEED_MEM_SIZE]; +static bool hcapi_cfa_lkup_init; + +static void hcapi_cfa_seeds_init(void) +{ + int i; + u32 r; + + if (hcapi_cfa_lkup_init) + return; + + hcapi_cfa_lkup_init = true; + + /* Initialize the lfsr */ + rand_init(); + + /* RX and TX use the same seed values */ + hcapi_cfa_lkup_lkup3_init_cfg = swahb32(rand32()); + + for (i = 0; i < HCAPI_CFA_LKUP_SEED_MEM_SIZE / 2; i++) { + r = swahb32(rand32()); + hcapi_cfa_lkup_em_seed_mem[i * 2] = r; + r = swahb32(rand32()); + hcapi_cfa_lkup_em_seed_mem[i * 2 + 1] = (r & 0x1); + } +} + +static u32 hcapi_cfa_crc32_hash(u8 *key) +{ + u8 *kptr = key; + u32 val1, val2; + u8 temp[4]; + u32 index; + int i; + + /* Do byte-wise XOR of the 52-byte HASH key first. 
*/ + index = *key; + kptr++; + + for (i = 0; i < (CFA_P58_EEM_KEY_MAX_SIZE - 1); i++) { + index = index ^ *kptr; + kptr++; + } + + /* Get seeds */ + val1 = hcapi_cfa_lkup_em_seed_mem[index * 2]; + val2 = hcapi_cfa_lkup_em_seed_mem[index * 2 + 1]; + + temp[0] = (u8)(val1 >> 24); + temp[1] = (u8)(val1 >> 16); + temp[2] = (u8)(val1 >> 8); + temp[3] = (u8)(val1 & 0xff); + val1 = 0; + + /* Start with seed */ + if (!(val2 & 0x1)) + val1 = ~(crc32(~val1, temp, 4)); + + val1 = ~(crc32(~val1, + key, + CFA_P58_EEM_KEY_MAX_SIZE)); + + /* End with seed */ + if (val2 & 0x1) + val1 = ~(crc32(~val1, temp, 4)); + + return val1; +} + +static u32 hcapi_cfa_lookup3_hash(u8 *in_key) +{ + u32 val1; + + val1 = jhash2(((u32 *)in_key), + CFA_P4_EEM_KEY_MAX_SIZE / (sizeof(u32)), + hcapi_cfa_lkup_lkup3_init_cfg); + + return val1; +} + +u64 hcapi_get_table_page(struct hcapi_cfa_em_table *mem, u32 page) +{ + int level = 0; + u64 addr; + + if (!mem) + return 0; + + /* Use the level according to the num_level of page table */ + level = mem->num_lvl - 1; + addr = (u64)mem->pg_tbl[level].pg_va_tbl[page]; + + return addr; +} + +/* Approximation of HCAPI hcapi_cfa_key_hash() */ +u64 hcapi_cfa_p4_key_hash(u8 *key_data, u16 bitlen) +{ + u32 key0_hash; + u32 key1_hash; + u32 *key_word = (u32 *)key_data; + u32 lk3_key[CFA_P4_EEM_KEY_MAX_SIZE / sizeof(u32)]; + u32 i; + + /* Init the seeds if needed */ + if (!hcapi_cfa_lkup_init) + hcapi_cfa_seeds_init(); + + key0_hash = hcapi_cfa_crc32_hash(key_data); + + for (i = 0; i < (bitlen / 8) / sizeof(uint32_t); i++) + lk3_key[i] = swab32(key_word[i]); + + key1_hash = hcapi_cfa_lookup3_hash((u8 *)lk3_key); + + return ((u64)key0_hash) << 32 | (u64)key1_hash; +} + +const struct hcapi_cfa_devops cfa_p4_devops = { + .hcapi_cfa_key_hash = hcapi_cfa_p4_key_hash, +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.h new file mode 100644 index 000000000000..af16b1ad5257 --- 
/dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p4.h @@ -0,0 +1,452 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _HCAPI_CFA_P4_H_ +#define _HCAPI_CFA_P4_H_ + +#include "cfa_p40_hw.h" + +/* CFA phase 4 fix formatted table(layout) ID definition */ +enum cfa_p4_tbl_id { + CFA_P4_TBL_L2CTXT_TCAM = 0, + CFA_P4_TBL_L2CTXT_REMAP, + CFA_P4_TBL_PROF_TCAM, + CFA_P4_TBL_PROF_TCAM_REMAP, + CFA_P4_TBL_WC_TCAM, + CFA_P4_TBL_WC_TCAM_REC, + CFA_P4_TBL_WC_TCAM_REMAP, + CFA_P4_TBL_VEB_TCAM, + CFA_P4_TBL_SP_TCAM, + CFA_P4_TBL_PROF_SPIF_DFLT_L2CTXT, + CFA_P4_TBL_PROF_PARIF_DFLT_ACT_REC_PTR, + CFA_P4_TBL_PROF_PARIF_ERR_ACT_REC_PTR, + CFA_P4_TBL_LKUP_PARIF_DFLT_ACT_REC_PTR, + CFA_P4_TBL_MAX +}; + +#define CFA_P4_PROF_MAX_KEYS 4 +enum cfa_p4_mac_sel_mode { + CFA_P4_MAC_SEL_MODE_FIRST = 0, + CFA_P4_MAC_SEL_MODE_LOWEST = 1, +}; + +struct cfa_p4_prof_key_cfg { + u8 mac_sel[CFA_P4_PROF_MAX_KEYS]; +#define CFA_P4_PROF_MAC_SEL_DMAC0 BIT(0) +#define CFA_P4_PROF_MAC_SEL_T_MAC0 BIT(1) +#define CFA_P4_PROF_MAC_SEL_OUTERMOST_MAC0 BIT(2) +#define CFA_P4_PROF_MAC_SEL_DMAC1 BIT(3) +#define CFA_P4_PROF_MAC_SEL_T_MAC1 BIT(4) +#define CFA_P4_PROF_MAC_OUTERMOST_MAC1 BIT(5) + u8 pass_cnt; + enum cfa_p4_mac_sel_mode mode; +}; + +/* CFA action layout definition */ + +#define CFA_P4_ACTION_MAX_LAYOUT_SIZE 184 + +/** + * Action object template structure + * + * Template structure presents data fields that are necessary to know + * at the beginning of Action Builder (AB) processing. Like before the + * AB compilation. One such example could be a template that is + * flexible in size (Encap Record) and the presence of these fields + * allows for determining the template size as well as where the + * fields are located in the record. + * + * The template may also present fields that are not made visible to + * the caller by way of the action fields. 
+ * + * Template fields also allow for additional checking on user visible + * fields. One such example could be the encap pointer behavior on a + * CFA_P4_ACT_OBJ_TYPE_ACT or CFA_P4_ACT_OBJ_TYPE_ACT_SRAM. + */ +struct cfa_p4_action_template { + /** Action Object type + * + * Controls the type of the Action Template + */ + enum { + /** Select this type to build an Action Record Object + */ + CFA_P4_ACT_OBJ_TYPE_ACT, + /** Select this type to build an Action Statistics + * Object + */ + CFA_P4_ACT_OBJ_TYPE_STAT, + /** Select this type to build a SRAM Action Record + * Object. + */ + CFA_P4_ACT_OBJ_TYPE_ACT_SRAM, + /** Select this type to build a SRAM Action + * Encapsulation Object. + */ + CFA_P4_ACT_OBJ_TYPE_ENCAP_SRAM, + /** Select this type to build a SRAM Action Modify + * Object, with IPv4 capability. + */ + /* In case of Stingray the term Modify is used for the 'NAT + * action'. Action builder is leveraged to fill in the NAT + * object which then can be referenced by the action + * record. + */ + CFA_P4_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM, + /** Select this type to build a SRAM Action Source + * Property Object. + */ + /* In case of Stingray this is not a 'pure' action record. + * Action builder is leveraged to full in the Source Property + * object which can then be referenced by the action + * record. + */ + CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM, + /** Select this type to build a SRAM Action Statistics + * Object + */ + CFA_P4_ACT_OBJ_TYPE_STAT_SRAM, + } obj_type; + + /** Action Control + * + * Controls the internals of the Action Template + * + * act is valid when: + * (obj_type == CFA_P4_ACT_OBJ_TYPE_ACT) + */ + /* Stat and encap are always inline for EEM as table scope + * allocation does not allow for separate Stats allocation, + * but has the xx_inline flags as to be forward compatible + * with Stingray 2, always treated as TRUE. 
+ */ + struct { + /** Set to CFA_HCAPI_TRUE to enable statistics + */ + u8 stat_enable; + /** Set to CFA_HCAPI_TRUE to enable statistics to be inlined + */ + u8 stat_inline; + + /** Set to CFA_HCAPI_TRUE to enable encapsulation + */ + u8 encap_enable; + /** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined + */ + u8 encap_inline; + } act; + + /** Modify Setting + * + * Controls the type of the Modify Action the template is + * describing + * + * modify is valid when: + * (obj_type == CFA_P4_ACT_OBJ_TYPE_MODIFY_SRAM) + */ + enum { + /** Set to enable Modify of Source IPv4 Address + */ + CFA_P4_MR_REPLACE_SOURCE_IPV4 = 0, + /** Set to enable Modify of Destination IPv4 Address + */ + CFA_P4_MR_REPLACE_DEST_IPV4 + } modify; + + /** Encap Control + * Controls the type of encapsulation the template is + * describing + * + * encap is valid when: + * ((obj_type == CFA_P4_ACT_OBJ_TYPE_ACT) && + * act.encap_enable) || + * ((obj_type == CFA_P4_ACT_OBJ_TYPE_SRC_PROP_SRAM) + */ + struct { + /* Direction is required as Stingray Encap on RX is + * limited to l2 and VTAG only. + */ + /** Receive or Transmit direction + */ + u8 direction; + /** Set to CFA_HCAPI_TRUE to enable L2 capability in the + * template + */ + u8 l2_enable; + /** vtag controls the Encap Vector - VTAG Encoding, 4 bits + * + * + * CFA_P4_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN + * Tags applied + * CFA_P4_ACT_ENCAP_VTAGS_PUSH_1, adds capability to + * set 1 VLAN Tag. Action Template compile adds + * the following field to the action object + * ::TF_ER_VLAN1 + * CFA_P4_ACT_ENCAP_VTAGS_PUSH_2, adds capability to + * set 2 VLAN Tags. Action Template compile adds + * the following fields to the action object + * ::TF_ER_VLAN1 and ::TF_ER_VLAN2 + * + */ + enum { CFA_P4_ACT_ENCAP_VTAGS_PUSH_0 = 0, + CFA_P4_ACT_ENCAP_VTAGS_PUSH_1, + CFA_P4_ACT_ENCAP_VTAGS_PUSH_2 } vtag; + + /* The remaining fields are NOT supported when + * direction is RX and ((obj_type == + * CFA_P4_ACT_OBJ_TYPE_ACT) && act.encap_enable). 
+ * ab_compile_layout will perform the checking and + * skip remaining fields. + */ + /** L3 Encap controls the Encap Vector - L3 Encoding, + * 3 bits. Defines the type of L3 Encapsulation the + * template is describing. + * + * CFA_P4_ACT_ENCAP_L3_NONE, default, no L3 + * Encapsulation processing. + * CFA_P4_ACT_ENCAP_L3_IPV4, enables L3 IPv4 + * Encapsulation. + * CFA_P4_ACT_ENCAP_L3_IPV6, enables L3 IPv6 + * Encapsulation. + * CFA_P4_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS + * 8847 Encapsulation. + * CFA_P4_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS + * 8848 Encapsulation. + * + */ + enum { + /** Set to disable any L3 encapsulation + * processing, default + */ + CFA_P4_ACT_ENCAP_L3_NONE = 0, + /** Set to enable L3 IPv4 encapsulation + */ + CFA_P4_ACT_ENCAP_L3_IPV4 = 4, + /** Set to enable L3 IPv6 encapsulation + */ + CFA_P4_ACT_ENCAP_L3_IPV6 = 5, + /** Set to enable L3 MPLS 8847 encapsulation + */ + CFA_P4_ACT_ENCAP_L3_MPLS_8847 = 6, + /** Set to enable L3 MPLS 8848 encapsulation + */ + CFA_P4_ACT_ENCAP_L3_MPLS_8848 = 7 + } l3; + +#define CFA_P4_ACT_ENCAP_MAX_MPLS_LABELS 8 + /** 1-8 labels, valid when + * (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8847) || + * (l3 == CFA_P4_ACT_ENCAP_L3_MPLS_8848) + * + * MAX number of MPLS Labels 8. + */ + u8 l3_num_mpls_labels; + + /** Set to CFA_HCAPI_TRUE to enable L4 capability in the + * template. + * + * CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and + * ::TF_EN_UDP_DST_PORT to the template. + */ + u8 l4_enable; + + /** Tunnel Encap controls the Encap Vector - Tunnel + * Encap, 3 bits. Defines the type of Tunnel + * encapsulation the template is describing + * + * CFA_P4_ACT_ENCAP_TNL_NONE, default, no Tunnel + * Encapsulation processing. + * CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL + * CFA_P4_ACT_ENCAP_TNL_VXLAN. NOTE: Expects + * l4_enable set to CFA_P4_TRUE; + * CFA_P4_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable + * set to CFA_P4_TRUE; + * CFA_P4_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if + * l4_enable set to CFA_HCAPI_FALSE. 
+ * CFA_P4_ACT_ENCAP_TNL_GRE.NOTE: only valid if + * l4_enable set to CFA_HCAPI_FALSE. + * CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4 + * CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL + * + */ + enum { + /** Set to disable Tunnel header encapsulation + * processing, default + */ + CFA_P4_ACT_ENCAP_TNL_NONE = 0, + /** Set to enable Tunnel Generic Full header + * encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL, + /** Set to enable VXLAN header encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_VXLAN, + /** Set to enable NGE (VXLAN2) header encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_NGE, + /** Set to enable NVGRE header encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_NVGRE, + /** Set to enable GRE header encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_GRE, + /** Set to enable Generic header after Tunnel + * L4 encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4, + /** Set to enable Generic header after Tunnel + * encapsulation + */ + CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL + } tnl; + + /** Number of bytes of generic tunnel header, + * valid when + * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_FULL) || + * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) || + * (tnl == CFA_P4_ACT_ENCAP_TNL_GENERIC_AFTER_TNL) + */ + u8 tnl_generic_size; + /** Number of 32b words of nge options, + * valid when + * (tnl == CFA_P4_ACT_ENCAP_TNL_NGE) + */ + u8 tnl_nge_op_len; + /* Currently not planned */ + /* Custom Header */ + /* u8 custom_enable; */ + } encap; +}; + +/** + * Enumeration of SRAM entry types, used for allocation of + * fixed SRAM entities. The memory model for CFA HCAPI + * determines if an SRAM entry type is supported. + * NOTE: Any additions to this enum must be reflected on FW + * side as well. 
+ */ +enum cfa_p4_action_sram_entry_type { + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FULL_ACTION, /* SRAM Action Record */ + + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_0_ACTION, + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_1_ACTION, + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_2_ACTION, + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_3_ACTION, + CFA_P4_ACTION_SRAM_ENTRY_TYPE_FORMAT_4_ACTION, + + CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_8B, /* SRAM Action Encap + * 8 Bytes + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_16B,/* SRAM Action Encap + * 16 Bytes + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_ENCAP_64B,/* SRAM Action Encap + * 64 Bytes + */ + + CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_PORT_SRC, + CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_PORT_DEST, + + CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_SRC, /* SRAM Action Modify + * IPv4 Source + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_DEST, /* SRAM Action Modify + * IPv4 Destination + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC, /* SRAM Action Source + * Properties SMAC + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV4, /* SRAM Action Source + * Props SMAC IPv4 + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV6, /* SRAM Action Source + * Props SMAC IPv6 + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_STATS_64, /* SRAM Action + * Stats 64 Bits + */ + CFA_P4_ACTION_SRAM_ENTRY_TYPE_MAX +}; + +/** + * SRAM Action Record structure holding either an action index or an + * action ptr. + * @act_idx: SRAM Action idx specifies the offset of the SRAM + * element within its SRAM Entry Type block. This + * index can be written into i.e. an L2 Context. Use + * this type for all SRAM Action Record types except + * SRAM Full Action records. Use act_ptr instead. + * @act_ptr: SRAM Full Action is special in that it needs an + * action record pointer. This pointer can be written + * into i.e. a Wildcard TCAM entry. 
+ */ +union cfa_p4_action_sram_act_record { + u16 act_idx; + u32 act_ptr; +}; + +/** + * cfa_p4_action_param parameter definition + * @dir: receive or transmit direction + * @type: type of the sram allocation type + * @record: action record to set. The 'type' specified lists the + * record definition to use in the passed in record. + * @act_size: number of elements in act_data + * @act_data: ptr to array of action data + */ +struct cfa_p4_action_param { + u8 dir; + enum cfa_p4_action_sram_entry_type type; + union cfa_p4_action_sram_act_record record; + u32 act_size; + u64 *act_data; +}; + +/* EEM Key entry sizes */ +#define CFA_P4_EEM_KEY_MAX_SIZE 52 +#define CFA_P4_EEM_KEY_RECORD_SIZE 64 + +/** + * cfa_eem_entry_hdr + * @pointer: eem entry pointer + * @word1: The header is made up of two words, this is the first word. + * This field has multiple subfields, there is no suitable + * single name for it so just going with word1. + */ +struct cfa_p4_eem_entry_hdr { + u32 pointer; + u32 word1; +#define CFA_P4_EEM_ENTRY_VALID_SHIFT 31 +#define CFA_P4_EEM_ENTRY_VALID_MASK 0x80000000 +#define CFA_P4_EEM_ENTRY_L1_CACHEABLE_SHIFT 30 +#define CFA_P4_EEM_ENTRY_L1_CACHEABLE_MASK 0x40000000 +#define CFA_P4_EEM_ENTRY_STRENGTH_SHIFT 28 +#define CFA_P4_EEM_ENTRY_STRENGTH_MASK 0x30000000 +#define CFA_P4_EEM_ENTRY_RESERVED_SHIFT 17 +#define CFA_P4_EEM_ENTRY_RESERVED_MASK 0x0FFE0000 +#define CFA_P4_EEM_ENTRY_KEY_SIZE_SHIFT 8 +#define CFA_P4_EEM_ENTRY_KEY_SIZE_MASK 0x0001FF00 +#define CFA_P4_EEM_ENTRY_ACT_REC_SIZE_SHIFT 3 +#define CFA_P4_EEM_ENTRY_ACT_REC_SIZE_MASK 0x000000F8 +#define CFA_P4_EEM_ENTRY_ACT_REC_INT_SHIFT 2 +#define CFA_P4_EEM_ENTRY_ACT_REC_INT_MASK 0x00000004 +#define CFA_P4_EEM_ENTRY_EXT_FLOW_CTR_SHIFT 1 +#define CFA_P4_EEM_ENTRY_EXT_FLOW_CTR_MASK 0x00000002 +#define CFA_P4_EEM_ENTRY_ACT_PTR_MSB_SHIFT 0 +#define CFA_P4_EEM_ENTRY_ACT_PTR_MSB_MASK 0x00000001 +}; + +/** + * cfa_p4_eem_key_entry + * @key: Key is 448 bits - 56 bytes + * @hdr: Header is 8 bytes long + */ 
+struct cfa_p4_eem_64b_entry { + u8 key[CFA_P4_EEM_KEY_RECORD_SIZE - + sizeof(struct cfa_p4_eem_entry_hdr)]; + struct cfa_p4_eem_entry_hdr hdr; +}; + +#endif /* _CFA_HW_P4_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.c new file mode 100644 index 000000000000..cbbc939f5b2b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include + +#include +#include "rand.h" +#include "hcapi_cfa_defs.h" + +static u32 hcapi_cfa_lkup_lkup3_init_cfg; +static u32 hcapi_cfa_lkup_em_seed_mem[HCAPI_CFA_LKUP_SEED_MEM_SIZE]; +static bool hcapi_cfa_lkup_init; + +static void hcapi_cfa_seeds_init(void) +{ + int i; + u32 r; + + if (hcapi_cfa_lkup_init) + return; + + hcapi_cfa_lkup_init = true; + + /* Initialize the lfsr */ + rand_init(); + + /* RX and TX use the same seed values */ + hcapi_cfa_lkup_lkup3_init_cfg = rand32(); + + for (i = 0; i < HCAPI_CFA_LKUP_SEED_MEM_SIZE / 2; i++) { + r = rand32(); + hcapi_cfa_lkup_em_seed_mem[i * 2] = r; + r = rand32(); + hcapi_cfa_lkup_em_seed_mem[i * 2 + 1] = (r & 0x1); + } +} + +static u32 hcapi_cfa_crc32_hash(u8 *key) +{ + u8 *kptr = key; + u32 val1, val2; + u8 temp[4]; + u32 index; + int i; + + /* Do byte-wise XOR of the 52-byte HASH key first. 
*/ + index = *key; + kptr++; + + for (i = 0; i < (CFA_P58_EEM_KEY_MAX_SIZE - 1); i++) { + index = index ^ *kptr; + kptr++; + } + + /* Get seeds */ + val1 = hcapi_cfa_lkup_em_seed_mem[index * 2]; + val2 = hcapi_cfa_lkup_em_seed_mem[index * 2 + 1]; + + temp[0] = (u8)(val1 >> 24); + temp[1] = (u8)(val1 >> 16); + temp[2] = (u8)(val1 >> 8); + temp[3] = (u8)(val1 & 0xff); + val1 = 0; + + /* Start with seed */ + if (!(val2 & 0x1)) + val1 = ~(crc32(~val1, temp, 4)); + + val1 = ~(crc32(~val1, + key, + CFA_P58_EEM_KEY_MAX_SIZE)); + + /* End with seed */ + if (val2 & 0x1) + val1 = ~(crc32(~val1, temp, 4)); + + return val1; +} + +static u32 hcapi_cfa_lookup3_hash(u8 *in_key) +{ + u32 val1; + + val1 = jhash2(((u32 *)in_key), + CFA_P58_EEM_KEY_MAX_SIZE / (sizeof(u32)), + hcapi_cfa_lkup_lkup3_init_cfg); + + return val1; +} + +/* Approximation of HCAPI hcapi_cfa_key_hash() */ +u64 hcapi_cfa_p58_key_hash(u8 *key_data, u16 bitlen) +{ + u32 key0_hash; + u32 key1_hash; + u32 *key_word = (u32 *)key_data; + u32 lk3_key[CFA_P58_EEM_KEY_MAX_SIZE / sizeof(u32)]; + u32 i; + + /* Init the seeds if needed */ + if (!hcapi_cfa_lkup_init) + hcapi_cfa_seeds_init(); + + key0_hash = hcapi_cfa_crc32_hash(key_data); + + for (i = 0; i < (bitlen / (8 * sizeof(uint32_t))); i++) + lk3_key[i] = swab32(key_word[i]); + + key1_hash = hcapi_cfa_lookup3_hash((u8 *)lk3_key); + + return ((u64)key0_hash) << 32 | (u64)key1_hash; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.h new file mode 100644 index 000000000000..048906825385 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa/hcapi_cfa_p58.h @@ -0,0 +1,411 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef _HCAPI_CFA_P58_H_ +#define _HCAPI_CFA_P58_H_ + +#include "cfa_p58_hw.h" + +/* EEM Key entry sizes */ +#define CFA_P58_EEM_KEY_MAX_SIZE 80 +#define CFA_P58_EEM_KEY_RECORD_SIZE 80 + +#define CFA_P58_EM_FKB_NUM_WORDS 4 +#define CFA_P58_EM_FKB_NUM_ENTRIES 64 +#define CFA_P58_WC_TCAM_FKB_NUM_WORDS 4 +#define CFA_P58_WC_TCAM_FKB_NUM_ENTRIES 64 + +/* CFA phase 5.8 fix formatted table(layout) ID definition */ +enum cfa_p58_tbl_id { + CFA_P58_TBL_ILT = 0, + CFA_P58_TBL_L2CTXT_TCAM, + CFA_P58_TBL_L2CTXT_REMAP, + CFA_P58_TBL_PROF_TCAM, + CFA_P58_TBL_PROF_TCAM_REMAP, + CFA_P58_TBL_WC_TCAM, + CFA_P58_TBL_WC_TCAM_REC, + CFA_P58_TBL_VEB_TCAM, + CFA_P58_TBL_SP_TCAM, + CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR, /* Default Profile TCAM/Lookup + * Action Record Ptr Table + */ + CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR, /* Error Profile TCAM Miss + * Action Record Ptr Table + */ + CFA_P58_TBL_VSPT, /* VNIC/SVIF Props Table */ + CFA_P58_TBL_MAX +}; + +#define CFA_P58_PROF_MAX_KEYS 4 +enum cfa_p58_mac_sel_mode { + CFA_P58_MAC_SEL_MODE_FIRST = 0, + CFA_P58_MAC_SEL_MODE_LOWEST = 1, +}; + +struct cfa_p58_prof_key_cfg { + u8 mac_sel[CFA_P58_PROF_MAX_KEYS]; +#define CFA_P58_PROF_MAC_SEL_DMAC0 BIT(0) +#define CFA_P58_PROF_MAC_SEL_T_MAC0 BIT(1) +#define CFA_P58_PROF_MAC_SEL_OUTERMOST_MAC0 BIT(2) +#define CFA_P58_PROF_MAC_SEL_DMAC1 BIT(3) +#define CFA_P58_PROF_MAC_SEL_T_MAC1 BIT(4) +#define CFA_P58_PROF_MAC_OUTERMOST_MAC1 BIT(5) + u8 vlan_sel[CFA_P58_PROF_MAX_KEYS]; +#define CFA_P58_PROFILER_VLAN_SEL_INNER_HDR 0 +#define CFA_P58_PROFILER_VLAN_SEL_TUNNEL_HDR 1 +#define CFA_P58_PROFILER_VLAN_SEL_OUTERMOST_HDR 2 + u8 pass_cnt; + enum cfa_p58_mac_sel_mode mode; +}; + +/* CFA action layout definition */ + +#define CFA_P58_ACTION_MAX_LAYOUT_SIZE 184 + +/** + * Action object template structure + * + * Template structure presents data fields that are necessary to know + * at the beginning of Action Builder (AB) processing. Like before the + * AB compilation. 
One such example could be a template that is + * flexible in size (Encap Record) and the presence of these fields + * allows for determining the template size as well as where the + * fields are located in the record. + * + * The template may also present fields that are not made visible to + * the caller by way of the action fields. + * + * Template fields also allow for additional checking on user visible + * fields. One such example could be the encap pointer behavior on a + * CFA_P58_ACT_OBJ_TYPE_ACT or CFA_P58_ACT_OBJ_TYPE_ACT_SRAM. + */ +struct cfa_p58_action_template { + /** Action Object type + * + * Controls the type of the Action Template + */ + enum { + /** Select this type to build an Action Record Object + */ + CFA_P58_ACT_OBJ_TYPE_ACT, + /** Select this type to build an Action Statistics + * Object + */ + CFA_P58_ACT_OBJ_TYPE_STAT, + /** Select this type to build a SRAM Action Record + * Object. + */ + CFA_P58_ACT_OBJ_TYPE_ACT_SRAM, + /** Select this type to build a SRAM Action + * Encapsulation Object. + */ + CFA_P58_ACT_OBJ_TYPE_ENCAP_SRAM, + /** Select this type to build a SRAM Action Modify + * Object, with IPv4 capability. + */ + /* In case of Stingray the term Modify is used for the 'NAT + * action'. Action builder is leveraged to fill in the NAT + * object which then can be referenced by the action + * record. + */ + CFA_P58_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM, + /** Select this type to build a SRAM Action Source + * Property Object. + */ + /* In case of Stingray this is not a 'pure' action record. + * Action builder is leveraged to fill in the Source Property + * object which can then be referenced by the action + * record.
+ */ + CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM, + /** Select this type to build a SRAM Action Statistics + * Object + */ + CFA_P58_ACT_OBJ_TYPE_STAT_SRAM, + } obj_type; + + /** Action Control + * + * Controls the internals of the Action Template + * + * act is valid when: + * (obj_type == CFA_P58_ACT_OBJ_TYPE_ACT) + */ + /* Stat and encap are always inline for EEM as table scope + * allocation does not allow for separate Stats allocation, + * but has the xx_inline flags as to be forward compatible + * with Stingray 2, always treated as TRUE. + */ + struct { + /** Set to CFA_HCAPI_TRUE to enable statistics + */ + u8 stat_enable; + /** Set to CFA_HCAPI_TRUE to enable statistics to be inlined + */ + u8 stat_inline; + + /** Set to CFA_HCAPI_TRUE to enable encapsulation + */ + u8 encap_enable; + /** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined + */ + u8 encap_inline; + } act; + + /** Modify Setting + * + * Controls the type of the Modify Action the template is + * describing + * + * modify is valid when: + * (obj_type == CFA_P58_ACT_OBJ_TYPE_MODIFY_SRAM) + */ + enum { + /** Set to enable Modify of Source IPv4 Address + */ + CFA_P58_MR_REPLACE_SOURCE_IPV4 = 0, + /** Set to enable Modify of Destination IPv4 Address + */ + CFA_P58_MR_REPLACE_DEST_IPV4 + } modify; + + /** Encap Control + * Controls the type of encapsulation the template is + * describing + * + * encap is valid when: + * ((obj_type == CFA_P58_ACT_OBJ_TYPE_ACT) && + * act.encap_enable) || + * ((obj_type == CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM) + */ + struct { + /* Direction is required as Stingray Encap on RX is + * limited to l2 and VTAG only. 
+ */ + /** Receive or Transmit direction + */ + u8 direction; + /** Set to CFA_HCAPI_TRUE to enable L2 capability in the + * template + */ + u8 l2_enable; + /** vtag controls the Encap Vector - VTAG Encoding, 4 bits + * + * + * CFA_P58_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN + * Tags applied + * CFA_P58_ACT_ENCAP_VTAGS_PUSH_1, adds capability to + * set 1 VLAN Tag. Action Template compile adds + * the following field to the action object + * ::TF_ER_VLAN1 + * CFA_P58_ACT_ENCAP_VTAGS_PUSH_2, adds capability to + * set 2 VLAN Tags. Action Template compile adds + * the following fields to the action object + * ::TF_ER_VLAN1 and ::TF_ER_VLAN2 + * + */ + enum { CFA_P58_ACT_ENCAP_VTAGS_PUSH_0 = 0, + CFA_P58_ACT_ENCAP_VTAGS_PUSH_1, + CFA_P58_ACT_ENCAP_VTAGS_PUSH_2 } vtag; + + /* The remaining fields are NOT supported when + * direction is RX and ((obj_type == + * CFA_P58_ACT_OBJ_TYPE_ACT) && act.encap_enable). + * ab_compile_layout will perform the checking and + * skip remaining fields. + */ + /** L3 Encap controls the Encap Vector - L3 Encoding, + * 3 bits. Defines the type of L3 Encapsulation the + * template is describing. + * + * CFA_P58_ACT_ENCAP_L3_NONE, default, no L3 + * Encapsulation processing. + * CFA_P58_ACT_ENCAP_L3_IPV4, enables L3 IPv4 + * Encapsulation. + * CFA_P58_ACT_ENCAP_L3_IPV6, enables L3 IPv6 + * Encapsulation. + * CFA_P58_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS + * 8847 Encapsulation. + * CFA_P58_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS + * 8848 Encapsulation. 
+ * + */ + enum { + /** Set to disable any L3 encapsulation + * processing, default + */ + CFA_P58_ACT_ENCAP_L3_NONE = 0, + /** Set to enable L3 IPv4 encapsulation + */ + CFA_P58_ACT_ENCAP_L3_IPV4 = 4, + /** Set to enable L3 IPv6 encapsulation + */ + CFA_P58_ACT_ENCAP_L3_IPV6 = 5, + /** Set to enable L3 MPLS 8847 encapsulation + */ + CFA_P58_ACT_ENCAP_L3_MPLS_8847 = 6, + /** Set to enable L3 MPLS 8848 encapsulation + */ + CFA_P58_ACT_ENCAP_L3_MPLS_8848 = 7 + } l3; + +#define CFA_P58_ACT_ENCAP_MAX_MPLS_LABELS 8 + /** 1-8 labels, valid when + * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8847) || + * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8848) + * + * MAX number of MPLS Labels 8. + */ + u8 l3_num_mpls_labels; + + /** Set to CFA_HCAPI_TRUE to enable L4 capability in the + * template. + * + * CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and + * ::TF_EN_UDP_DST_PORT to the template. + */ + u8 l4_enable; + + /** Tunnel Encap controls the Encap Vector - Tunnel + * Encap, 3 bits. Defines the type of Tunnel + * encapsulation the template is describing + * + * CFA_P58_ACT_ENCAP_TNL_NONE, default, no Tunnel + * Encapsulation processing. + * CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL + * CFA_P58_ACT_ENCAP_TNL_VXLAN. NOTE: Expects + * l4_enable set to CFA_P58_TRUE; + * CFA_P58_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable + * set to CFA_P58_TRUE; + * CFA_P58_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if + * l4_enable set to CFA_HCAPI_FALSE. + * CFA_P58_ACT_ENCAP_TNL_GRE.NOTE: only valid if + * l4_enable set to CFA_HCAPI_FALSE. 
+ * CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4 + * CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL + * + */ + enum { + /** Set to disable Tunnel header encapsulation + * processing, default + */ + CFA_P58_ACT_ENCAP_TNL_NONE = 0, + /** Set to enable Tunnel Generic Full header + * encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL, + /** Set to enable VXLAN header encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_VXLAN, + /** Set to enable NGE (VXLAN2) header encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_NGE, + /** Set to enable NVGRE header encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_NVGRE, + /** Set to enable GRE header encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_GRE, + /** Set to enable Generic header after Tunnel + * L4 encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4, + /** Set to enable Generic header after Tunnel + * encapsulation + */ + CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL + } tnl; + + /** Number of bytes of generic tunnel header, + * valid when + * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL) || + * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) || + * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL) + */ + u8 tnl_generic_size; + /** Number of 32b words of nge options, + * valid when + * (tnl == CFA_P58_ACT_ENCAP_TNL_NGE) + */ + u8 tnl_nge_op_len; + /* Currently not planned */ + /* Custom Header */ + /* u8 custom_enable; */ + } encap; +}; + +/** + * Enumeration of SRAM entry types, used for allocation of + * fixed SRAM entities. The memory model for CFA HCAPI + * determines if an SRAM entry type is supported. + * NOTE: Any additions to this enum must be reflected on FW + * side as well. 
+ */ +enum cfa_p58_action_sram_entry_type { + CFA_P58_ACTION_SRAM_ENTRY_TYPE_ACT, /* SRAM Action Record */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_8B, /* SRAM Action Encap + * 8 Bytes + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_16B, /* SRAM Action Encap + * 16 Bytes + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_64B, /* SRAM Action Encap + * 64 Bytes + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_SRC, /* SRAM Action Modify + * IPv4 Source + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_DEST,/* SRAM Action Modify + * IPv4 Destination + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC, /* SRAM Action Source + * Properties SMAC + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV4, /* SRAM Action Source + * Props SMAC IPv4 + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV6, /* SRAM Action Source + * Props SMAC IPv6 + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_STATS_64, /* SRAM Action Stats + * 64 Bits + */ + CFA_P58_ACTION_SRAM_ENTRY_TYPE_MAX +}; + +/** + * SRAM Action Record structure holding either an action index or an + * action ptr. + * @act_idx: SRAM Action idx specifies the offset of the SRAM + * element within its SRAM Entry Type block. This + * index can be written into i.e. an L2 Context. Use + * this type for all SRAM Action Record types except + * SRAM Full Action records. Use act_ptr instead. + * @act_ptr: SRAM Full Action is special in that it needs an + * action record pointer. This pointer can be written + * into i.e. a Wildcard TCAM entry. + */ +union cfa_p58_action_sram_act_record { + u16 act_idx; + u32 act_ptr; +}; + +/** + * cfa_p58_action_param parameter definition + * @dir: receive or transmit direction + * @type: type of the sram allocation type + * @record: action record to set. The 'type' specified lists the + * record definition to use in the passed in record. 
+ * @act_size: number of elements in act_data + * @act_data: ptr to array of action data + */ +struct cfa_p58_action_param { + u8 dir; + enum cfa_p58_action_sram_entry_type type; + union cfa_p58_action_sram_act_record record; + u32 act_size; + u64 *act_data; +}; +#endif /* _HCAPI_CFA_P58_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_resources.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_resources.h new file mode 100644 index 000000000000..ce9dc157f3c6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_resources.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_RESOURCES_H_ +#define _CFA_RESOURCES_H_ + +/** + * @addtogroup CFA_RESC_TYPES CFA Resource Types + * \ingroup CFA_V3 + * CFA HW resource types and sub types definition + * @{ + */ + +/** + * CFA hardware Resource Type + * + * Depending on the type of CFA hardware resource, the resources are divided + * into multiple groups. This group is identified by resource type. The + * following enum defines all the CFA resource types + */ +enum cfa_resource_type { + /** CFA resources using fixed identifiers (IDM) + */ + CFA_RTYPE_IDENT = 0, + /** CFA resources accessed by fixed indices (TBM) + */ + CFA_RTYPE_IDX_TBL, + /** CFA TCAM resources + */ + CFA_RTYPE_TCAM, + /** CFA interface tables (IFM) + */ + CFA_RTYPE_IF_TBL, + /** CFA resources accessed using CFA memory manager index + */ + CFA_RTYPE_CMM, + /** CFA Global fields (e.g. registers which configure global settings) + */ + CFA_RTYPE_GLB_FLD, + + /** CFA Firmware internal ONLY definitions reserved starting with 12.
+ */ + CFA_RTYPE_HW_MAX = 12, + + /** Firmware only types + */ + /** CFA Firmware Session Manager + */ + CFA_RTYPE_SM = CFA_RTYPE_HW_MAX, + /** CFA Firmware Table Scope Manager + */ + CFA_RTYPE_TSM, + /** CFA Firmware Table Scope Instance Manager + */ + CFA_RTYPE_TIM, + /** CFA Firmware Global Id Manager + */ + CFA_RTYPE_GIM, + + CFA_RTYPE_MAX +}; + +/** + * Resource sub-types for CFA_RTYPE_IDENT + */ +enum cfa_resource_subtype_ident { + CFA_RSUBTYPE_IDENT_L2CTX = 0, /**< Remapped L2 contexts */ + CFA_RSUBTYPE_IDENT_PROF_FUNC, /**< Profile functions */ + CFA_RSUBTYPE_IDENT_WC_PROF, /**< WC TCAM profile IDs */ + CFA_RSUBTYPE_IDENT_EM_PROF, /**< EM profile IDs */ + CFA_RSUBTYPE_IDENT_L2_FUNC, /**< L2 functions */ + CFA_RSUBTYPE_IDENT_LAG_ID, /**< LAG IDs */ + CFA_RSUBTYPE_IDENT_MAX +}; + +/** + * Resource sub-types for CFA_RTYPE_IDX_TBL + */ +enum cfa_resource_subtype_idx_tbl { + CFA_RSUBTYPE_IDX_TBL_STAT64 = 0, /**< Statistics */ + CFA_RSUBTYPE_IDX_TBL_METER_PROF, /**< Meter profile */ + CFA_RSUBTYPE_IDX_TBL_METER_INST, /**< Meter instances */ + CFA_RSUBTYPE_IDX_TBL_METER_DROP_CNT, /**< Meter Drop Count */ + CFA_RSUBTYPE_IDX_TBL_MIRROR, /**< Mirror table */ + /* Metadata mask for profiler block */ + CFA_RSUBTYPE_IDX_TBL_METADATA_PROF, + /* Metadata mask for lookup block (for recycling) */ + CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP, + /* Metadata mask for action block */ + CFA_RSUBTYPE_IDX_TBL_METADATA_ACT, + CFA_RSUBTYPE_IDX_TBL_CT_STATE, /**< Connection tracking */ + CFA_RSUBTYPE_IDX_TBL_RANGE_PROF, /**< Range profile */ + CFA_RSUBTYPE_IDX_TBL_RANGE_ENTRY, /**< Range entry */ + CFA_RSUBTYPE_IDX_TBL_EM_FKB, /**< EM FKB table */ + CFA_RSUBTYPE_IDX_TBL_WC_FKB, /**< WC TCAM FKB table */ + CFA_RSUBTYPE_IDX_TBL_EM_FKB_MASK, /**< EM FKB Mask table */ + CFA_RSUBTYPE_IDX_TBL_MAX +}; + +/** + * Resource sub-types for CFA_RTYPE_TCAM + */ +enum cfa_resource_subtype_tcam { + CFA_RSUBTYPE_TCAM_L2CTX = 0, /**< L2 contexts TCAM */ + CFA_RSUBTYPE_TCAM_PROF_TCAM, /**< Profile TCAM */ + 
CFA_RSUBTYPE_TCAM_WC, /**< WC lookup TCAM */ + CFA_RSUBTYPE_TCAM_CT_RULE, /**< Connection tracking TCAM */ + CFA_RSUBTYPE_TCAM_VEB, /**< VEB TCAM */ + CFA_RSUBTYPE_TCAM_FEATURE_CHAIN, /**< Feature chain TCAM */ + CFA_RSUBTYPE_TCAM_MAX +}; + +/** + * Resource sub-types for CFA_RTYPE_IF_TBL + */ +enum cfa_resource_subtype_if_tbl { + /** ILT table indexed by SVIF + */ + CFA_RSUBTYPE_IF_TBL_ILT = 0, + /** VSPT table + */ + CFA_RSUBTYPE_IF_TBL_VSPT, + /** Profiler partition default action record pointer + */ + CFA_RSUBTYPE_IF_TBL_PROF_PARIF_DFLT_ACT_PTR, + /** Profiler partition error action record pointer + */ + CFA_RSUBTYPE_IF_TBL_PROF_PARIF_ERR_ACT_PTR, + CFA_RSUBTYPE_IF_TBL_EPOCH0, /**< Epoch0 mask table */ + CFA_RSUBTYPE_IF_TBL_EPOCH1, /**< Epoch1 mask table */ + CFA_RSUBTYPE_IF_TBL_LAG, /**< LAG Table */ + CFA_RSUBTYPE_IF_TBL_MAX +}; + +/** + * Resource sub-types for CFA_RTYPE_CMM + */ +enum cfa_resource_subtype_cmm { + CFA_RSUBTYPE_CMM_INT_ACT_B0 = 0, /**< SRAM Bank 0 */ + CFA_RSUBTYPE_CMM_INT_ACT_B1, /**< SRAM Bank 1 */ + CFA_RSUBTYPE_CMM_INT_ACT_B2, /**< SRAM Bank 2 */ + CFA_RSUBTYPE_CMM_INT_ACT_B3, /**< SRAM Bank 3 */ + CFA_RSUBTYPE_CMM_ACT, /**< Action table */ + CFA_RSUBTYPE_CMM_LKUP, /**< EM lookup table */ + CFA_RSUBTYPE_CMM_MAX +}; + +#define CFA_RSUBTYPE_GLB_FLD_MAX 1 +#define CFA_RSUBTYPE_SM_MAX 1 +#define CFA_RSUBTYPE_TSM_MAX 1 +#define CFA_RSUBTYPE_TIM_MAX 1 + +/** + * Resource sub-types for CFA_RTYPE_GIM + */ +enum cfa_resource_subtype_gim { + CFA_RSUBTYPE_GIM_DOMAIN_0 = 0, /**< Domain 0 */ + CFA_RSUBTYPE_GIM_DOMAIN_1, /**< Domain 1 */ + CFA_RSUBTYPE_GIM_DOMAIN_2, /**< Domain 2 */ + CFA_RSUBTYPE_GIM_DOMAIN_3, /**< Domain 3 */ + CFA_RSUBTYPE_GIM_MAX +}; + +/** + * Total number of resource subtypes + */ +#define CFA_NUM_RSUBTYPES \ + (CFA_RSUBTYPE_IDENT_MAX + CFA_RSUBTYPE_IDX_TBL_MAX + \ + CFA_RSUBTYPE_TCAM_MAX + CFA_RSUBTYPE_IF_TBL_MAX + \ + CFA_RSUBTYPE_CMM_MAX + CFA_RSUBTYPE_GLB_FLD_MAX + \ + CFA_RSUBTYPE_SM_MAX + CFA_RSUBTYPE_TSM_MAX + 
CFA_RSUBTYPE_TIM_MAX + \ + CFA_RSUBTYPE_GIM_MAX) + +/** + * @} + */ + +#endif /* _CFA_RESOURCES_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_types.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_types.h new file mode 100644 index 000000000000..876dcf1b8561 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_types.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_TYPES_H_ +#define _CFA_TYPES_H_ + +/* + * + * The primary goal of the CFA common HW access framework is to unify the CFA + * resource management and hardware programming design for different CFA + * applications so the CFA hardware can be properly shared with different + * entities. This framework is collection of the following CFA resource + * managers and Libraries listed below: + * + * 1. CFA Memory Manager + * 2. CFA Object Instance Manager + * 3. CFA Session Manager + * 4. CFA TCAM Manager + * 5. CFA Table Scope Manager + * 6. CFA Hardware Access Library + * 7. CFA Builder Library + * 8. CFA Index table manager + * 9. 
CFA Utilities Library + * + */ + +/* + * CFA HW version definition + */ +enum cfa_ver { + CFA_P40 = 0, /* CFA phase 4.0 */ + CFA_P45 = 1, /* CFA phase 4.5 */ + CFA_P58 = 2, /* CFA phase 5.8 */ + CFA_P59 = 3, /* CFA phase 5.9 */ + CFA_P70 = 4, /* CFA phase 7.0 */ + CFA_PMAX = 5 +}; + +/* + * CFA direction definition + */ +enum cfa_dir { + CFA_DIR_RX = 0, /* Receive */ + CFA_DIR_TX = 1, /* Transmit */ + CFA_DIR_MAX = 2 +}; + +/* + * CFA Remap Table Type + */ +enum cfa_remap_tbl_type { + CFA_REMAP_TBL_TYPE_NORMAL = 0, + CFA_REMAP_TBL_TYPE_BYPASS, + CFA_REMAP_TBL_TYPE_MAX +}; + +/* + * CFA tracker types + */ +enum cfa_track_type { + CFA_TRACK_TYPE_INVALID = 0, /* Invalid */ + CFA_TRACK_TYPE_SID, /* Tracked by session id */ + CFA_TRACK_TYPE_FIRST = CFA_TRACK_TYPE_SID, + CFA_TRACK_TYPE_FID, /* Tracked by function id */ + CFA_TRACK_TYPE_MAX +}; + +/* + * CFA Region Type + */ +enum cfa_region_type { + CFA_REGION_TYPE_LKUP = 0, + CFA_REGION_TYPE_ACT, + CFA_REGION_TYPE_MAX +}; + +/* + * CFA application type + */ +enum cfa_app_type { + CFA_APP_TYPE_AFM = 0, /* AFM firmware */ + CFA_APP_TYPE_TF = 1, /* TruFlow firmware */ + CFA_APP_TYPE_MAX = 2, + CFA_APP_TYPE_INVALID = CFA_APP_TYPE_MAX, +}; + +/* + * CFA FID types + */ +enum cfa_fid_type { + CFA_FID_TYPE_FID = 0, /* General */ + CFA_FID_TYPE_RFID = 1, /* Representor */ + CFA_FID_TYPE_EFID = 2 /* Endpoint */ +}; + +/* + * CFA srchm modes + */ +enum cfa_srch_mode { + CFA_SRCH_MODE_FIRST = 0, /* Start new iteration */ + CFA_SRCH_MODE_NEXT, /* Next item in iteration */ + CFA_SRCH_MODE_MAX +}; + +#endif /* _CFA_TYPES_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_util.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_util.h new file mode 100644 index 000000000000..f02a563d26b0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/cfa_util.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights 
reserved. + */ + +#ifndef _CFA_UTIL_H_ +#define _CFA_UTIL_H_ + +/* + * CFA specific utility macros + */ + +/* Bounds (closed interval) check helper macro */ +#define CFA_CHECK_BOUNDS(x, l, h) (((x) >= (l)) && ((x) <= (h))) +#define CFA_CHECK_UPPER_BOUNDS(x, h) ((x) <= (h)) + +#define CFA_ALIGN_LN2(x) (((x) < 3U) ? (x) : 32U - __builtin_clz((x) - 1U) + 1U) + +#endif /* _CFA_UTIL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/sys_util.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/sys_util.h new file mode 100644 index 000000000000..aad84499fbb5 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/include/sys_util.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _SYS_UTIL_H_ +#define _SYS_UTIL_H_ + +#include "linux/kernel.h" + +#define INVALID_U64 U64_MAX +#define INVALID_U32 U32_MAX +#define INVALID_U16 U16_MAX +#define INVALID_U8 U8_MAX + +#define ALIGN_256(x) ALIGN(x, 256) +#define ALIGN_128(x) ALIGN(x, 128) +#define ALIGN_64(x) ALIGN(x, 64) +#define ALIGN_32(x) ALIGN(x, 32) +#define ALIGN_16(x) ALIGN(x, 16) +#define ALIGN_8(x) ALIGN(x, 8) +#define ALIGN_4(x) ALIGN(x, 4) + +#define NUM_ALIGN_UNITS(x, unit) (((x) + (unit) - (1)) / (unit)) +#define NUM_WORDS_ALIGN_32BIT(x) (ALIGN_32(x) / BITS_PER_WORD) +#define NUM_WORDS_ALIGN_64BIT(x) (ALIGN_64(x) / BITS_PER_WORD) +#define NUM_WORDS_ALIGN_128BIT(x) (ALIGN_128(x) / BITS_PER_WORD) +#define NUM_WORDS_ALIGN_256BIT(x) (ALIGN_256(x) / BITS_PER_WORD) + +#ifndef MAX +#define MAX(A, B) ((A) > (B) ? (A) : (B)) +#endif + +#ifndef MIN +#define MIN(A, B) ((A) < (B) ? 
(A) : (B)) +#endif + +#ifndef STRINGIFY +#define STRINGIFY(X) #X +#endif + +/* Helper macros to get/set/clear Nth bit in a u8 bitmap */ +#define BMP_GETBIT(BMP, N) \ + ((*((u8 *)(BMP) + ((N) / 8)) >> ((N) % 8)) & 0x1) +#define BMP_SETBIT(BMP, N) \ + do { \ + u32 n = (N); \ + *((u8 *)(BMP) + (n / 8)) |= (0x1U << (n % 8)); \ + } while (0) +#define BMP_CLRBIT(BMP, N) \ + do { \ + u32 n = (N); \ + *((u8 *)(BMP) + (n / 8)) &= \ + (u8)(~(0x1U << (n % 8))); \ + } while (0) + +#endif /* _SYS_UTIL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/cfa_mm.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/cfa_mm.c new file mode 100644 index 000000000000..e0ac8f221c92 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/cfa_mm.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include "sys_util.h" +#include "cfa_util.h" +#include "cfa_types.h" +#include "cfa_mm.h" +#include "bnxt_compat.h" + +#define CFA_MM_SIGNATURE 0xCFA66C89 + +#define CFA_MM_INVALID8 U8_MAX +#define CFA_MM_INVALID16 U16_MAX +#define CFA_MM_INVALID32 U32_MAX +#define CFA_MM_INVALID64 U64_MAX + +#define CFA_MM_MAX_RECORDS (64 * 1024 * 1024) +#define CFA_MM_MAX_CONTIG_RECORDS 8 +#define CFA_MM_RECORDS_PER_BYTE 8 +#define CFA_MM_MIN_RECORDS_PER_BLOCK 8 + +/* CFA Records block + * + * Structure used to store the CFA record block info + */ +struct cfa_mm_blk { + /* Index of the previous block in the list */ + u32 prev_blk_idx; + /* Index of the next block in the list */ + u32 next_blk_idx; + /* Number of free records available in the block */ + u16 num_free_records; + /* Location of first free record in the block */ + u16 first_free_record; + /* Number of contiguous records */ + u16 num_contig_records; + /* Reserved for future use */ + u16 reserved; +}; + +/* CFA Record block list + * + * Structure used to store CFA Record block list 
info + */ +struct cfa_mm_blk_list { + /* Index of the first block in the list */ + u32 first_blk_idx; + /* Index of the current block having free records */ + u32 current_blk_idx; +}; + +/* CFA memory manager Database + * + * Structure used to store CFA memory manager database info + */ +struct cfa_mm { + /* Signature of the CFA Memory Manager Database */ + u32 signature; + /* Maximum number of CFA Records */ + u32 max_records; + /* Number of CFA Records in use*/ + u32 records_in_use; + /* Number of Records per block */ + u16 records_per_block; + /* Maximum number of contiguous records */ + u16 max_contig_records; + /** + * Block list table stores the info of lists of blocks + * for various numbers of contiguous records + */ + struct cfa_mm_blk_list *blk_list_tbl; + /** + * Block table stores the info about the blocks of CFA Records + */ + struct cfa_mm_blk *blk_tbl; + /** + * Block bitmap table stores bit maps for the blocks of CFA Records + */ + u8 *blk_bmap_tbl; +}; + +static void cfa_mm_db_info(u32 max_records, u16 max_contig_records, + u16 *records_per_block, u32 *num_blocks, + u16 *num_lists, u32 *db_size) +{ + *records_per_block = + MAX(CFA_MM_MIN_RECORDS_PER_BLOCK, max_contig_records); + + *num_blocks = (max_records / (*records_per_block)); + + *num_lists = CFA_ALIGN_LN2(max_contig_records) + 1; + + *db_size = sizeof(struct cfa_mm) + + ((*num_blocks) * NUM_ALIGN_UNITS(*records_per_block, + CFA_MM_RECORDS_PER_BYTE)) + + ((*num_blocks) * sizeof(struct cfa_mm_blk)) + + ((*num_lists) * sizeof(struct cfa_mm_blk_list)); +} + +int cfa_mm_query(struct cfa_mm_query_parms *parms) +{ + u16 max_contig_records, num_lists, records_per_block; + u32 max_records, num_blocks; + + if (!parms) { + netdev_dbg(NULL, "parms = %p\n", parms); + return -EINVAL; + } + + max_records = parms->max_records; + max_contig_records = (u16)parms->max_contig_records; + + if (!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) && + is_power_of_2(max_contig_records) && + 
CFA_CHECK_BOUNDS(max_contig_records, 1, + CFA_MM_MAX_CONTIG_RECORDS))) { + netdev_dbg(NULL, "parms = %p, max_records = %d, max_contig_records = %d\n", + parms, parms->max_records, + parms->max_contig_records); + return -EINVAL; + } + + cfa_mm_db_info(max_records, max_contig_records, &records_per_block, + &num_blocks, &num_lists, &parms->db_size); + + return 0; +} + +int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms) +{ + u16 max_contig_records, num_lists, records_per_block; + struct cfa_mm *context = (struct cfa_mm *)cmm; + u32 max_records, num_blocks, db_size, i; + + if (!cmm || !parms) { + netdev_dbg(NULL, "cmm = %p, parms = %p\n", cmm, parms); + return -EINVAL; + } + + max_records = parms->max_records; + max_contig_records = (u16)parms->max_contig_records; + + if (!(CFA_CHECK_BOUNDS(max_records, 1, CFA_MM_MAX_RECORDS) && + is_power_of_2(max_contig_records) && + CFA_CHECK_BOUNDS(max_contig_records, 1, + CFA_MM_MAX_CONTIG_RECORDS))) { + netdev_dbg(NULL, "cmm = %p, parms = %p, db_mem_size = %d, ", + cmm, parms, parms->db_mem_size); + netdev_dbg(NULL, "max_records = %d max_contig_records = %d\n", + max_records, max_contig_records); + return -EINVAL; + } + + cfa_mm_db_info(max_records, max_contig_records, &records_per_block, + &num_blocks, &num_lists, &db_size); + + if (parms->db_mem_size < db_size) { + netdev_dbg(NULL, "cmm = %p, parms = %p, db_mem_size = %d, ", + cmm, parms, parms->db_mem_size); + netdev_dbg(NULL, "max_records = %d max_contig_records = %d\n", + max_records, max_contig_records); + return -EINVAL; + } + + memset(context, 0, parms->db_mem_size); + + context->signature = CFA_MM_SIGNATURE; + context->max_records = max_records; + context->records_in_use = 0; + context->records_per_block = records_per_block; + context->max_contig_records = max_contig_records; + + context->blk_list_tbl = (struct cfa_mm_blk_list *)(context + 1); + context->blk_tbl = + (struct cfa_mm_blk *)(context->blk_list_tbl + num_lists); + context->blk_bmap_tbl = (u8 
*)(context->blk_tbl + num_blocks); + + context->blk_list_tbl[0].first_blk_idx = 0; + context->blk_list_tbl[0].current_blk_idx = 0; + + for (i = 1; i < num_lists; i++) { + context->blk_list_tbl[i].first_blk_idx = CFA_MM_INVALID32; + context->blk_list_tbl[i].current_blk_idx = CFA_MM_INVALID32; + } + + for (i = 0; i < num_blocks; i++) { + context->blk_tbl[i].prev_blk_idx = i - 1; + context->blk_tbl[i].next_blk_idx = i + 1; + context->blk_tbl[i].num_free_records = records_per_block; + context->blk_tbl[i].first_free_record = 0; + context->blk_tbl[i].num_contig_records = 0; + } + + context->blk_tbl[num_blocks - 1].next_blk_idx = CFA_MM_INVALID32; + + memset(context->blk_bmap_tbl, 0, + num_blocks * NUM_ALIGN_UNITS(records_per_block, + CFA_MM_RECORDS_PER_BYTE)); + + return 0; +} + +int cfa_mm_close(void *cmm) +{ + struct cfa_mm *context = (struct cfa_mm *)cmm; + u16 num_lists, records_per_block; + u32 db_size, num_blocks; + + if (!cmm || context->signature != CFA_MM_SIGNATURE) { + netdev_err(NULL, "cmm = %p\n", cmm); + return -EINVAL; + } + + cfa_mm_db_info(context->max_records, context->max_contig_records, + &records_per_block, &num_blocks, &num_lists, &db_size); + + memset(cmm, 0, db_size); + + return 0; +} + +static u32 cfa_mm_blk_alloc(struct cfa_mm *context) +{ + struct cfa_mm_blk_list *free_list; + u32 blk_idx; + + free_list = context->blk_list_tbl; + + blk_idx = free_list->first_blk_idx; + + if (blk_idx == CFA_MM_INVALID32) { + netdev_err(NULL, "Out of record blocks\n"); + return CFA_MM_INVALID32; + } + + free_list->first_blk_idx = + context->blk_tbl[free_list->first_blk_idx].next_blk_idx; + + free_list->current_blk_idx = free_list->first_blk_idx; + + if (free_list->first_blk_idx != CFA_MM_INVALID32) { + context->blk_tbl[free_list->first_blk_idx].prev_blk_idx = + CFA_MM_INVALID32; + } + + context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32; + context->blk_tbl[blk_idx].next_blk_idx = CFA_MM_INVALID32; + + return blk_idx; +} + +static void cfa_mm_blk_free(struct 
cfa_mm *context, u32 blk_idx) +{ + struct cfa_mm_blk_list *free_list = context->blk_list_tbl; + + context->blk_tbl[blk_idx].prev_blk_idx = CFA_MM_INVALID32; + context->blk_tbl[blk_idx].next_blk_idx = free_list->first_blk_idx; + context->blk_tbl[blk_idx].num_free_records = context->records_per_block; + context->blk_tbl[blk_idx].first_free_record = 0; + context->blk_tbl[blk_idx].num_contig_records = 0; + + if (free_list->first_blk_idx != CFA_MM_INVALID32) { + context->blk_tbl[free_list->first_blk_idx].prev_blk_idx = + blk_idx; + } + + free_list->first_blk_idx = blk_idx; + free_list->current_blk_idx = blk_idx; +} + +static void cfa_mm_blk_insert(struct cfa_mm *context, + struct cfa_mm_blk_list *blk_list, + u32 blk_idx) +{ + if (blk_list->first_blk_idx == CFA_MM_INVALID32) { + blk_list->first_blk_idx = blk_idx; + blk_list->current_blk_idx = blk_idx; + } else { + struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx]; + + blk_info->prev_blk_idx = CFA_MM_INVALID32; + blk_info->next_blk_idx = blk_list->first_blk_idx; + context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx = + blk_idx; + blk_list->first_blk_idx = blk_idx; + blk_list->current_blk_idx = blk_idx; + } +} + +static void cfa_mm_blk_delete(struct cfa_mm *context, + struct cfa_mm_blk_list *blk_list, + u32 blk_idx) +{ + struct cfa_mm_blk *blk_info = &context->blk_tbl[blk_idx]; + + if (blk_list->first_blk_idx == CFA_MM_INVALID32) + return; + + if (blk_list->first_blk_idx == blk_idx) { + blk_list->first_blk_idx = blk_info->next_blk_idx; + if (blk_list->first_blk_idx != CFA_MM_INVALID32) { + context->blk_tbl[blk_list->first_blk_idx].prev_blk_idx = + CFA_MM_INVALID32; + } + if (blk_list->current_blk_idx == blk_idx) + blk_list->current_blk_idx = blk_list->first_blk_idx; + + return; + } + + if (blk_info->prev_blk_idx != CFA_MM_INVALID32) { + context->blk_tbl[blk_info->prev_blk_idx].next_blk_idx = + blk_info->next_blk_idx; + } + + if (blk_info->next_blk_idx != CFA_MM_INVALID32) { + 
context->blk_tbl[blk_info->next_blk_idx].prev_blk_idx = + blk_info->prev_blk_idx; + } + + if (blk_list->current_blk_idx == blk_idx) { + if (blk_info->next_blk_idx != CFA_MM_INVALID32) { + blk_list->current_blk_idx = blk_info->next_blk_idx; + } else { + if (blk_info->prev_blk_idx != CFA_MM_INVALID32) { + blk_list->current_blk_idx = + blk_info->prev_blk_idx; + } else { + blk_list->current_blk_idx = + blk_list->first_blk_idx; + } + } + } +} + +/* Returns true if the bit in the bitmap is set to 'val' else returns false */ +static bool cfa_mm_test_bit(u8 *bmap, u16 index, u8 val) +{ + u8 shift; + + bmap += index / CFA_MM_RECORDS_PER_BYTE; + index %= CFA_MM_RECORDS_PER_BYTE; + + shift = CFA_MM_RECORDS_PER_BYTE - (index + 1); + if (val) { + if ((*bmap >> shift) & 0x1) + return true; + } else { + if (!((*bmap >> shift) & 0x1)) + return true; + } + + return false; +} + +static int cfa_mm_test_and_set_bits(u8 *bmap, u16 start, + u16 count, u8 val) +{ + u8 mask[NUM_ALIGN_UNITS(CFA_MM_MAX_CONTIG_RECORDS, + CFA_MM_RECORDS_PER_BYTE) + 1]; + u16 i, j, nbits; + + bmap += start / CFA_MM_RECORDS_PER_BYTE; + start %= CFA_MM_RECORDS_PER_BYTE; + + if ((start + count - 1) < CFA_MM_RECORDS_PER_BYTE) { + nbits = CFA_MM_RECORDS_PER_BYTE - (start + count); + mask[0] = (u8)(((u16)1 << count) - 1); + mask[0] <<= nbits; + if (val) { + if (*bmap & mask[0]) + return -EINVAL; + *bmap |= mask[0]; + } else { + if ((*bmap & mask[0]) != mask[0]) + return -EINVAL; + *bmap &= ~(mask[0]); + } + return 0; + } + + i = 0; + + nbits = CFA_MM_RECORDS_PER_BYTE - start; + mask[i++] = (u8)(((u16)1 << nbits) - 1); + + count -= nbits; + + while (count > CFA_MM_RECORDS_PER_BYTE) { + count -= CFA_MM_RECORDS_PER_BYTE; + mask[i++] = 0xff; + } + + mask[i] = (u8)(((u16)1 << count) - 1); + mask[i++] <<= (CFA_MM_RECORDS_PER_BYTE - count); + + for (j = 0; j < i; j++) { + if (val) { + if (bmap[j] & mask[j]) + return -EINVAL; + } else { + if ((bmap[j] & mask[j]) != mask[j]) + return -EINVAL; + } + } + + for (j = 0; j < i; 
j++) { + if (val) + bmap[j] |= mask[j]; + else + bmap[j] &= ~(mask[j]); + } + + return 0; +} + +int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms) +{ + struct cfa_mm *context = (struct cfa_mm *)cmm; + struct cfa_mm_blk_list *blk_list; + u32 i, cnt, blk_idx, record_idx; + struct cfa_mm_blk *blk_info; + u16 list_idx, num_records; + u8 *blk_bmap; + int ret = 0; + + if (!cmm || !parms || + context->signature != CFA_MM_SIGNATURE) { + netdev_dbg(NULL, "cmm = %p parms = %p\n", cmm, parms); + return -EINVAL; + } + + if (!(CFA_CHECK_BOUNDS(parms->num_contig_records, 1, + context->max_contig_records) && + is_power_of_2(parms->num_contig_records))) { + netdev_dbg(NULL, "cmm = %p parms = %p num_records = %d\n", cmm, + parms, parms->num_contig_records); + return -EINVAL; + } + + list_idx = CFA_ALIGN_LN2(parms->num_contig_records); + + blk_list = context->blk_list_tbl + list_idx; + + num_records = 1 << (list_idx - 1); + + if (context->records_in_use + num_records > context->max_records) { + netdev_err(NULL, "Requested number (%d) of records not available\n", + num_records); + ret = -ENOMEM; + goto cfa_mm_alloc_exit; + } + + if (blk_list->first_blk_idx == CFA_MM_INVALID32) { + blk_idx = cfa_mm_blk_alloc(context); + if (blk_idx == CFA_MM_INVALID32) { + ret = -ENOMEM; + goto cfa_mm_alloc_exit; + } + + cfa_mm_blk_insert(context, blk_list, blk_idx); + + blk_info = &context->blk_tbl[blk_idx]; + + blk_info->num_contig_records = num_records; + } else { + blk_idx = blk_list->current_blk_idx; + blk_info = &context->blk_tbl[blk_idx]; + } + + while (blk_info->num_free_records < num_records) { + if (blk_info->next_blk_idx == CFA_MM_INVALID32) { + blk_idx = cfa_mm_blk_alloc(context); + if (blk_idx == CFA_MM_INVALID32) { + ret = -ENOMEM; + goto cfa_mm_alloc_exit; + } + + cfa_mm_blk_insert(context, blk_list, blk_idx); + + blk_info = &context->blk_tbl[blk_idx]; + + blk_info->num_contig_records = num_records; + } else { + blk_idx = blk_info->next_blk_idx; + blk_info = 
&context->blk_tbl[blk_idx]; + + blk_list->current_blk_idx = blk_idx; + } + } + + blk_bmap = context->blk_bmap_tbl + blk_idx * + context->records_per_block / + CFA_MM_RECORDS_PER_BYTE; + + record_idx = blk_info->first_free_record; + + if (cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 1)) { + netdev_dbg(NULL, + "Records are already allocated. record_idx = %d, num_records = %d\n", + record_idx, num_records); + return -EINVAL; + } + + parms->record_offset = + (blk_idx * context->records_per_block) + record_idx; + + parms->num_contig_records = num_records; + + blk_info->num_free_records -= num_records; + + if (!blk_info->num_free_records) { + blk_info->first_free_record = context->records_per_block; + } else { + cnt = NUM_ALIGN_UNITS(context->records_per_block, + CFA_MM_RECORDS_PER_BYTE); + + for (i = (record_idx + num_records) / CFA_MM_RECORDS_PER_BYTE; + i < cnt; i++) { + if (blk_bmap[i] != 0xff) { + u8 bmap = blk_bmap[i]; + + blk_info->first_free_record = + i * CFA_MM_RECORDS_PER_BYTE; + while (bmap & 0x80) { + bmap <<= 1; + blk_info->first_free_record++; + } + break; + } + } + } + + context->records_in_use += num_records; + + ret = 0; + +cfa_mm_alloc_exit: + + parms->used_count = context->records_in_use; + + parms->all_used = (context->records_in_use >= context->max_records); + + return ret; +} + +int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms) +{ + struct cfa_mm *context = (struct cfa_mm *)cmm; + struct cfa_mm_blk_list *blk_list; + struct cfa_mm_blk *blk_info; + u16 list_idx, num_records; + u32 blk_idx, record_idx; + uint8_t *blk_bmap; + + if (!cmm || !parms || + context->signature != CFA_MM_SIGNATURE) { + netdev_err(NULL, "cmm = %p parms = %p\n", cmm, parms); + return -EINVAL; + } + + if (!(parms->record_offset < context->max_records && + CFA_CHECK_BOUNDS(parms->num_contig_records, 1, + context->max_contig_records) && + is_power_of_2(parms->num_contig_records))) { + netdev_dbg(NULL, + "cmm = %p, parms = %p, record_offset = %d, 
num_contig_records = %d\n", + cmm, parms, parms->record_offset, parms->num_contig_records); + return -EINVAL; + } + + record_idx = parms->record_offset % context->records_per_block; + blk_idx = parms->record_offset / context->records_per_block; + + list_idx = CFA_ALIGN_LN2(parms->num_contig_records); + + blk_list = &context->blk_list_tbl[list_idx]; + + if (blk_list->first_blk_idx == CFA_MM_INVALID32) { + netdev_err(NULL, "Records were not allocated\n"); + return -EINVAL; + } + + num_records = 1 << (list_idx - 1); + + blk_info = &context->blk_tbl[blk_idx]; + + if (blk_info->num_contig_records != num_records) { + netdev_dbg(NULL, + "num_contig_records (%d) and num_records (%d) mismatch\n", + num_records, blk_info->num_contig_records); + return -EINVAL; + } + + blk_bmap = context->blk_bmap_tbl + blk_idx * + context->records_per_block / + CFA_MM_RECORDS_PER_BYTE; + + if (cfa_mm_test_and_set_bits(blk_bmap, record_idx, num_records, 0)) { + netdev_dbg(NULL, "Records are not allocated. record_idx = %d, num_records = %d\n", + record_idx, num_records); + return -EINVAL; + } + + blk_info->num_free_records += num_records; + + if (blk_info->num_free_records >= context->records_per_block) { + cfa_mm_blk_delete(context, blk_list, blk_idx); + cfa_mm_blk_free(context, blk_idx); + } else { + if (blk_info->num_free_records == num_records) { + cfa_mm_blk_delete(context, blk_list, blk_idx); + cfa_mm_blk_insert(context, blk_list, blk_idx); + blk_info->first_free_record = record_idx; + } else { + if (record_idx < blk_info->first_free_record) + blk_info->first_free_record = record_idx; + } + } + + context->records_in_use -= num_records; + + parms->used_count = context->records_in_use; + + return 0; +} + +int cfa_mm_entry_size_get(void *cmm, u32 entry_id, u8 *size) +{ + struct cfa_mm *context = (struct cfa_mm *)cmm; + struct cfa_mm_blk *blk_info; + u32 blk_idx, record_idx; + u8 *blk_bmap; + + if (!cmm || !size || context->signature != CFA_MM_SIGNATURE) + return -EINVAL; + + if (!(entry_id 
< context->max_records)) { + netdev_dbg(NULL, "cmm = %p, entry_id = %d\n", cmm, entry_id); + return -EINVAL; + } + + blk_idx = entry_id / context->records_per_block; + blk_info = &context->blk_tbl[blk_idx]; + record_idx = entry_id % context->records_per_block; + + /* + * Block is unused if num contig records is 0 and + * there are no allocated entries in the block + */ + if (blk_info->num_contig_records == 0) + return -ENOENT; + + /* + * Check the entry is indeed allocated. Suffices to check if + * the first bit in the bitmap is set. + */ + blk_bmap = context->blk_bmap_tbl + blk_idx * context->records_per_block / + CFA_MM_RECORDS_PER_BYTE; + + if (cfa_mm_test_bit(blk_bmap, record_idx, 1)) { + *size = blk_info->num_contig_records; + return 0; + } else { + return -ENOENT; + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/cfa_mm.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/cfa_mm.h new file mode 100644 index 000000000000..16136d94e2c0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/cfa_mm.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_MM_H_ +#define _CFA_MM_H_ + +/** + * CFA_MM CFA Memory Manager + * A CFA memory manager (Document Control:DCSG00988445) is a object instance + * within the CFA service module that is responsible for managing CFA related + * memories such as Thor2 CFA backings stores, Thor CFA action SRAM, etc. It + * is designed to operate in firmware or as part of the host Truflow stack. + * Each manager instance consists of a number of bank databases with each + * database managing a pool of CFA memory. + */ + +/** CFA Memory Manager database query params structure + * + * Structure of database params + * @max_records: [in] Maximum number of CFA records + * @max_contig_records: [in] Max contiguous CFA records per Alloc (Must be a power of 2). 
+ * @db_size: [out] Memory required for Database + */ +struct cfa_mm_query_parms { + u32 max_records; + u32 max_contig_records; + u32 db_size; +}; + +/** CFA Memory Manager open parameters + * + * Structure to store CFA MM open parameters + * @db_mem_size: [in] Size of memory allocated for CFA MM database + * @max_records: [in] Max number of CFA records + * @max_contig_records: [in] Maximum number of contiguous CFA records + */ +struct cfa_mm_open_parms { + u32 db_mem_size; + u32 max_records; + u16 max_contig_records; +}; + +/** CFA Memory Manager record alloc parameters + * + * Structure to contain parameters for record alloc + * @num_contig_records - [in] Number of contiguous CFA records + * @record_offset: [out] Offset of the first of the records allocated + * @used_count: [out] Total number of records already allocated + * @all_used: [out] Flag to indicate if all the records are allocated + */ +struct cfa_mm_alloc_parms { + u32 num_contig_records; + u32 record_offset; + u32 used_count; + u32 all_used; +}; + +/** CFA Memory Manager record free parameters + * + * Structure to contain parameters for record free + * @record_offset: [in] Offset of the first of the records allocated + * @num_contig_records: [in] Number of contiguous CFA records + * @used_count: [out] Total number of records already allocated + */ +struct cfa_mm_free_parms { + u32 record_offset; + u32 num_contig_records; + u32 used_count; +}; + +/** CFA Memory Manager query API + * + * This API returns the size of memory required for internal data structures to + * manage the pool of CFA Records with given parameters. + * + * @parms: [in,out] CFA Memory manager query data base parameters. + * + * Returns + * - (0) if successful. 
+ * - (-ERRNO) on failure
+ */
+int cfa_mm_query(struct cfa_mm_query_parms *parms);
+
+/** CFA Memory Manager open API
+ *
+ * This API initializes the CFA Memory Manager database
+ *
+ * @cmm: [in] Pointer to the memory used for the CFA Memory Manager Database
+ *
+ * @parms: [in] CFA Memory manager data base parameters.
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-ERRNO) on failure
+ */
+int cfa_mm_open(void *cmm, struct cfa_mm_open_parms *parms);
+
+/** CFA Memory Manager close API
+ *
+ * This API frees the CFA Memory Manager database
+ *
+ * @cmm: [in] Pointer to the database memory for the record pool
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-ERRNO) on failure
+ */
+int cfa_mm_close(void *cmm);
+
+/** CFA Memory Manager Allocate CFA Records API
+ *
+ * This API allocates the requested number of contiguous CFA Records
+ *
+ * @cmm: [in] Pointer to the database from which to allocate CFA Records
+ *
+ * @parms: [in,out] CFA MM alloc records parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-ERRNO) on failure
+ */
+int cfa_mm_alloc(void *cmm, struct cfa_mm_alloc_parms *parms);
+
+/** CFA Memory Manager Free CFA Records API
+ *
+ * This API frees the requested number of contiguous CFA Records
+ *
+ * @cmm: [in] Pointer to the database from which to free CFA Records
+ *
+ * @parms: [in,out] CFA MM free records parameters
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-ERRNO) on failure
+ */
+int cfa_mm_free(void *cmm, struct cfa_mm_free_parms *parms);
+
+/** CFA Memory Manager Get Entry Size API
+ *
+ * This API retrieves the size of an allocated CMM entry.
+ *
+ * @cmm: [in] Pointer to the database from which to allocate CFA Records
+ *
+ * @entry_id: [in] Index of the allocated entry.
+ *
+ * @size: [out] Number of contiguous records in the entry.
+ *
+ * Returns
+ * - (0) if successful.
+ * - (-ERRNO) on failure + */ +int cfa_mm_entry_size_get(void *cmm, u32 entry_id, u8 *size); + +#endif /* _CFA_MM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/sys_util.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/sys_util.h new file mode 100644 index 000000000000..12b3a8bbf190 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mm/include/sys_util.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _SYS_UTIL_H_ +#define _SYS_UTIL_H_ + +#define Y_NUM_ALIGN_UNITS(x, unit) (((x) + (unit) - (1)) / (unit)) +#define Y_IS_POWER_2(x) (((x) != 0) && (((x) & ((x) - (1))) == 0)) + +#endif /* _SYS_UTIL_H_ */ + diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_mpc.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_mpc.c new file mode 100644 index 000000000000..a52764409345 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_mpc.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + + +#include +#include +#include +#include "bnxt_compat.h" +#include "sys_util.h" +#include "cfa_types.h" +#include "cfa_bld_p70_mpc.h" +#include "cfa_bld_p70_mpc_defs.h" +#include "cfa_bld_p70_mpcops.h" + +int cfa_bld_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo) +{ + if (!mpcinfo) + return -EINVAL; + + switch (hw_ver) { + case CFA_P40: + case CFA_P45: + case CFA_P58: + case CFA_P59: + return -ENOTSUPP; + case CFA_P70: + return cfa_bld_p70_mpc_bind(hw_ver, mpcinfo); + default: + return -EINVAL; + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c new file mode 100644 index 000000000000..95a5038b6055 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_host_mpc_wrapper.c @@ -0,0 +1,1110 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "bnxt_compat.h" +#include "sys_util.h" + +#include "cfa_types.h" +#include "cfa_bld_mpcops.h" + +#include "cfa_bld_mpc_field_ids.h" +#include "cfa_p70_mpc_field_ids.h" +#include "cfa_bld_p70_mpc.h" +#include "cfa_bld_p70_host_mpc_wrapper.h" +#include "cfa_p70_mpc_field_mapping.h" + +/* + * Helper macro to set an input parm field from fields array + */ +#define SET_PARM_VALUE(NAME, TYPE, INDEX, FIELDS) \ + do { \ + if (FIELDS[INDEX].field_id != INVALID_U16) \ + parms.NAME = (TYPE)fields[INDEX].val; \ + } while (0) + +/* + * Helper macro to set an input parm field from fields array thorugh a mapping + * function + */ +#define SET_PARM_MAPPED_VALUE(NAME, TYPE, INDEX, FIELDS, MAP_FUNC) \ + ({ \ + int retcode = 0; \ + if (FIELDS[INDEX].field_id != INVALID_U16) { \ + int retcode; \ + u64 mapped_val; \ + retcode = MAP_FUNC(fields[INDEX].val, &mapped_val); \ + if (retcode) \ + ASSERT_RTNL(); \ + else \ + parms.NAME = (TYPE)mapped_val; \ + } \ + retcode; \ + }) + +/* + * Helper macro to set a result field into fields array + */ +#define GET_RESP_VALUE(NAME, INDEX, FIELDS) \ + do { \ + if (FIELDS[INDEX].field_id != INVALID_U16) \ + FIELDS[INDEX].val = (u64)result.NAME; \ + } while (0) + +/* + * Helper macro to set a result field into fields array thorugh a mapping + * function + */ +#define GET_RESP_MAPPED_VALUE(NAME, INDEX, FIELDS, MAP_FUNC) \ + ({ \ + int retcode = 0; \ + if (FIELDS[INDEX].field_id != INVALID_U16) { \ + int retcode; \ + u64 mapped_val; \ + retcode = MAP_FUNC(result.NAME, &mapped_val); \ + if (retcode) \ + ASSERT_RTNL(); \ + else \ + fields[INDEX].val = mapped_val; \ + } \ + retcode; \ + }) + +/* + * MPC fields validate routine. 
+ */ +static bool fields_valid(struct cfa_mpc_data_obj *fields, u16 len, + struct field_mapping *fld_map) +{ + int i; + + for (i = 0; i < len; i++) { + /* Field not requested to be set by caller, skip it */ + if (fields[i].field_id == INVALID_U16) + continue; + + /* + * Field id should be index value unless + * it is set to UINT16_MAx + */ + if (fields[i].field_id != i) + return false; + + /* Field is valid */ + if (!fld_map[i].valid) + return false; + } + + return true; +} + +/* Map global table type definition to p70 specific value */ +static int table_type_map(u64 val, u64 *mapped_val) +{ + switch (val) { + case CFA_BLD_MPC_HW_TABLE_TYPE_ACTION: + *mapped_val = CFA_HW_TABLE_ACTION; + break; + case CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP: + *mapped_val = CFA_HW_TABLE_LOOKUP; + break; + default: + ASSERT_RTNL(); + return -EINVAL; + } + + return 0; +} + +/* Map global read mode value to p70 specific value */ +static int read_mode_map(u64 val, u64 *mapped_val) +{ + switch (val) { + case CFA_BLD_MPC_RD_NORMAL: + *mapped_val = CFA_MPC_RD_NORMAL; + break; + case CFA_BLD_MPC_RD_EVICT: + *mapped_val = CFA_MPC_RD_EVICT; + break; + case CFA_BLD_MPC_RD_DEBUG_LINE: + *mapped_val = CFA_MPC_RD_DEBUG_LINE; + break; + case CFA_BLD_MPC_RD_DEBUG_TAG: + *mapped_val = CFA_MPC_RD_DEBUG_TAG; + break; + default: + ASSERT_RTNL(); + return -EINVAL; + } + return 0; +} + +/* Map global write mode value to p70 specific value */ +static int write_mode_map(u64 val, u64 *mapped_val) +{ + switch (val) { + case CFA_BLD_MPC_WR_WRITE_THRU: + *mapped_val = CFA_MPC_WR_WRITE_THRU; + break; + case CFA_BLD_MPC_WR_WRITE_BACK: + *mapped_val = CFA_MPC_WR_WRITE_BACK; + break; + default: + ASSERT_RTNL(); + return -EINVAL; + } + return 0; +} + +/* Map global evict mode value to p70 specific value */ +static int evict_mode_map(u64 val, u64 *mapped_val) +{ + switch (val) { + case CFA_BLD_MPC_EV_EVICT_LINE: + *mapped_val = CFA_MPC_EV_EVICT_LINE; + break; + case CFA_BLD_MPC_EV_EVICT_SCOPE_ADDRESS: + *mapped_val = 
CFA_MPC_EV_EVICT_SCOPE_ADDRESS; + break; + case CFA_BLD_MPC_EV_EVICT_CLEAN_LINES: + *mapped_val = CFA_MPC_EV_EVICT_CLEAN_LINES; + break; + case CFA_BLD_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES: + *mapped_val = CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES; + break; + case CFA_BLD_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES: + *mapped_val = CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES; + break; + case CFA_BLD_MPC_EV_EVICT_TABLE_SCOPE: + *mapped_val = CFA_MPC_EV_EVICT_TABLE_SCOPE; + break; + default: + ASSERT_RTNL(); + return -EINVAL; + } + return 0; +} + +/* Map device specific response status code to global value */ +static int status_code_map(u64 val, u64 *mapped_val) +{ + switch (val) { + case CFA_MPC_OK: + *mapped_val = CFA_BLD_MPC_OK; + break; + case CFA_MPC_UNSPRT_ERR: + *mapped_val = CFA_BLD_MPC_UNSPRT_ERR; + break; + case CFA_MPC_FMT_ERR: + *mapped_val = CFA_BLD_MPC_FMT_ERR; + break; + case CFA_MPC_SCOPE_ERR: + *mapped_val = CFA_BLD_MPC_SCOPE_ERR; + break; + case CFA_MPC_ADDR_ERR: + *mapped_val = CFA_BLD_MPC_ADDR_ERR; + break; + case CFA_MPC_CACHE_ERR: + *mapped_val = CFA_BLD_MPC_CACHE_ERR; + break; + case CFA_MPC_EM_MISS: + *mapped_val = CFA_BLD_MPC_EM_MISS; + break; + case CFA_MPC_EM_DUPLICATE: + *mapped_val = CFA_BLD_MPC_EM_DUPLICATE; + break; + case CFA_MPC_EM_EVENT_COLLECTION_FAIL: + *mapped_val = CFA_BLD_MPC_EM_EVENT_COLLECTION_FAIL; + break; + case CFA_MPC_EM_ABORT: + *mapped_val = CFA_BLD_MPC_EM_ABORT; + break; + default: + ASSERT_RTNL(); + return -EINVAL; + } + return 0; +} + +static bool has_unsupported_fields(struct cfa_mpc_data_obj *fields, + u16 len, u16 *unsup_flds, + u16 unsup_flds_len) +{ + int i, j; + + for (i = 0; i < len; i++) { + /* Skip invalid fields */ + if (fields[i].field_id == INVALID_U16) + continue; + + for (j = 0; j < unsup_flds_len; j++) { + if (fields[i].field_id == unsup_flds[j]) + return true; + } + } + + return false; +} + +int cfa_bld_p70_mpc_build_cache_read(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields) +{ + 
struct cfa_mpc_cache_axs_params parms = { 0 }; + int rc; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (!fields_valid(fields, CFA_BLD_MPC_READ_CMD_MAX_FLD, + cfa_p70_mpc_read_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_READ_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD, + fields); + SET_PARM_VALUE(tbl_index, u32, + CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(data_size, u8, CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD, + fields); + SET_PARM_VALUE(read.host_address, u64, + CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD, fields); + rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type, + CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD, fields, + table_type_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + rc = SET_PARM_MAPPED_VALUE(read.mode, enum cfa_mpc_read_mode, + CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD, + fields, read_mode_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_cache_write(u8 *cmd, u32 *cmd_buff_len, + const u8 *data, + struct cfa_mpc_data_obj *fields) +{ + struct cfa_mpc_cache_axs_params parms = { 0 }; + int rc; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (!fields_valid(fields, CFA_BLD_MPC_WRITE_CMD_MAX_FLD, + cfa_p70_mpc_write_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD, fields); + SET_PARM_VALUE(tbl_index, u32, + CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(data_size, u8, CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD, + fields); + rc = 
SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type, + CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD, fields, + table_type_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + parms.write.data_ptr = data; + rc = SET_PARM_MAPPED_VALUE(write.mode, enum cfa_mpc_write_mode, + CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD, + fields, write_mode_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return cfa_mpc_build_cache_axs_cmd(CFA_MPC_WRITE, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_cache_evict(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields) +{ + struct cfa_mpc_cache_axs_params parms = { 0 }; + int rc; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (!fields_valid(fields, CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD, + cfa_p70_mpc_invalidate_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD, fields); + SET_PARM_VALUE(tbl_index, u32, + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(data_size, u8, + CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD, fields); + rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type, + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD, + fields, table_type_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + rc = SET_PARM_MAPPED_VALUE(evict.mode, enum cfa_mpc_evict_mode, + CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD, + fields, evict_mode_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return cfa_mpc_build_cache_axs_cmd(CFA_MPC_INVALIDATE, cmd, + cmd_buff_len, &parms); +} + +int cfa_bld_p70_mpc_build_cache_rdclr(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields) +{ + struct cfa_mpc_cache_axs_params parms = { 0 }; + int rc; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return 
-EINVAL; + } + + if (!fields_valid(fields, CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD, + cfa_p70_mpc_read_clr_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD, fields); + SET_PARM_VALUE(tbl_index, u32, + CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(data_size, u8, + CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD, fields); + SET_PARM_VALUE(read.host_address, u64, + CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD, fields); + rc = SET_PARM_MAPPED_VALUE(tbl_type, enum cfa_hw_table_type, + CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD, + fields, table_type_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + SET_PARM_VALUE(read.clear_mask, u16, + CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD, fields); + rc = SET_PARM_MAPPED_VALUE(read.mode, enum cfa_mpc_read_mode, + CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD, + fields, read_mode_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return cfa_mpc_build_cache_axs_cmd(CFA_MPC_READ_CLR, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_em_search(u8 *cmd, u32 *cmd_buff_len, + u8 *em_entry, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD, }; + struct cfa_mpc_em_op_params parms = { 0 }; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD, + cfa_p70_mpc_em_search_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + 
CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD, fields); + + parms.search.em_entry = em_entry; + SET_PARM_VALUE(search.data_size, u8, + CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD, fields); + + return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_SEARCH, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_em_insert(u8 *cmd, u32 *cmd_buff_len, + const u8 *em_entry, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD, + CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD, + CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD, + }; + struct cfa_mpc_em_op_params parms = { 0 }; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD, + cfa_p70_mpc_em_insert_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD, fields); + + parms.insert.em_entry = (const u8 *)em_entry; + SET_PARM_VALUE(insert.replace, u8, + CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD, fields); + SET_PARM_VALUE(insert.entry_idx, u32, + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(insert.bucket_idx, u32, + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD, fields); + SET_PARM_VALUE(insert.data_size, u8, + CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD, fields); + + return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_INSERT, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_em_delete(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD, + CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD, + 
CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD, + }; + struct cfa_mpc_em_op_params parms = { 0 }; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD, + cfa_p70_mpc_em_delete_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD, fields); + + SET_PARM_VALUE(del.entry_idx, u32, + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(del.bucket_idx, u32, + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD, fields); + + return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_DELETE, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_build_em_chain(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD, + CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD, + CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD, + }; + struct cfa_mpc_em_op_params parms = { 0 }; + + /* Parameters check */ + if (!cmd || !cmd_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD, + cfa_p70_mpc_em_chain_cmd_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Prepare parameters structure */ + SET_PARM_VALUE(opaque, u32, CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD, + fields); + SET_PARM_VALUE(tbl_scope, u8, + CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD, fields); + + SET_PARM_VALUE(chain.entry_idx, u32, + 
CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD, fields); + SET_PARM_VALUE(chain.bucket_idx, u32, + CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD, fields); + + return cfa_mpc_build_em_op_cmd(CFA_MPC_EM_CHAIN, cmd, cmd_buff_len, + &parms); +} + +int cfa_bld_p70_mpc_parse_cache_read(u8 *resp, u32 resp_buff_len, + u8 *rd_data, u32 rd_data_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_READ_CMP_TYPE_FLD, + CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD, + CFA_BLD_MPC_READ_CMP_OPCODE_FLD, + CFA_BLD_MPC_READ_CMP_V_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD, + }; + struct cfa_mpc_cache_axs_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields || !rd_data) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD, + cfa_p70_mpc_read_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + result.rd_data = rd_data; + result.data_len = rd_data_len; + rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ, resp, resp_buff_len, + &result); + if (rc) + return rc; + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_READ_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD, fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_READ_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return 0; +} + +int cfa_bld_p70_mpc_parse_cache_write(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_WRITE_CMP_TYPE_FLD, + CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD, + CFA_BLD_MPC_WRITE_CMP_V_FLD, + 
CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD, + CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD, + }; + struct cfa_mpc_cache_axs_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_WRITE_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_WRITE_CMP_MAX_FLD, + cfa_p70_mpc_write_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_WRITE, resp, resp_buff_len, + &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD, fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_WRITE_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return 0; +} + +int cfa_bld_p70_mpc_parse_cache_evict(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_V_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD, + }; + struct cfa_mpc_cache_axs_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD, + cfa_p70_mpc_invalidate_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + 
return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_INVALIDATE, resp, + resp_buff_len, &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD, + fields); + rc = GET_RESP_MAPPED_VALUE(status, + CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return 0; +} + +int cfa_bld_p70_mpc_parse_cache_rdclr(u8 *resp, u32 resp_buff_len, + u8 *rd_data, u32 rd_data_len, + struct cfa_mpc_data_obj *fields) +{ + int rc; + struct cfa_mpc_cache_axs_result result = { 0 }; + u16 unsupported_fields[] = { + CFA_BLD_MPC_READ_CMP_TYPE_FLD, + CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD, + CFA_BLD_MPC_READ_CMP_OPCODE_FLD, + CFA_BLD_MPC_READ_CMP_V_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD, + }; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields || !rd_data) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_READ_CMP_MAX_FLD, + cfa_p70_mpc_read_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + result.rd_data = rd_data; + result.data_len = rd_data_len; + rc = cfa_mpc_parse_cache_axs_resp(CFA_MPC_READ_CLR, resp, resp_buff_len, + &result); + if (rc) + return rc; + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_READ_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD, fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_READ_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + return 0; +} + +int 
cfa_bld_p70_mpc_parse_em_search(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD, + CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD, + CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD, + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD, + }; + struct cfa_mpc_em_op_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD, + cfa_p70_mpc_em_search_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_SEARCH, resp, resp_buff_len, + &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD, + fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(search.bucket_num, CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD, + fields); + GET_RESP_VALUE(search.num_entries, + CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD, fields); + GET_RESP_VALUE(search.hash_msb, CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD, + fields); + GET_RESP_VALUE(search.match_idx, + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD, fields); + GET_RESP_VALUE(search.bucket_idx, + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD, fields); + + return 0; +} + +int cfa_bld_p70_mpc_parse_em_insert(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD, + 
CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD, + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD, + }; + struct cfa_mpc_em_op_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD, + cfa_p70_mpc_em_insert_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_INSERT, resp, resp_buff_len, + &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD, + fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(insert.bucket_num, CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD, + fields); + GET_RESP_VALUE(insert.num_entries, + CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD, fields); + GET_RESP_VALUE(insert.hash_msb, CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD, + fields); + GET_RESP_VALUE(insert.match_idx, + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD, fields); + GET_RESP_VALUE(insert.bucket_idx, + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD, fields); + GET_RESP_VALUE(insert.replaced, + CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD, fields); + GET_RESP_VALUE(insert.chain_update, + CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD, fields); + + return 0; +} + +int cfa_bld_p70_mpc_parse_em_delete(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj 
*fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD, + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD, + }; + struct cfa_mpc_em_op_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD, + cfa_p70_mpc_em_delete_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_DELETE, resp, resp_buff_len, + &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD, + fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(del.new_tail, CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD, + fields); + GET_RESP_VALUE(del.prev_tail, + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD, fields); + GET_RESP_VALUE(del.chain_update, + CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD, fields); + GET_RESP_VALUE(del.bucket_num, CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD, + fields); + GET_RESP_VALUE(del.num_entries, + CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD, fields); + return 0; +} + +int cfa_bld_p70_mpc_parse_em_chain(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields) +{ + u16 unsupported_fields[] = { + CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD, + 
CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD, + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD, + }; + struct cfa_mpc_em_op_result result = { 0 }; + int rc; + + /* Parameters check */ + if (!resp || !resp_buff_len || !fields) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (has_unsupported_fields(fields, CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD, + unsupported_fields, + ARRAY_SIZE(unsupported_fields))) { + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + if (!fields_valid(fields, CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD, + cfa_p70_mpc_em_chain_cmp_gbl_to_dev)) { + ASSERT_RTNL(); + return -EINVAL; + } + + /* Retrieve response parameters */ + rc = cfa_mpc_parse_em_op_resp(CFA_MPC_EM_CHAIN, resp, resp_buff_len, + &result); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(opaque, CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD, fields); + GET_RESP_VALUE(error_data, CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD, + fields); + rc = GET_RESP_MAPPED_VALUE(status, CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD, + fields, status_code_map); + if (rc) { + ASSERT_RTNL(); + return rc; + } + + GET_RESP_VALUE(chain.bucket_num, CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD, + fields); + GET_RESP_VALUE(chain.num_entries, + CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD, fields); + return 0; +} + diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c new file mode 100644 index 000000000000..64c654941600 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpc.c @@ -0,0 +1,883 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "bnxt_compat.h" +#include "sys_util.h" +#include "cfa_types.h" +#include "cfa_bld_p70_mpc.h" +#include "cfa_bld_p70_mpc_defs.h" +#include "cfa_p70_mpc_cmds.h" +#include "cfa_p70_mpc_cmpls.h" + +/* CFA MPC client ids */ +#define MP_CLIENT_TE_CFA READ_CMP_MP_CLIENT_TE_CFA +#define MP_CLIENT_RE_CFA READ_CMP_MP_CLIENT_RE_CFA + +/* MPC Client id check in CFA completion messages */ +#define ASSERT_CFA_MPC_CLIENT_ID(MPCID) \ + do { \ + if ((MPCID) != MP_CLIENT_TE_CFA && \ + (MPCID) != MP_CLIENT_RE_CFA) { \ + netdev_warn(NULL, \ + "Unexpected MPC client id in response: %d\n", \ + (MPCID)); \ + } \ + } while (0) + +/** Add MPC header information to MPC command message */ +static int fill_mpc_header(u8 *cmd, u32 size, u32 opaque_val) +{ + struct mpc_header hdr = { + .opaque = opaque_val, + }; + + if (size < sizeof(struct mpc_header)) { + netdev_dbg(NULL, "%s: invalid parameter: size:%d too small\n", __func__, size); + ASSERT_RTNL(); + return -EINVAL; + } + + memcpy(cmd, &hdr, sizeof(hdr)); + + return 0; +} + +/** Compose Table read-clear message */ +static int compose_mpc_read_clr_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_RDCLR_SIZE; + struct cfa_mpc_cache_read_params *rd_parms = &parms->read; + u8 *cmd; + + if (parms->data_size != 1) { + netdev_dbg(NULL, "%s: invalid parameter: data_size:%d\n", + __func__, parms->data_size); + ASSERT_RTNL(); + return -EINVAL; + } + + if (parms->tbl_type >= CFA_HW_TABLE_MAX) { + netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n", + __func__, parms->tbl_type); + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = cmd_buff + sizeof(struct mpc_header); + + /* Populate CFA MPC command header */ + memset(cmd, 0, 
TFC_MPC_CMD_TBL_RDCLR_SIZE); + TFC_MPC_CMD_TBL_RDCLR_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_READ_CLR); + TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_TYPE(cmd, parms->tbl_type); + TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_TBL_RDCLR_SET_DATA_SIZE(cmd, parms->data_size); + TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_INDEX(cmd, parms->tbl_index); + TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_0(cmd, (u32)rd_parms->host_address); + TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_1(cmd, (u32)(rd_parms->host_address >> 32)); + switch (rd_parms->mode) { + case CFA_MPC_RD_EVICT: + TFC_MPC_CMD_TBL_RDCLR_SET_CACHE_OPTION(cmd, CACHE_READ_CLR_OPTION_EVICT); + break; + case CFA_MPC_RD_NORMAL: + default: + TFC_MPC_CMD_TBL_RDCLR_SET_CACHE_OPTION(cmd, CACHE_READ_CLR_OPTION_NORMAL); + break; + } + TFC_MPC_CMD_TBL_RDCLR_SET_CLEAR_MASK(cmd, rd_parms->clear_mask); + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose Table read message */ +static int compose_mpc_read_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_RD_SIZE; + struct cfa_mpc_cache_read_params *rd_parms = &parms->read; + u8 *cmd; + + if (parms->data_size < 1 || parms->data_size > 4) { + netdev_dbg(NULL, "%s: invalid parameter: data_size:%d out of range\n", + __func__, parms->data_size); + ASSERT_RTNL(); + return -EINVAL; + } + + if (parms->tbl_type >= CFA_HW_TABLE_MAX) { + netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n", + __func__, parms->tbl_type); + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = (cmd_buff + sizeof(struct mpc_header)); + + /* Populate CFA MPC command header */ + memset(cmd, 0, TFC_MPC_CMD_TBL_RD_SIZE); + TFC_MPC_CMD_TBL_RD_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_READ); + TFC_MPC_CMD_TBL_RD_SET_TABLE_TYPE(cmd, parms->tbl_type); + 
TFC_MPC_CMD_TBL_RD_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_TBL_RD_SET_DATA_SIZE(cmd, parms->data_size); + TFC_MPC_CMD_TBL_RD_SET_TABLE_INDEX(cmd, parms->tbl_index); + TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_0(cmd, (u32)rd_parms->host_address); + TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_1(cmd, (u32)(rd_parms->host_address >> 32)); + switch (rd_parms->mode) { + case CFA_MPC_RD_EVICT: + TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_EVICT); + break; + case CFA_MPC_RD_DEBUG_LINE: + TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_DEBUG_LINE); + break; + case CFA_MPC_RD_DEBUG_TAG: + TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_DEBUG_TAG); + break; + case CFA_MPC_RD_NORMAL: + default: + TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL); + break; + } + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose Table write message */ +static int compose_mpc_write_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_WR_SIZE + + parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE; + struct cfa_mpc_cache_write_params *wr_parms = &parms->write; + u8 *cmd; + + if (parms->data_size < 1 || parms->data_size > 4) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (parms->tbl_type >= CFA_HW_TABLE_MAX) { + netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n", + __func__, parms->tbl_type); + ASSERT_RTNL(); + return -EINVAL; + } + + if (!parms->write.data_ptr) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = (cmd_buff + sizeof(struct mpc_header)); + + /* Populate CFA MPC command header */ + memset(cmd, 0, TFC_MPC_CMD_TBL_WR_SIZE); + TFC_MPC_CMD_TBL_WR_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_WRITE); + TFC_MPC_CMD_TBL_WR_SET_TABLE_TYPE(cmd, parms->tbl_type); + 
TFC_MPC_CMD_TBL_WR_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_TBL_WR_SET_DATA_SIZE(cmd, parms->data_size); + TFC_MPC_CMD_TBL_WR_SET_TABLE_INDEX(cmd, parms->tbl_index); + switch (wr_parms->mode) { + case CFA_MPC_WR_WRITE_THRU: + TFC_MPC_CMD_TBL_WR_SET_CACHE_OPTION(cmd, CACHE_WRITE_OPTION_WRITE_THRU); + break; + case CFA_MPC_WR_WRITE_BACK: + default: + TFC_MPC_CMD_TBL_WR_SET_CACHE_OPTION(cmd, CACHE_WRITE_OPTION_WRITE_BACK); + break; + } + + /* Populate CFA MPC command payload following the header */ + memcpy(cmd + TFC_MPC_CMD_TBL_WR_SIZE, wr_parms->data_ptr, + parms->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE); + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose Invalidate message */ +static int compose_mpc_evict_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_TBL_INV_SIZE; + struct cfa_mpc_cache_evict_params *ev_parms = &parms->evict; + u8 *cmd; + + if (parms->data_size < 1 || parms->data_size > 4) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (parms->tbl_type >= CFA_HW_TABLE_MAX) { + netdev_dbg(NULL, "%s: invalid parameter: tbl_typed: %d out of range\n", + __func__, parms->tbl_type); + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = cmd_buff + sizeof(struct mpc_header); + + /* Populate CFA MPC command header */ + memset(cmd, 0, TFC_MPC_CMD_TBL_INV_SIZE); + TFC_MPC_CMD_TBL_INV_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_INVALIDATE); + TFC_MPC_CMD_TBL_INV_SET_TABLE_TYPE(cmd, parms->tbl_type); + TFC_MPC_CMD_TBL_INV_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_TBL_INV_SET_DATA_SIZE(cmd, parms->data_size); + TFC_MPC_CMD_TBL_INV_SET_TABLE_INDEX(cmd, parms->tbl_index); + + switch (ev_parms->mode) { + case CFA_MPC_EV_EVICT_LINE: + TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_LINE); + break; + 
case CFA_MPC_EV_EVICT_CLEAN_LINES: + TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_CLEAN_LINES); + break; + case CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES: + TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_CLEAN_FAST_LINES); + break; + case CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES: + TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, + CACHE_EVICT_OPTION_CLEAN_AND_FAST_LINES); + break; + case CFA_MPC_EV_EVICT_TABLE_SCOPE: + /* Not supported */ + ASSERT_RTNL(); + return -EOPNOTSUPP; + case CFA_MPC_EV_EVICT_SCOPE_ADDRESS: + default: + TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(cmd, CACHE_EVICT_OPTION_SCOPE_ADDRESS); + break; + } + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** + * Build MPC CFA Cache access command + * + * @param [in] opc MPC opcode + * + * @param [out] cmd_buff Command data buffer to write the command to + * + * @param [in/out] cmd_buff_len Pointer to command buffer size param + * Set by caller to indicate the input cmd_buff size. + * Set to the actual size of the command generated by the api. 
+ * + * @param [in] parms Pointer to MPC cache access command parameters + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_build_cache_axs_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff, + u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms) +{ + int rc; + + if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque); + if (rc) + return rc; + + switch (opc) { + case CFA_MPC_READ_CLR: + return compose_mpc_read_clr_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_READ: + return compose_mpc_read_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_WRITE: + return compose_mpc_write_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_INVALIDATE: + return compose_mpc_evict_msg(cmd_buff, cmd_buff_len, parms); + default: + ASSERT_RTNL(); + return -EOPNOTSUPP; + } +} + +/** Compose EM Search message */ +static int compose_mpc_em_search_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms) +{ + struct cfa_mpc_em_search_params *e = &parms->search; + u8 *cmd; + u32 cmd_size = 0; + + cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_SEARCH_SIZE + + e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE; + + if (e->data_size < 1 || e->data_size > 4) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + if (!e->em_entry) { + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = cmd_buff + sizeof(struct mpc_header); + + /* Populate CFA MPC command header */ + memset(cmd, 0, TFC_MPC_CMD_EM_SEARCH_SIZE); + TFC_MPC_CMD_EM_SEARCH_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_SEARCH); + TFC_MPC_CMD_EM_SEARCH_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_EM_SEARCH_SET_DATA_SIZE(cmd, e->data_size); + /* Default to normal 
read cache option for EM search */ + TFC_MPC_CMD_EM_SEARCH_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL); + + /* Populate CFA MPC command payload following the header */ + memcpy(cmd + TFC_MPC_CMD_EM_SEARCH_SIZE, e->em_entry, + e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE); + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose EM Insert message */ +static int compose_mpc_em_insert_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms) +{ + struct cfa_mpc_em_insert_params *e = &parms->insert; + u8 *cmd; + u32 cmd_size = 0; + + cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_INSERT_SIZE + + e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE; + + if (e->data_size < 1 || e->data_size > 4) { + ASSERT_RTNL(); + return -EINVAL; + } + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + if (!e->em_entry) { + ASSERT_RTNL(); + return -EINVAL; + } + + cmd = (cmd_buff + sizeof(struct mpc_header)); + + /* Populate CFA MPC command header */ + memset(cmd, 0, TFC_MPC_CMD_EM_INSERT_SIZE); + TFC_MPC_CMD_EM_INSERT_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_INSERT); + TFC_MPC_CMD_EM_INSERT_SET_WRITE_THROUGH(cmd, 1); + TFC_MPC_CMD_EM_INSERT_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_EM_INSERT_SET_DATA_SIZE(cmd, e->data_size); + TFC_MPC_CMD_EM_INSERT_SET_REPLACE(cmd, e->replace); + TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX(cmd, e->entry_idx); + TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX2(cmd, e->bucket_idx); + /* Default to normal read cache option for EM insert */ + TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL); + /* Default to write through cache write option for EM insert */ + TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU); + + /* Populate CFA MPC command payload following the header */ + memcpy(cmd + TFC_MPC_CMD_EM_INSERT_SIZE, e->em_entry, + e->data_size * MPC_CFA_CACHE_ACCESS_UNIT_SIZE); + + 
*cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose EM Delete message */ +static int compose_mpc_em_delete_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_DELETE_SIZE; + struct cfa_mpc_em_delete_params *e = &parms->del; + u8 *cmd; + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + /* Populate CFA MPC command header */ + cmd = cmd_buff + sizeof(struct mpc_header); + memset(cmd, 0, TFC_MPC_CMD_EM_DELETE_SIZE); + TFC_MPC_CMD_EM_DELETE_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_DELETE); + TFC_MPC_CMD_EM_DELETE_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX(cmd, e->entry_idx); + TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX2(cmd, e->bucket_idx); + /* Default to normal read cache option for EM delete */ + TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL); + /* Default to write through cache write option for EM delete */ + TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU); + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** Compose EM Chain message */ +static int compose_mpc_em_chain_msg(u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms) +{ + u32 cmd_size = sizeof(struct mpc_header) + TFC_MPC_CMD_EM_MATCH_CHAIN_SIZE; + struct cfa_mpc_em_chain_params *e = &parms->chain; + u8 *cmd; + + if (*cmd_buff_len < cmd_size) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + /* Populate CFA MPC command header */ + cmd = cmd_buff + sizeof(struct mpc_header); + memset(cmd, 0, TFC_MPC_CMD_EM_MATCH_CHAIN_SIZE); + TFC_MPC_CMD_EM_MATCH_CHAIN_SET_OPCODE(cmd, TFC_MPC_CMD_OPCODE_EM_CHAIN); + TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_SCOPE(cmd, parms->tbl_scope); + TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX(cmd, e->entry_idx); + 
TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX2(cmd, e->bucket_idx); + /* Default to normal read cache option for EM delete */ + TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION(cmd, CACHE_READ_OPTION_NORMAL); + /* Default to write through cache write option for EM delete */ + TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION2(cmd, CACHE_WRITE_OPTION_WRITE_THRU); + + *cmd_buff_len = cmd_size; + + return 0; +} + +/** + * Build MPC CFA EM operation command + * + * @param [in] opc MPC EM opcode + * + * @param [in] cmd_buff Command data buffer to write the command to + * + * @param [in/out] cmd_buff_len Pointer to command buffer size param + * Set by caller to indicate the input cmd_buff size. + * Set to the actual size of the command generated by the api. + * + * @param [in] parms Pointer to MPC cache access command parameters + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_build_em_op_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff, u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms) +{ + int rc; + + if (!cmd_buff || !cmd_buff_len || *cmd_buff_len == 0 || !parms) { + netdev_dbg(NULL, "%s: invalid parameter: cmd_buff_len too small\n", __func__); + ASSERT_RTNL(); + return -EINVAL; + } + + rc = fill_mpc_header(cmd_buff, *cmd_buff_len, parms->opaque); + if (rc) + return rc; + + switch (opc) { + case CFA_MPC_EM_SEARCH: + return compose_mpc_em_search_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_EM_INSERT: + return compose_mpc_em_insert_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_EM_DELETE: + return compose_mpc_em_delete_msg(cmd_buff, cmd_buff_len, parms); + case CFA_MPC_EM_CHAIN: + return compose_mpc_em_chain_msg(cmd_buff, cmd_buff_len, parms); + default: + ASSERT_RTNL(); + return -EOPNOTSUPP; + } + + return 0; +} + +/** Parse MPC read clear completion */ +static int parse_mpc_read_clr_result(u8 *resp_buff, u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result) +{ + u8 *cmp; + u32 resp_size, rd_size; + u8 *rd_data; + + /* Minimum data size = 1 
32B unit */ + rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE; + resp_size = sizeof(struct mpc_header) + + TFC_MPC_TBL_RDCLR_CMPL_SIZE + + sizeof(struct mpc_cr_short_dma_data) + rd_size; + cmp = resp_buff + sizeof(struct mpc_header); + + if (resp_buff_len < resp_size || + result->data_len < rd_size || + !result->rd_data) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_RDCLR_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_RDCLR_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_RDCLR_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_RDCLR_CMPL_GET_OPAQUE(cmp); + + /* No data to copy if there was an error, return early */ + if (result->status != TFC_MPC_TBL_RDCLR_CMPL_STATUS_OK) + return 0; + + /* Copy the read data - starting at the end of the completion header including dma data */ + rd_data = resp_buff + sizeof(struct mpc_header) + + TFC_MPC_TBL_RDCLR_CMPL_SIZE + + sizeof(struct mpc_cr_short_dma_data); + + memcpy(result->rd_data, rd_data, rd_size); + + return 0; +} + +/** Parse MPC table read completion */ +static int parse_mpc_read_result(u8 *resp_buff, u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result) +{ + u8 *cmp; + u32 resp_size, rd_size; + u8 *rd_data; + + /* Minimum data size = 1 32B unit */ + rd_size = MPC_CFA_CACHE_ACCESS_UNIT_SIZE; + resp_size = sizeof(struct mpc_header) + + TFC_MPC_TBL_RD_CMPL_SIZE + + sizeof(struct mpc_cr_short_dma_data) + rd_size; + cmp = (resp_buff + sizeof(struct mpc_header)); + + if (resp_buff_len < resp_size || + result->data_len < rd_size || + !result->rd_data) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_RD_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_RD_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_RD_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_RD_CMPL_GET_OPAQUE(cmp); + + /* No data to copy if there was an error, return early */ + if (result->status != TFC_MPC_TBL_RD_CMPL_STATUS_OK) + 
return 0; + + /* Copy max of 4 32B words that can fit into the return buffer */ + rd_size = MIN(4 * MPC_CFA_CACHE_ACCESS_UNIT_SIZE, result->data_len); + + /* Copy the read data - starting at the end of the completion header */ + rd_data = resp_buff + sizeof(struct mpc_header) + + TFC_MPC_TBL_RD_CMPL_SIZE + + sizeof(struct mpc_cr_short_dma_data); + + memcpy(result->rd_data, rd_data, rd_size); + + return 0; +} + +/** Parse MPC table write completion */ +static int parse_mpc_write_result(u8 *resp_buff, u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result) +{ + u32 resp_size; + u8 *cmp; + + resp_size = sizeof(struct mpc_header) + TFC_MPC_TBL_WR_CMPL_SIZE; + cmp = (resp_buff + sizeof(struct mpc_header)); + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_WR_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_WR_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_WR_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_WR_CMPL_GET_OPAQUE(cmp); + + return 0; +} + +/** Parse MPC table evict completion */ +static int parse_mpc_evict_result(u8 *resp_buff, u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result) +{ + u8 *cmp; + u32 resp_size; + + resp_size = sizeof(struct mpc_header) + + TFC_MPC_TBL_INV_CMPL_SIZE; + cmp = resp_buff + sizeof(struct mpc_header); + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_INV_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_INV_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_INV_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_INV_CMPL_GET_OPAQUE(cmp); + + return 0; +} + +/** + * Parse MPC CFA Cache access command completion result + * + * @param [in] opc MPC cache access opcode + * + * @param [in] resp_buff Data buffer containing the response to parse + * + * @param [in] resp_buff_len Response buffer size + * + * @param [out] result Pointer to MPC 
cache access result object. This + * object will contain the fields parsed and extracted from the + * response buffer. + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_parse_cache_axs_resp(enum cfa_mpc_opcode opc, u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result) +{ + if (!resp_buff || resp_buff_len == 0 || !result) { + ASSERT_RTNL(); + return -EINVAL; + } + + switch (opc) { + case CFA_MPC_READ_CLR: + return parse_mpc_read_clr_result(resp_buff, resp_buff_len, + result); + case CFA_MPC_READ: + return parse_mpc_read_result(resp_buff, resp_buff_len, result); + case CFA_MPC_WRITE: + return parse_mpc_write_result(resp_buff, resp_buff_len, result); + case CFA_MPC_INVALIDATE: + return parse_mpc_evict_result(resp_buff, resp_buff_len, result); + default: + ASSERT_RTNL(); + return -EOPNOTSUPP; + } +} + +/** Parse MPC EM Search completion */ +static int parse_mpc_em_search_result(u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_em_op_result *result) +{ + u8 *cmp; + u32 resp_size; + + cmp = resp_buff + sizeof(struct mpc_header); + resp_size = sizeof(struct mpc_header) + + TFC_MPC_TBL_EM_SEARCH_CMPL_SIZE; + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_SEARCH_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_STATUS(cmp); + result->error_data = result->status != CFA_MPC_OK ? 
+ TFC_MPC_TBL_EM_SEARCH_CMPL_GET_HASH_MSB(cmp) : 0; + result->opaque = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_OPAQUE(cmp); + result->search.bucket_num = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_BKT_NUM(cmp); + result->search.num_entries = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_NUM_ENTRIES(cmp); + result->search.hash_msb = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_HASH_MSB(cmp); + result->search.match_idx = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX(cmp); + result->search.bucket_idx = TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX2(cmp); + + return 0; +} + +/** Parse MPC EM Insert completion */ +static int parse_mpc_em_insert_result(u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_em_op_result *result) +{ + u8 *cmp; + u32 resp_size; + + cmp = resp_buff + sizeof(struct mpc_header); + resp_size = sizeof(struct mpc_header) + TFC_MPC_TBL_EM_INSERT_CMPL_SIZE; + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_INSERT_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_EM_INSERT_CMPL_GET_STATUS(cmp); + result->error_data = (result->status != TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OK) ? 
+ (u32)TFC_MPC_TBL_EM_INSERT_CMPL_GET_HASH_MSB(cmp) : 0UL; + result->opaque = TFC_MPC_TBL_EM_INSERT_CMPL_GET_OPAQUE(cmp); + result->insert.bucket_num = TFC_MPC_TBL_EM_INSERT_CMPL_GET_BKT_NUM(cmp); + result->insert.num_entries = TFC_MPC_TBL_EM_INSERT_CMPL_GET_NUM_ENTRIES(cmp); + result->insert.hash_msb = TFC_MPC_TBL_EM_DELETE_CMPL_GET_HASH_MSB(cmp); + result->insert.match_idx = TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX4(cmp); + result->insert.bucket_idx = TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX3(cmp); + result->insert.replaced = TFC_MPC_TBL_EM_INSERT_CMPL_GET_REPLACED_ENTRY(cmp); + result->insert.chain_update = TFC_MPC_TBL_EM_INSERT_CMPL_GET_CHAIN_UPD(cmp); + + return 0; +} + +/** Parse MPC EM Delete completion */ +static int parse_mpc_em_delete_result(u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_em_op_result *result) +{ + u8 *cmp; + u32 resp_size; + + cmp = resp_buff + sizeof(struct mpc_header); + resp_size = sizeof(struct mpc_header) + + TFC_MPC_TBL_EM_DELETE_CMPL_SIZE; + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_DELETE_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_EM_DELETE_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_EM_DELETE_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_EM_DELETE_CMPL_GET_OPAQUE(cmp); + result->del.bucket_num = TFC_MPC_TBL_EM_DELETE_CMPL_GET_BKT_NUM(cmp); + result->del.num_entries = TFC_MPC_TBL_EM_DELETE_CMPL_GET_NUM_ENTRIES(cmp); + result->del.prev_tail = TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX3(cmp); + result->del.new_tail = TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX4(cmp); + result->del.chain_update = TFC_MPC_TBL_EM_DELETE_CMPL_GET_CHAIN_UPD(cmp); + + return 0; +} + +/** Parse MPC EM Chain completion */ +static int parse_mpc_em_chain_result(u8 *resp_buff, u32 resp_buff_len, + struct cfa_mpc_em_op_result *result) +{ + u8 *cmp; + u32 resp_size; + + cmp = resp_buff + sizeof(struct mpc_header); + resp_size = + 
sizeof(struct mpc_header) + TFC_MPC_TBL_EM_CHAIN_CMPL_SIZE; + + if (resp_buff_len < resp_size) { + ASSERT_RTNL(); + return -EINVAL; + } + + ASSERT_CFA_MPC_CLIENT_ID((int)TFC_MPC_TBL_EM_CHAIN_CMPL_GET_MP_CLIENT(cmp)); + + result->status = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_STATUS(cmp); + result->error_data = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_HASH_MSB(cmp); + result->opaque = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_OPAQUE(cmp); + result->chain.bucket_num = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_BKT_NUM(cmp); + result->chain.num_entries = TFC_MPC_TBL_EM_CHAIN_CMPL_GET_NUM_ENTRIES(cmp); + + return 0; +} + +/** + * Parse MPC CFA EM operation command completion result + * + * @param [in] opc MPC cache access opcode + * + * @param [in] resp_buff Data buffer containing the response to parse + * + * @param [in] resp_buff_len Response buffer size + * + * @param [out] result Pointer to MPC EM operation result object. This + * object will contain the fields parsed and extracted from the + * response buffer. + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_parse_em_op_resp(enum cfa_mpc_opcode opc, u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_em_op_result *result) +{ + if (!resp_buff || resp_buff_len == 0 || !result) { + ASSERT_RTNL(); + return -EINVAL; + } + + switch (opc) { + case CFA_MPC_EM_SEARCH: + return parse_mpc_em_search_result(resp_buff, resp_buff_len, + result); + case CFA_MPC_EM_INSERT: + return parse_mpc_em_insert_result(resp_buff, resp_buff_len, + result); + case CFA_MPC_EM_DELETE: + return parse_mpc_em_delete_result(resp_buff, resp_buff_len, + result); + case CFA_MPC_EM_CHAIN: + return parse_mpc_em_chain_result(resp_buff, resp_buff_len, + result); + default: + ASSERT_RTNL(); + return -EOPNOTSUPP; + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c new file mode 100644 index 000000000000..083dbb6c99cf --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/cfa_bld_p70_mpcops.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include "bnxt_compat.h" +#include "cfa_bld_mpcops.h" +#include "cfa_bld_p70_host_mpc_wrapper.h" +#include "cfa_bld_p70_mpcops.h" + +const struct cfa_bld_mpcops cfa_bld_p70_mpcops = { + /* Build command apis */ + .cfa_bld_mpc_build_cache_read = cfa_bld_p70_mpc_build_cache_read, + .cfa_bld_mpc_build_cache_write = cfa_bld_p70_mpc_build_cache_write, + .cfa_bld_mpc_build_cache_evict = cfa_bld_p70_mpc_build_cache_evict, + .cfa_bld_mpc_build_cache_read_clr = cfa_bld_p70_mpc_build_cache_rdclr, + .cfa_bld_mpc_build_em_search = cfa_bld_p70_mpc_build_em_search, + .cfa_bld_mpc_build_em_insert = cfa_bld_p70_mpc_build_em_insert, + .cfa_bld_mpc_build_em_delete = cfa_bld_p70_mpc_build_em_delete, + .cfa_bld_mpc_build_em_chain = cfa_bld_p70_mpc_build_em_chain, + /* Parse response apis */ + .cfa_bld_mpc_parse_cache_read = cfa_bld_p70_mpc_parse_cache_read, + .cfa_bld_mpc_parse_cache_write = cfa_bld_p70_mpc_parse_cache_write, + .cfa_bld_mpc_parse_cache_evict = cfa_bld_p70_mpc_parse_cache_evict, + .cfa_bld_mpc_parse_cache_read_clr = cfa_bld_p70_mpc_parse_cache_rdclr, + .cfa_bld_mpc_parse_em_search = cfa_bld_p70_mpc_parse_em_search, + .cfa_bld_mpc_parse_em_insert = cfa_bld_p70_mpc_parse_em_insert, + .cfa_bld_mpc_parse_em_delete = cfa_bld_p70_mpc_parse_em_delete, + .cfa_bld_mpc_parse_em_chain = cfa_bld_p70_mpc_parse_em_chain, +}; + +int cfa_bld_p70_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo) +{ + if (hw_ver != CFA_P70 || !mpcinfo) + return -EINVAL; + + mpcinfo->mpcops = &cfa_bld_p70_mpcops; + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h new file mode 100644 index 000000000000..cc15066500c3 --- 
/dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_defs.h @@ -0,0 +1,399 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_BLD_DEFS_H_ +#define _CFA_BLD_DEFS_H_ + +#include "cfa_resources.h" +#include "cfa_types.h" + +/** + * @addtogroup CFA_BLD CFA Builder Library + * \ingroup CFA_V3 + * The CFA builder library is a set of APIs provided the following services: + * + * 1. Provide users generic put service to convert software programming data + * into a hardware data bit stream according to a HW layout representation, + * or generic get service to extract value of a field or values of a number + * of fields from the raw hardware data bit stream according to a HW layout. + * + * - A software programming data is represented in {field_idx, val} + * structure. + * - A HW layout is represented with array of CFA field structures with + * {bitpos, bitlen} and identified by a layout id corresponding to a CFA + * HW table. + * - A HW data bit stream are bits that is formatted according to a HW + * layout representation. + * + * 2. Provide EM/WC key and action related service APIs to compile layout, + * init, and manipulate key and action data objects. + * + * 3. Provide CFA mid-path message building APIs. (TBD) + * + * The CFA builder library is designed to run in the primate firmware and also + * as part of the following host base diagnostic software. 
+ * - Lcdiag + * - Truflow CLI + * - coredump decorder + * + * @{ + */ + +/** @name CFA Builder Common Definition + * CFA builder common structures and enumerations + */ + +/**@{*/ +/** + * CFA HW KEY CONTROL OPCODE definition + */ +enum cfa_key_ctrlops { + CFA_KEY_CTRLOPS_INSERT, /**< insert control bits */ + CFA_KEY_CTRLOPS_STRIP, /**< strip control bits */ + CFA_KEY_CTRLOPS_MAX +}; + +/** + * CFA HW field structure definition + */ +struct cfa_field { + /** [in] Starting bit position pf the HW field within a HW table + * entry. + */ + u16 bitpos; + /** [in] Number of bits for the HW field. */ + u16 bitlen; +}; + +/** + * CFA HW table entry layout structure definition + */ +struct cfa_layout { + /** [out] Bit order of layout + * if swap_order_bitpos is non-zero, the bit order of the layout + * will be swapped after this bit. swap_order_bitpos must be a + * multiple of 64. This is currently only used for inlined action + * records where the AR is lsb and the following inlined actions + * must be msb. + */ + bool is_msb_order; + /** [out] Reverse is_msb_order after this bit if non-zero */ + u16 swap_order_bitpos; + /** [out] Size in bits of entry */ + u32 total_sz_in_bits; + /** [in/out] data pointer of the HW layout fields array */ + struct cfa_field *field_array; + /** [out] number of HW field entries in the HW layout field array */ + u32 array_sz; + /** [out] layout_id - layout id associated with the layout */ + u16 layout_id; +}; + +/** + * CFA HW data object definition + */ +struct cfa_data_obj { + /** [in] HW field identifier. Used as an index to a HW table layout */ + u16 field_id; + /** [in] Value of the HW field */ + u64 val; +}; + +/**@}*/ + +/** @name CFA Builder PUT_FIELD APIs + * CFA Manager apis used for generating hw layout specific data objects that + * can be programmed to the hardware + */ + +/**@{*/ +/** + * @brief This API provides the functionality to program a specified value to a + * HW field based on the provided programming layout. 
+ * + * @param[in,out] data_buf + * A data pointer to a CFA HW key/mask data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in] field_id + * ID of the HW field to be programmed + * + * @param[in] val + * Value of the HW field to be programmed + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_put_field(u64 *data_buf, const struct cfa_layout *layout, + u16 field_id, u64 val); + +/** + * @brief This API provides the functionality to program an array of field + * values with corresponding field IDs to a number of profiler sub-block fields + * based on the fixed profiler sub-block hardware programming layout. + * + * @param[in, out] obj_data + * A pointer to a CFA profiler key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in] field_tbl + * A pointer to an array that consists of the object field + * ID/value pairs + * + * @param[in] field_tbl_sz + * Number of entries in the table + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_put_fields(u64 *obj_data, const struct cfa_layout *layout, + struct cfa_data_obj *field_tbl, u16 field_tbl_sz); + +/** + * @brief This API provides the functionality to program an array of field + * values with corresponding field IDs to a number of profiler sub-block fields + * based on the fixed profiler sub-block hardware programming layout. This + * API will swap the n byte blocks before programming the field array. 
+ * + * @param[in, out] obj_data + * A pointer to a CFA profiler key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in] field_tbl + * A pointer to an array that consists of the object field + * ID/value pairs + * + * @param[in] field_tbl_sz + * Number of entries in the table + * + * @param[in] data_size + * size of the data in bytes + * + * @param[in] n + * block size in bytes + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_put_fields_swap(u64 *obj_data, const struct cfa_layout *layout, + struct cfa_data_obj *field_tbl, u16 field_tbl_sz, + u16 data_size, u16 n); + +/** + * @brief This API provides the functionality to write a value to a + * field within the bit position and bit length of a HW data + * object based on a provided programming layout. + * + * @param[in, out] obj_data + * A pointer of the action object to be initialized + * + * @param[in] layout + * A pointer of the programming layout + * + * @param field_id + * [in] Identifier of the HW field + * + * @param[in] bitpos_adj + * Bit position adjustment value + * + * @param[in] bitlen_adj + * Bit length adjustment value + * + * @param[in] val + * HW field value to be programmed + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_put_field_rel(u64 *obj_data, const struct cfa_layout *layout, + u16 field_id, int16_t bitpos_adj, int16_t bitlen_adj, + u64 val); + +/**@}*/ + +/** @name CFA Builder GET_FIELD APIs + * CFA Manager apis used for extract hw layout specific fields from CFA HW + * data objects + */ + +/**@{*/ +/** + * @brief The API provides the functionality to get bit offset and bit + * length information of a field from a programming layout. 
+ * + * @param[in] layout + * A pointer of the action layout + * + * @param[in] field_id + * The field for which to retrieve the slice + * + * @param[out] slice + * A pointer to the action offset info data structure + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get_slice(const struct cfa_layout *layout, u16 field_id, + struct cfa_field *slice); + +/** + * @brief This API provides the functionality to read the value of a + * CFA HW field from CFA HW data object based on the hardware + * programming layout. + * + * @param[in] obj_data + * A pointer to a CFA HW key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in] field_id + * ID of the HW field to be programmed + * + * @param[out] val + * Value of the HW field + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get_field(u64 *obj_data, const struct cfa_layout *layout, + u16 field_id, u64 *val); + +/** + * @brief This API provides the functionality to read 128-bit value of + * a CFA HW field from CFA HW data object based on the hardware + * programming layout. + * + * @param[in] obj_data + * A pointer to a CFA HW key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in] field_id + * ID of the HW field to be programmed + * + * @param[out] val_msb + * Msb value of the HW field + * + * @param[out] val_lsb + * Lsb value of the HW field + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get128_field(u64 *obj_data, const struct cfa_layout *layout, + u16 field_id, u64 *val_msb, u64 *val_lsb); + +/** + * @brief This API provides the functionality to read a number of + * HW fields from a CFA HW data object based on the hardware + * programming layout. 
+ * + * @param[in] obj_data + * A pointer to a CFA profiler key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in, out] field_tbl + * A pointer to an array that consists of the object field + * ID/value pairs + * + * @param[in] field_tbl_sz + * Number of entries in the table + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get_fields(u64 *obj_data, const struct cfa_layout *layout, + struct cfa_data_obj *field_tbl, u16 field_tbl_sz); + +/** + * @brief This API provides the functionality to read a number of + * HW fields from a CFA HW data object based on the hardware + * programming layout.This API will swap the n byte blocks before + * retrieving the field array. + * + * @param[in] obj_data + * A pointer to a CFA profiler key/mask object data + * + * @param[in] layout + * A pointer to CFA HW programming layout + * + * @param[in, out] field_tbl + * A pointer to an array that consists of the object field + * ID/value pairs + * + * @param[in] field_tbl_sz + * Number of entries in the table + * + * @param[in] data_size + * size of the data in bytes + * + * @param[in] n + * block size in bytes + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get_fields_swap(u64 *obj_data, const struct cfa_layout *layout, + struct cfa_data_obj *field_tbl, u16 field_tbl_sz, + u16 data_size, u16 n); + +/** + * @brief Get a value to a specific location relative to a HW field + * This API provides the functionality to read HW field from + * a section of a HW data object identified by the bit position + * and bit length from a given programming layout in order to avoid + * reading the entire HW data object. 
+ * + * @param[in] obj_data + * A pointer of the data object to read from + * + * @param[in] layout + * A pointer of the programming layout + * + * @param[in] field_id + * Identifier of the HW field + * + * @param[in] bitpos_adj + * Bit position adjustment value + * + * @param[in] bitlen_adj + * Bit length adjustment value + * + * @param[out] val + * Value of the HW field + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_get_field_rel(u64 *obj_data, const struct cfa_layout *layout, + u16 field_id, int16_t bitpos_adj, int16_t bitlen_adj, + u64 *val); + +/** + * @brief Get the length of the layout in words + * + * @param[in] layout + * A pointer to the layout to determine the number of words + * required + * + * @return + * number of words needed for the given layout + */ +u16 cfa_get_wordlen(const struct cfa_layout *layout); + +/**@}*/ + +/**@}*/ +#endif /* _CFA_BLD_DEFS_H_*/ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h new file mode 100644 index 000000000000..86e77f1ec05b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpc_field_ids.h @@ -0,0 +1,1268 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _CFA_BLD_MPC_FIELD_IDS_H_ +#define _CFA_BLD_MPC_FIELD_IDS_H_ + +/** + * CFA Hardware Cache Table Type + */ +enum cfa_bld_mpc_hw_table_type { + CFA_BLD_MPC_HW_TABLE_TYPE_ACTION, /**< CFA Action Record Table */ + CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP, /**< CFA EM Lookup Record Table */ + CFA_BLD_MPC_HW_TABLE_TYPE_MAX +}; + +/* + * CFA MPC Cache access reading mode + * To be used as a value for CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD + */ +enum cfa_bld_mpc_read_mode { + CFA_BLD_MPC_RD_NORMAL, /**< Normal read mode */ + CFA_BLD_MPC_RD_EVICT, /**< Read the cache and evict the cache line */ + CFA_BLD_MPC_RD_DEBUG_LINE, /**< Debug read line mode */ + CFA_BLD_MPC_RD_DEBUG_TAG, /**< Debug read tag mode */ + CFA_BLD_MPC_RD_MODE_MAX +}; + +/** + * CFA MPC Cache access writing mode + * To be used as a value for CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD + */ +enum cfa_bld_mpc_write_mode { + CFA_BLD_MPC_WR_WRITE_THRU, /**< Write to cache in Write through mode */ + CFA_BLD_MPC_WR_WRITE_BACK, /**< Write to cache in Write back mode */ + CFA_BLD_MPC_WR_MODE_MAX +}; + +/** + * CFA MPC Cache access eviction mode + * To be used as a value for CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD + */ +enum cfa_bld_mpc_evict_mode { + /** + * Line evict: These modes evict a single cache line + * In these modes, the eviction occurs regardless of the cache line + * state (CLEAN/CLEAN_FAST_EVICT/DIRTY) + */ + /* Cache line addressed by set/way is evicted */ + CFA_BLD_MPC_EV_EVICT_LINE, + /* Cache line hit with the table scope/address tuple is evicted */ + CFA_BLD_MPC_EV_EVICT_SCOPE_ADDRESS, + + /** + * Set Evict: These modes evict cache lines that meet certain criteria + * from the entire cache set. 
+ */ + /* + * Cache lines only in CLEAN state are evicted from the set + * derived from the address + */ + CFA_BLD_MPC_EV_EVICT_CLEAN_LINES, + /* + * Cache lines only in CLEAN_FAST_EVICT state are evicted from + * the set derived from the address + */ + CFA_BLD_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES, + /* + * Cache lines in both CLEAN and CLEAN_FAST_EVICT states are + * evicted from the set derived from the address + */ + CFA_BLD_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES, + /* + * All Cache lines in the set identified by the address and + * belonging to the table scope are evicted. + */ + CFA_BLD_MPC_EV_EVICT_TABLE_SCOPE, + CFA_BLD_MPC_EV_MODE_MAX, +}; + +/** + * MPC CFA Command completion status + */ +enum cfa_bld_mpc_cmpl_status { + /* Command success */ + CFA_BLD_MPC_OK, + /* Unsupported CFA opcode */ + CFA_BLD_MPC_UNSPRT_ERR, + /* CFA command format error */ + CFA_BLD_MPC_FMT_ERR, + /* SVIF-Table Scope error */ + CFA_BLD_MPC_SCOPE_ERR, + /* Address error: Only used if EM command or TABLE_TYPE=EM */ + CFA_BLD_MPC_ADDR_ERR, + /* Cache operation error */ + CFA_BLD_MPC_CACHE_ERR, + /* EM_SEARCH or EM_DELETE did not find a matching EM entry */ + CFA_BLD_MPC_EM_MISS, + /* EM_INSERT found a matching EM entry and REPLACE=0 in the command */ + CFA_BLD_MPC_EM_DUPLICATE, + /* EM_EVENT_COLLECTION_FAIL no events to return */ + CFA_BLD_MPC_EM_EVENT_COLLECTION_FAIL, + /* + * EM_INSERT required a dynamic bucket to be added to the chain + * to successfully insert the EM entry, but the entry provided + * for use as dynamic bucket was invalid. (bucket_idx == 0) + */ + CFA_BLD_MPC_EM_ABORT, +}; + +/** + * Field IDS for READ_CMD: This command reads 1-4 consecutive 32B words + * from the specified address within a table scope. + */ +enum cfa_bld_mpc_read_cmd_fields { + CFA_BLD_MPC_READ_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. 
*/ + CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD = 2, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD = 3, + /* + * Test field for CFA MPC builder validation, added to introduce + * a hold in the field mapping array + */ + CFA_BLD_MPC_READ_CMD_RANDOM_TEST_FLD = 4, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD = 5, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD = 6, + /* + * The 64-bit host address to which to write the DMA data returned in + * the completion. The data will be written to the same function as the + * one that owns the SQ this command is read from. DATA_SIZE determines + * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0, + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD = 7, + CFA_BLD_MPC_READ_CMD_MAX_FLD = 8, +}; + +/** + * Field IDS for WRITE_CMD: This command writes 1-4 consecutive 32B + * words to the specified address within a table scope. + */ +enum cfa_bld_mpc_write_cmd_fields { + CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD = 1, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. 
For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_BLD_MPC_WRITE_CMD_WRITE_THROUGH_FLD = 2, + /* Table scope to access. */ + CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD = 3, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD = 4, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD = 5, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD = 6, + CFA_BLD_MPC_WRITE_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for READ_CLR_CMD: This command performs a read-modify-write + * to the specified 32B address using a 16b mask that specifies up to 16 + * 16b words to clear before writing the data back. It returns the 32B + * data word read from cache (not the value written after the clear + * operation). + */ +enum cfa_bld_mpc_read_clr_cmd_fields { + CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. */ + CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD = 2, + /* + * This field is no longer used. The READ_CLR command always reads (and + * does a mask-clear) on a single cache line. 
This field was added for + * SR2 A0 to avoid an ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see + * CUMULUS-17872). That issue was fixed in SR2 B0. + */ + CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD = 5, + /* + * The 64-bit host address to which to write the DMA data returned in + * the completion. The data will be written to the same function as the + * one that owns the SQ this command is read from. DATA_SIZE determines + * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0, + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD = 6, + /* + * Specifies bits in 32B data word to clear. For x=0..15, when + * clear_mask[x]=1, data[x*16+15:x*16] is set to 0. + */ + CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD = 7, + CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD = 8, +}; + +/** + * Field IDS for INVALIDATE_CMD: This command forces an explicit evict + * of 1-4 consecutive cache lines such that the next time the structure + * is used it will be re-read from its backing store location. + */ +enum cfa_bld_mpc_invalidate_cmd_fields { + CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. 
*/ + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD = 2, + /* + * This value identifies the number of cache lines to invalidate. A + * FMT_ERR is reported if the value is not in the range of [1, 4]. + */ + CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD = 5, + CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD = 6, +}; + +/** + * Field IDS for EM_SEARCH_CMD: This command supplies an exact match + * entry of 1-4 32B words to search for in the exact match table. CFA + * first computes the hash value of the key in the entry, and determines + * the static bucket address to search from the hash and the + * (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. It then searches that static + * bucket chain for an entry with a matching key (the LREC in the + * command entry is ignored). If a matching entry is found, CFA reports + * OK status in the completion. Otherwise, assuming no errors abort the + * search before it completes, it reports EM_MISS status. + */ +enum cfa_bld_mpc_em_search_cmd_fields { + CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD = 0, + /* Table scope to access. */ + CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD = 1, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. 
+ */ + CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD = 3, + CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD = 4, +}; + +/** + * Field IDS for EM_INSERT_CMD: This command supplies an exact match + * entry of 1-4 32B words to insert in the exact match table. CFA first + * computes the hash value of the key in the entry, and determines the + * static bucket address to search from the hash and the (EM_BUCKETS, + * EM_SIZE) for TABLE_SCOPE. It then writes the 1-4 32B words of the + * exact match entry starting at the TABLE_INDEX location in the + * command. When the entry write completes, it searches the static + * bucket chain for an existing entry with a key matching the key in the + * insert entry (the LREC does not need to match). If a matching entry + * is found: * If REPLACE=0, the CFA aborts the insert and returns + * EM_DUPLICATE status. * If REPLACE=1, the CFA overwrites the matching + * entry with the new entry. REPLACED_ENTRY=1 in the completion in this + * case to signal that an entry was replaced. The location of the entry + * is provided in the completion. If no match is found, CFA adds the new + * entry to the lowest unused entry in the tail bucket. If the current + * tail bucket is full, this requires adding a new bucket to the tail. + * The entry is then inserted at entry number 0. TABLE_INDEX2 provides + * the address of the new tail bucket, if needed.
If set to 0, the + * insert is aborted and returns EM_ABORT status instead of adding a new + * bucket to the tail. CHAIN_UPD in the completion indicates whether a + * new bucket was added (1) or not (0). For locked scopes, if the read + * of the static bucket gives a locked scope miss error, indicating that + * the address is not in the cache, the static bucket is assumed empty. + * In this case, TAI creates a new bucket, setting entry 0 to the new + * entry fields and initializing all other fields to 0. It writes this + * new bucket to the static bucket address, which installs it in the + * cache. + */ +enum cfa_bld_mpc_em_insert_cmd_fields { + CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. */ + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD = 2, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Starting + * address to write exact match entry being inserted. 
+ */ + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD = 5, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. + */ + CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD = 6, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Only used + * when no duplicate entry is found and the tail bucket in the chain + * searched has no unused entries. In this case, TABLE_INDEX2 provides + * the index to the 32B dynamic bucket to add to the tail of the chain + * (it is the new tail bucket). In this case, the CFA first writes + * TABLE_INDEX2 with a new bucket: * Entry 0 of the bucket sets the + * HASH_MSBS computed from the hash and ENTRY_PTR to TABLE_INDEX. * + * Entries 1-5 of the bucket set HASH_MSBS and ENTRY_PTR to 0. * CHAIN=0 + * and CHAIN_PTR is set to CHAIN_PTR from the original tail bucket to + * maintain the background chaining. CFA then sets CHAIN=1 and + * CHAIN_PTR=TABLE_INDEX2 in the original tail bucket to link the new + * bucket to the chain. CHAIN_UPD=1 in the completion to signal that the + * new bucket at TABLE_INDEX2 was added to the tail of the chain. + */ + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD = 7, + /* + * Only used if an entry is found whose key matches the exact match + * entry key in the command: * REPLACE=0: The insert is aborted and + * EM_DUPLICATE status is returned, signaling that the insert failed. + * The index of the matching entry that blocked the insertion is + * returned in the completion. * REPLACE=1: The matching entry is + * replaced with that from the command (ENTRY_PTR in the bucket is + * overwritten with TABLE_INDEX from the command). HASH_MSBS for the + * entry number never changes in this case since it had to match the new + * entry key HASH_MSBS to match.
When an entry is replaced, + * REPLACED_ENTRY=1 in the completion and the index of the matching + * entry is returned in the completion so that software can de-allocate + * the entry. + */ + CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD = 8, + CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD = 9, +}; + +/** + * Field IDS for EM_DELETE_CMD: This command searches for an exact match + * entry index in the static bucket chain and deletes it if found. + * TABLE_INDEX give the entry index to delete and TABLE_INDEX2 gives the + * static bucket index. If a matching entry is found: * If the matching + * entry is the last valid entry in the tail bucket, its entry fields + * (HASH_MSBS and ENTRY_PTR) are set to 0 to delete the entry. * If the + * matching entry is not the last valid entry in the tail bucket, the + * entry fields from that last entry are moved to the matching entry, + * and the fields of that last entry are set to 0. * If any of the + * previous processing results in the tail bucket not having any valid + * entries, the tail bucket is the static bucket, the scope is a locked + * scope, and CHAIN_PTR=0, hardware evicts the static bucket from the + * cache and the completion signals this case with CHAIN_UPD=1. * If any + * of the previous processing results in the tail bucket not having any + * valid entries, and the tail bucket is not the static bucket, the tail + * bucket is removed from the chain. In this case, the penultimate + * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to + * unlink the tail bucket, and CHAIN_PTR set to that from the original + * tail bucket to preserve background chaining. The completion signals + * this case with CHAIN_UPD=1 and returns the index to the bucket + * removed so that software can de-allocate it. CFA returns OK status if + * the entry was successfully deleted. Otherwise, it returns EM_MISS + * status assuming there were no errors that caused processing to be + * aborted. 
+ */ +enum cfa_bld_mpc_em_delete_cmd_fields { + CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (including EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. */ + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD = 3, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Entry index + * to delete. + */ + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD = 4, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. + */ + CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD = 5, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Static + * bucket address for bucket chain. + */ + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD = 6, + CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for EM_CHAIN_CMD: This command updates CHAIN_PTR in the + * tail bucket of a static bucket chain, supplying both the static + * bucket and the new CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR + * value and TABLE_INDEX2[23:0] is the static bucket.
This command + * provides software a means to update background chaining coherently + * with other bucket updates. The value of CHAIN is unaffected (stays at + * 0). For locked scopes, if the static bucket is the tail bucket, it is + * empty (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the + * CHAIN_PTR is being set to 0), instead of updating the static bucket + * it is evicted from the cache. In this case, CHAIN_UPD=1 in the + * completion. + */ +enum cfa_bld_mpc_em_chain_cmd_fields { + CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. */ + CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD = 3, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. New + * CHAIN_PTR to write to tail bucket. + */ + CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD = 4, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. 
+ */ + CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD = 5, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Static + * bucket address for bucket chain. + */ + CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD = 6, + CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for READ_CMP: When no errors, returns 1-4 consecutive 32B + * words from the TABLE_INDEX within the TABLE_SCOPE specified in the + * command, writing them to HOST_ADDRESS from the command. + */ +enum cfa_bld_mpc_read_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_READ_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_READ_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_READ_CMP_OPCODE_FLD = 3, + /* + * The length of the DMA that accompanies the completion in units of + * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates + * that there is no DMA that accompanies the completion. + */ + CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD = 4, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_READ_CMP_OPAQUE_FLD = 5, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_READ_CMP_V_FLD = 6, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command.
+ * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD = 7, + /* TABLE_TYPE from the command. */ + CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD = 8, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD = 9, + /* TABLE_INDEX from the command. */ + CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD = 10, + CFA_BLD_MPC_READ_CMP_MAX_FLD = 11, +}; + +/** + * Field IDS for WRITE_CMP: Returns status of the write of 1-4 + * consecutive 32B words starting at TABLE_INDEX in the table specified + * by (TABLE_TYPE, TABLE_SCOPE). + */ +enum cfa_bld_mpc_write_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_WRITE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_WRITE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. 
+ */ + CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_WRITE_CMP_V_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD = 6, + /* TABLE_TYPE from the command. */ + CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD = 7, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD = 8, + /* TABLE_INDEX from the command. */ + CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD = 9, + CFA_BLD_MPC_WRITE_CMP_MAX_FLD = 10, +}; + +/** + * Field IDS for READ_CLR_CMP: When no errors, returns 1 32B word from + * TABLE_INDEX in the table specified by (TABLE_TYPE, TABLE_SCOPE). The + * data returned is the value prior to the clear. + */ +enum cfa_bld_mpc_read_clr_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. 
Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_READ_CLR_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_READ_CLR_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_READ_CLR_CMP_OPCODE_FLD = 3, + /* + * The length of the DMA that accompanies the completion in units of + * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates + * that there is no DMA that accompanies the completion. + */ + CFA_BLD_MPC_READ_CLR_CMP_DMA_LENGTH_FLD = 4, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_READ_CLR_CMP_OPAQUE_FLD = 5, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_READ_CLR_CMP_V_FLD = 6, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. 
* + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_READ_CLR_CMP_HASH_MSB_FLD = 7, + /* TABLE_TYPE from the command. */ + CFA_BLD_MPC_READ_CLR_CMP_TABLE_TYPE_FLD = 8, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD = 9, + /* TABLE_INDEX from the command. */ + CFA_BLD_MPC_READ_CLR_CMP_TABLE_INDEX_FLD = 10, + CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD = 11, +}; + +/** + * Field IDS for INVALIDATE_CMP: Returns status for INVALIDATE commands. + */ +enum cfa_bld_mpc_invalidate_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. 
+ */ + CFA_BLD_MPC_INVALIDATE_CMP_V_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD = 6, + /* TABLE_TYPE from the command. */ + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD = 7, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD = 8, + /* TABLE_INDEX from the command. */ + CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD = 9, + CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD = 10, +}; + +/** + * Field IDS for EM_SEARCH_CMP: For OK status, returns the index of the + * matching entry found for the EM key supplied in the command. Returns + * EM_MISS status if no match was found. + */ +enum cfa_bld_mpc_em_search_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD = 0, + /* The command processing status. 
*/ + CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK + * status, gives ENTRY_PTR[25:0] of the matching entry found. Otherwise, + * set to 0. 
+ */ + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. If the hash + * is computed (no errors during initial processing of the command), + * TABLE_INDEX2[23:0] is the static bucket address determined from the + * hash of the exact match entry key in the command and the (EM_SIZE, + * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24 + * in this case are set to 0. For any other status, it is always 0. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD = 9, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD = 10, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD = 11, + /* See BKT_NUM description. 
*/ + CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD = 12, + CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD = 13, +}; + +/** + * Field IDS for EM_INSERT_CMP: OK status indicates that the exact match + * entry from the command was successfully inserted. EM_DUPLICATE status + * indicates that the insert was aborted because an entry with the same + * exact match key was found and REPLACE=0 in the command. EM_ABORT + * status indicates that no duplicate was found, the tail bucket in the + * chain was full, and TABLE_INDEX2=0. No changes are made to the + * database in this case. TABLE_INDEX is the starting address at which + * to insert the exact match entry (from the command). TABLE_INDEX2 is + * the address at which to insert a new bucket at the tail of the static + * bucket chain if needed (from the command). CHAIN_UPD=1 if a new + * bucket was added at this address. TABLE_INDEX3 is the static bucket + * address for the chain, determined from hashing the exact match entry. + * Software needs this address and TABLE_INDEX in order to delete the + * entry using an EM_DELETE command. TABLE_INDEX4 is the index of an + * entry found that had a matching exact match key to the command entry + * key. If no matching entry was found, it is set to 0. There are two + * cases when there is a matching entry, depending on REPLACE from the + * command: * REPLACE=0: EM_DUPLICATE status is reported and the insert + * is aborted. Software can use the static bucket address + * (TABLE_INDEX3[23:0]) and the matching entry (TABLE_INDEX4) in an + * EM_DELETE command if it wishes to explicitly delete the matching + * entry. * REPLACE=1: REPLACED_ENTRY=1 to signal that the entry at + * TABLE_INDEX4 was replaced by the insert entry. REPLACED_ENTRY will + * only be 1 if reporting OK status in this case. Software can de- + * allocate the entry at TABLE_INDEX4. + */ +enum cfa_bld_mpc_em_insert_cmp_fields { + /* + * This field indicates the exact type of the completion.
By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. 
*/ + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the starting address at which to insert + * the exact match entry. + */ + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command, which is the index for the new tail bucket to add + * if needed (CHAIN_UPD=1 if it was used). + */ + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. If the hash + * is computed (no errors during initial processing of the command), + * TABLE_INDEX2[23:0] is the static bucket address determined from the + * hash of the exact match entry key in the command and the (EM_SIZE, + * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24 + * in this case are set to 0. For any other status, it is always 0. + */ + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD = 11, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. ENTRY_PTR of + * matching entry found. Set to 0 if no matching entry found. If + * REPLACED_ENTRY=1, that indicates a matching entry was found and + * REPLACE=1 in the command. In this case, the matching entry was + * replaced by the new entry in the command and this index can therefore + * by de-allocated. + */ + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD = 12, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. 
The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD = 13, + /* See BKT_NUM description. */ + CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD = 14, + /* + * Specifies if the chain was updated while processing the command: Set + * to 1 when a new bucket is added to the tail of the static bucket + * chain at TABLE_INDEX2. This occurs if and only if the insert requires + * adding a new entry and the tail bucket is full. If set to 0, + * TABLE_INDEX2 was not used and is therefore still free. + */ + CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD = 15, + /* + * Set to 1 if a matching entry was found and REPLACE=1 in command. In + * the case, the entry starting at TABLE_INDEX4 was replaced and can + * therefore be de-allocated. Otherwise, this flag is set to 0. + */ + CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD = 16, + CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD = 17, +}; + +/** + * Field IDS for EM_DELETE_CMP: OK status indicates that an ENTRY_PTR + * matching TABLE_INDEX was found in the static bucket chain specified + * and was therefore deleted. EM_MISS status indicates that no match was + * found. TABLE_INDEX is from the command. It is the index of the entry + * to delete. TABLE_INDEX2 is from the command. It is the static bucket + * address. 
TABLE_INDEX3 is the index of the tail bucket of the static + * bucket chain prior to processing the command. TABLE_INDEX4 is the + * index of the tail bucket of the static bucket chain after processing + * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the + * static bucket was the tail bucket, it became empty after the delete, + * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the + * static bucket has been evicted from the cache. Otherwise, if + * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was + * removed from the chain because it went empty. It can therefore be de- + * allocated. + */ +enum cfa_bld_mpc_em_delete_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. 
+ * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the index of the entry to delete. + */ + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command. + */ + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK or + * EM_MISS status, the index of the tail bucket of the chain prior to + * processing the command. If CHAIN_UPD=1, the bucket was removed and + * this index can be de-allocated. For other status values, it is set to + * 0. + */ + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD = 11, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. 
For OK or + * EM_MISS status, the index of the tail bucket of the chain prior to + * after the command. If CHAIN_UPD=0 (always for EM_MISS status), it is + * always equal to TABLE_INDEX3 as the chain was not updated. For other + * status values, it is set to 0. + */ + CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD = 12, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD = 13, + /* See BKT_NUM description. */ + CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD = 14, + /* + * Specifies if the chain was updated while processing the command: Set + * to 1 when a bucket is removed from the static bucket chain. This + * occurs if after the delete, the tail bucket is a dynamic bucket and + * no longer has any valid entries. In this case, software should de- + * allocate the dynamic bucket at TABLE_INDEX3. It is also set to 1 when + * the static bucket is evicted, which only occurs for locked scopes. + * See the EM_DELETE command description for details. 
+ */ + CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD = 15, + CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD = 16, +}; + +/** + * Field IDS for EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of + * the tail bucket was successfully updated. TABLE_INDEX is from the + * command. It is the value of the new CHAIN_PTR. TABLE_INDEX2 is from + * the command. TABLE_INDEX3 is the index of the tail bucket of the + * static bucket chain. + */ +enum cfa_bld_mpc_em_chain_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. 
* + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the new CHAIN_PTR for the tail bucket of + * the static bucket chain. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK + * status, the index of the tail bucket of the chain. Otherwise, set to + * 0. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD = 11, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. 
* + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD = 12, + /* See BKT_NUM description. */ + CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD = 13, + /* + * Set to 1 when the scope is a locked scope, the tail bucket is the + * static bucket, the bucket is empty (all of its ENTRY_PTR values are + * 0), and TABLE_INDEX=0 in the command. In this case, the static bucket + * is evicted. For all other cases, it is set to 0. + */ + CFA_BLD_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD = 14, + CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD = 15, +}; + +#endif /* _CFA_BLD_MPC_FIELD_IDS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h new file mode 100644 index 000000000000..b00f1d6b51a3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_mpcops.h @@ -0,0 +1,589 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _CFA_BLD_MPCOPS_H_ +#define _CFA_BLD_MPCOPS_H_ + +#include "cfa_types.h" + +/** + * CFA HW data object definition + */ +struct cfa_mpc_data_obj { + /** [in] MPC field identifier */ + u16 field_id; + /** [in] Value of the HW field */ + u64 val; +}; + +struct cfa_bld_mpcops; + +/** + * @addtogroup CFA_BLD CFA Builder Library + * \ingroup CFA_V3 + * @{ + */ + +/** + * CFA MPC ops interface + */ +struct cfa_bld_mpcinfo { + /** [out] CFA MPC Builder operations function pointer table */ + const struct cfa_bld_mpcops *mpcops; +}; + +/** + * @name CFA_BLD_MPC CFA Builder Host MPC OPS API + * CFA builder host specific API used by host CFA application to bind + * to different CFA devices and access device by using MPC OPS. + */ + +/**@{*/ +/** CFA builder MPC bind API + * + * This API retrieves the CFA global MPC configuration. + * + * @param[in] hw_ver + * hardware version of the CFA + * + * @param[out] mpc_info + * CFA MPC interface + * + * @return + * 0 for SUCCESS, negative value for FAILURE + */ +int cfa_bld_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpc_info); + +/** CFA device specific function hooks for CFA MPC command composition + * and response parsing + * + * The following device hooks can be defined; unless noted otherwise, they are + * optional and can be filled with a null pointer. The pupose of these hooks + * to support CFA device operations for different device variants. + */ +struct cfa_bld_mpcops { + /** Build MPC Cache read command + * + * This API composes the MPC cache read command given the list + * of read parameters specified as an array of cfa_mpc_data_obj objects. + * + * @param[in] cmd + * MPC command buffer to compose the cache read command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. 
If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] fields + * Array of CFA data objects indexed by CFA_BLD_MPC_READ_CMD_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_READ_CMD_MAX_FLD. If the caller intends to set a + * specific field in the MPC command, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself (See example + * below). Otherwise set the field_id to INVALID_U16. If the caller + * sets the field_id for a field that is not valid for the device + * an error is returned. + * + * To set the table type to EM: + * fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id = + * CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD; + * fields[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val = + * CFA_HW_TABLE_LOOKUP; + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_cache_read)(u8 *cmd, + u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Build MPC Cache Write command + * + * This API composes the MPC cache write command given the list + * of write parameters specified as an array of cfa_mpc_data_obj + * objects. + * + * @param[in] cmd + * MPC command buffer to compose the cache write command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] data + * Pointer to the data to be written. Note that this data is just + * copied at the right offset into the command buffer. The actual MPC + * write happens when the command is issued over the MPC interface. + * + * @param[in] fields + * Array of CFA data objects indexed by CFA_BLD_MPC_WRITE_CMD_XXX_FLD + * enum values. 
The size of this array shall be + * CFA_BLD_MPC_WRITE_CMD_MAX_FLD. If the caller intends to set a + * specific field in the MPC command, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise + * set the field_id to INVALID_U16. If the caller sets the field_id for + * a field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_cache_write)(u8 *cmd, + u32 *cmd_buff_len, + const u8 *data, + struct cfa_mpc_data_obj *fields); + + /** Build MPC Cache Invalidate (Evict) command + * + * This API composes the MPC cache evict command given the list + * of evict parameters specified as an array of cfa_mpc_data_obj + * objects. + * + * @param[in] cmd + * MPC command buffer to compose the cache evict command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by + * CFA_BLD_MPC_INVALIDATE_CMD_XXX_FLD enum values. The size of this + * array shall be CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD. If the caller + * intends to set a specific field in the MPC command, the caller + * should set the field_id in cfa_mpc_data_obj to the array index + * itself. Otherwise set the field_id to INVALID_U16. If the caller + * sets the field_id for a field that is not valid for the device an + * error is returned. 
+ * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_cache_evict)(u8 *cmd, + u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Build MPC Cache read and clear command + * + * This API composes the MPC cache read-n-clear command given the list + * of read parameters specified as an array of cfa_mpc_data_obj objects. + * + * @param[in] cmd + * MPC command buffer to compose the cache read-n-clear command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by + * CFA_BLD_MPC_READ_CLR_CMD_XXX_FLD enum values. The size of this + * array shall be CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD. If the caller + * intends to set a specific field in the MPC command, the caller + * should set the field_id in cfa_mpc_data_obj to the array index + * itself. Otherwise set the field_id to INVALID_U16. If the caller + * sets the field_id for a field that is not valid for the device + * an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_cache_read_clr)(u8 *cmd, + u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Build MPC EM search command + * + * This API composes the MPC EM search command given the list + * of EM search parameters specified as an array of cfa_mpc_data_obj + * objects + * + * @param[in] cmd + * MPC command buffer to compose the EM search command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. 
If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] em_entry + * Pointer to the em_entry to be searched. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by + * CFA_BLD_MPC_EM_SEARCH_CMD_XXX_FLD enum values. The size of this + * array shall be CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD. If the caller + * intends to set a specific field in the MPC command, the caller + * should set the field_id in cfa_mpc_data_obj to the array index + * itself. Otherwise set the field_id to INVALID_U16. If the caller + * sets the field_id for a field that is not valid for the device an + * error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_em_search)(u8 *cmd, u32 *cmd_buff_len, + u8 *em_entry, + struct cfa_mpc_data_obj *fields); + + /** Build MPC EM insert command + * + * This API composes the MPC EM insert command given the list + * of EM insert parameters specified as an array of cfa_mpc_data_obj objects + * + * @param[in] cmd + * MPC command buffer to compose the EM insert command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in bytes. The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] em_entry + * Pointer to the em_entry to be inserted. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_INSERT_CMD_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD. If the caller intends to set a + * specific field in the MPC command, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. 
If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_em_insert)(u8 *cmd, u32 *cmd_buff_len, + const u8 *em_entry, + struct cfa_mpc_data_obj *fields); + + /** Build MPC EM delete command + * + * This API composes the MPC EM delete command given the list + * of EM delete parameters specified as an array of cfa_mpc_data_obj objects + * + * @param[in] cmd + * MPC command buffer to compose the EM delete command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_DELETE_CMD_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD. If the caller intends to set a + * specific field in the MPC command, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_em_delete)(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Build MPC EM chain command + * + * This API composes the MPC EM chain command given the list + * of EM chain parameters specified as an array of cfa_mpc_data_obj objects + * + * @param[in] cmd + * MPC command buffer to compose the EM chain command into. + * + * @param[in,out] cmd_buff_len + * Pointer to command buffer length variable. The caller sets this + * to the size of the 'cmd' buffer in byes. 
The api updates this to + * the actual size of the composed command. If the buffer length + * passed is not large enough to hold the composed command, an error + * is returned by the api. + * + * @param[in] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_CHAIN_CMD_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD. If the caller intends to set a + * specific field in the MPC command, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_build_em_chain)(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC Cache read response + * + * This API parses the MPC cache read response message and returns + * the read parameters as an array of cfa_mpc_data_obj objects. + * + * @param[in] resp + * MPC response buffer containing the cache read response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[in] rd_data + * Buffer to copy the MPC read data into + * + * @param[in] rd_data_len + * Size of the rd_data buffer in bytes + * + * @param[out] fields + * Array of CFA data objects indexed by CFA_BLD_MPC_READ_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_READ_CMP_MAX_FLD. If the caller intends to retrieve a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. 
+ * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_cache_read)(u8 *resp, + u32 resp_buff_len, + u8 *rd_data, + u32 rd_data_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC Cache Write response + * + * This API parses the MPC cache write response message and returns + * the write response fields as an array of cfa_mpc_data_obj objects. + * + * @param[in] resp + * MPC response buffer containing the cache write response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of CFA data objects indexed by CFA_BLD_MPC_WRITE_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_WRITE_CMP_MAX_FLD. If the caller intends to retrieve a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_cache_write)(u8 *resp, + u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC Cache Invalidate (Evict) response + * + * This API parses the MPC cache evict response message and returns + * the evict response fields as an array of cfa_mpc_data_obj objects. + * + * @param[in] resp + * MPC response buffer containing the cache evict response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_INVALIDATE_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. 
If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_cache_evict)(u8 *resp, + u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + + /* clang-format off */ + /** Parse MPC Cache read and clear response + * + * This API parses the MPC cache read-n-clear response message and + * returns the read response fields as an array of cfa_mpc_data_obj objects. + * + * @param[in] resp + * MPC response buffer containing the cache read-n-clear response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[in] rd_data + * Buffer to copy the MPC read data into + * + * @param[in] rd_data_len + * Size of the rd_data buffer in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_READ_CLR_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_cache_read_clr)(u8 *resp, + u32 resp_buff_len, u8 *rd_data, u32 rd_data_len, + struct cfa_mpc_data_obj *fields); + + /* clang-format on */ + /** Parse MPC EM search response + * + * This API parses the MPC EM search response message and returns + * the EM search response fields as an array of cfa_mpc_data_obj objects + * + * @param[in] resp + * MPC response buffer containing the EM search response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_SEARCH_CMP_XXX_FLD + * enum values. 
The size of this array shall be + * CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_em_search)(u8 *resp, + u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC EM insert response + * + * This API parses the MPC EM insert response message and returns + * the EM insert response fields as an array of cfa_mpc_data_obj objects + * + * @param[in] resp + * MPC response buffer containing the EM insert response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_INSERT_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_em_insert)(u8 *resp, + u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC EM delete response + * + * This API parses the MPC EM delete response message and returns + * the EM delete response fields as an array of cfa_mpc_data_obj objects + * + * @param[in] resp + * MPC response buffer containing the EM delete response. 
+ * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_DELETE_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. + * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_em_delete)(u8 *resp, + u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + + /** Parse MPC EM chain response + * + * This API parses the MPC EM chain response message and returns + * the EM chain response fields as an array of cfa_mpc_data_obj objects + * + * @param[in] resp + * MPC response buffer containing the EM chain response. + * + * @param[in] resp_buff_len + * Response buffer length in bytes + * + * @param[out] fields + * Array of cfa_mpc_data_obj indexed by CFA_BLD_MPC_EM_CHAIN_CMP_XXX_FLD + * enum values. The size of this array shall be + * CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD. If the caller intends to get a + * specific field in the MPC response, the caller should set the + * field_id in cfa_mpc_data_obj to the array index itself. Otherwise set + * the field_id to INVALID_U16. If the caller sets the field_id for a + * field that is not valid for the device an error is returned. 
+ * + * @return + * 0 for SUCCESS, negative errno for FAILURE + * + */ + int (*cfa_bld_mpc_parse_em_chain)(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); +}; + +/**@}*/ + +/**@}*/ +#endif /* _CFA_BLD_DEVOPS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h new file mode 100644 index 000000000000..8c4bb713f492 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_host_mpc_wrapper.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_BLD_P70_HOST_MPC_WRAPPER_H_ +#define _CFA_BLD_P70_HOST_MPC_WRAPPER_H_ + +#include "cfa_bld_mpcops.h" +/** + * MPC Cache operation command build apis + */ +int cfa_bld_p70_mpc_build_cache_read(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_cache_write(u8 *cmd, u32 *cmd_buff_len, + const u8 *data, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_cache_evict(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_cache_rdclr(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + +/** + * MPC EM operation command build apis + */ +int cfa_bld_p70_mpc_build_em_search(u8 *cmd, u32 *cmd_buff_len, + u8 *em_entry, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_em_insert(u8 *cmd, u32 *cmd_buff_len, + const u8 *em_entry, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_em_delete(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_build_em_chain(u8 *cmd, u32 *cmd_buff_len, + struct cfa_mpc_data_obj *fields); + +/** + * MPC Cache operation completion parse apis + */ +int cfa_bld_p70_mpc_parse_cache_read(u8 *resp, u32 resp_buff_len, + u8 *rd_data, u32 rd_data_len, + 
struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_cache_write(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_cache_evict(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_cache_rdclr(u8 *resp, u32 resp_buff_len, + u8 *rd_data, u32 rd_data_len, + struct cfa_mpc_data_obj *fields); + +/** + * MPC EM operation completion parse apis + */ +int cfa_bld_p70_mpc_parse_em_search(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_em_insert(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_em_delete(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +int cfa_bld_p70_mpc_parse_em_chain(u8 *resp, u32 resp_buff_len, + struct cfa_mpc_data_obj *fields); + +#endif /* _CFA_BLD_P70_HOST_MPC_WRAPPER_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h new file mode 100644 index 000000000000..edb99481bb42 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc.h @@ -0,0 +1,610 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_BLD_P70_MPC_H_ +#define _CFA_BLD_P70_MPC_H_ + +/** + * CFA Mid-Path Command (MPC) opcodes. The MPC CFA operations + * are divided into 2 sub groups. Cache access operations + * and EM update operations. 
+ */ +enum cfa_mpc_opcode { + /** + * MPC Cache access commands + */ + /* MPC Command to read Action/Lookup cache (up to 4 lines) */ + CFA_MPC_READ, + /* MPC Command to write to Action/Lookup cache (up to 4 lines) */ + CFA_MPC_WRITE, + /* MPC Cmd to Read and Clear Action/Lookup cache line (max 1 line) */ + CFA_MPC_READ_CLR, + /* MPC Cmd to Invalidate Action/Lkup cache lines (up to 4 lines) */ + CFA_MPC_INVALIDATE, + + /** + * MPC EM update commands + */ + /** + * MPC Command to search for an EM entry by its key in the + * EM bucket chain + */ + CFA_MPC_EM_SEARCH, + /* MPC command to insert a new EM entry to the EM bucket chain */ + CFA_MPC_EM_INSERT, + /* MPC Command to delete an EM entry from the EM bucket chain */ + CFA_MPC_EM_DELETE, + /* MPC Command to add an EM bucket to the tail of EM bucket chain */ + CFA_MPC_EM_CHAIN, + CFA_MPC_OPC_MAX, +}; + +/** + * CFA MPC Cache access reading mode + */ +enum cfa_mpc_read_mode { + CFA_MPC_RD_NORMAL, /**< Normal read mode */ + CFA_MPC_RD_EVICT, /**< Read the cache and evict the cache line */ + CFA_MPC_RD_DEBUG_LINE, /**< Debug read mode line */ + CFA_MPC_RD_DEBUG_TAG, /**< Debug read mode tag */ + CFA_MPC_RD_MODE_MAX +}; + +/** + * CFA MPC Cache access writing mode + */ +enum cfa_mpc_write_mode { + CFA_MPC_WR_WRITE_THRU, /**< Write to cache in Write through mode */ + CFA_MPC_WR_WRITE_BACK, /**< Write to cache in Write back mode */ + CFA_MPC_WR_MODE_MAX +}; + +/** + * CFA MPC Cache access eviction mode + */ +enum cfa_mpc_evict_mode { + /** + * Line evict: These modes evict a single cache line + * In these modes, the eviction occurs regardless of the cache line + * state (CLEAN/CLEAN_FAST_EVICT/DIRTY) + */ + /* Cache line addressed by set/way is evicted */ + CFA_MPC_EV_EVICT_LINE, + /* Cache line hit with the table scope/address tuple is evicted */ + CFA_MPC_EV_EVICT_SCOPE_ADDRESS, + + /** + * Set Evict: These modes evict cache lines that meet certain criteria + * from the entire cache set. 
+ */ + /* + * Cache lines only in CLEAN state are evicted from the set + * derived from the address + */ + CFA_MPC_EV_EVICT_CLEAN_LINES, + /* + * Cache lines only in CLEAN_FAST_EVICT state are evicted from + * the set derived from the address + */ + CFA_MPC_EV_EVICT_CLEAN_FAST_EVICT_LINES, + /* + * Cache lines in both CLEAN and CLEAN_FAST_EVICT states are + * evicted from the set derived from the address + */ + CFA_MPC_EV_EVICT_CLEAN_AND_CLEAN_FAST_EVICT_LINES, + /* + * All Cache lines in the set identified by the address and + * belonging to the table scope are evicted. + */ + CFA_MPC_EV_EVICT_TABLE_SCOPE, + CFA_MPC_EV_MODE_MAX, +}; + +/** + * CFA Hardware Cache Table Type + */ +enum cfa_hw_table_type { + CFA_HW_TABLE_ACTION, /**< CFA Action Record Table */ + CFA_HW_TABLE_LOOKUP, /**< CFA EM Lookup Record Table */ + CFA_HW_TABLE_MAX +}; + +/** + * MPC Command parameters specific to Cache read operations + */ +struct cfa_mpc_cache_read_params { + /* Specifies the cache option for reading the cache lines */ + enum cfa_mpc_read_mode mode; + /** + * Clear mask to use for the Read-Clear operation + * Each bit in the mask correspond to 2 bytes in the + * cache line. Setting the corresponding mask bit, clears + * the corresponding data bytes in the cache line AFTER + * the read. This field is ignored for Read CMD. + */ + u16 clear_mask; + /** + * External host memory address + * + * The 64-bit IOVA host address to which to write the DMA data returned + * in the completion. The data will be written to the same function as + * the one that owns the queue this command is read from. Address must + * be 4 byte aligned. 
+ */ + u64 host_address; +}; + +/** + * MPC Command parameters specific to Cache write operation + */ +struct cfa_mpc_cache_write_params { + /* Specifies the cache option for the write access */ + enum cfa_mpc_write_mode mode; + /* Pointer to data to be written to cache */ + const u8 *data_ptr; +}; + +/** + * MPC Command parameters specific to Cache evict/invalidate operation + */ +struct cfa_mpc_cache_evict_params { + /* Specifies the cache option for Invalidation operation */ + enum cfa_mpc_evict_mode mode; +}; + +/** + * MPC CFA Command parameters for cache related operations + */ +struct cfa_mpc_cache_axs_params { + /** Common parameters for cache operations */ + /* + * Opaque value that will be returned in the MPC CFA + * Completion message. This can be used by the caller to associate + * completions with commands. + */ + u32 opaque; + /* + * Table Scope to address the cache line. For Thor2 + * the table scope goes for 0 - 31. + */ + u8 tbl_scope; + /* + * Table Index to address the cache line. Note that + * this is the offset to the 32B record in the table + * scope backing store, expressed in 32B units. + */ + u32 tbl_index; + /* + * Number of cache lines (32B word) in the access + * This should be set to 1 for READ-CLEAR command and between 1 and + * 4 for all other cache access commands (READ/WRITE/INVALIDATE) + */ + u8 data_size; + /* CFA table type for which this Host IF hw operation is intended for */ + enum cfa_hw_table_type tbl_type; + + /* Cache operation specific params */ + union { + /** Read and Read clear specific parameters */ + struct cfa_mpc_cache_read_params read; + /** Cache write specific parameters */ + struct cfa_mpc_cache_write_params write; + /** Cache invalidate operation specific parameters */ + struct cfa_mpc_cache_evict_params evict; + }; +}; + +/** + * MPC CFA command parameters specific to EM insert operation + */ +struct cfa_mpc_em_insert_params { + /* + * Pointer to the Exact Match entry to search. 
The + * EM Key in the entry is used for the search + */ + const u8 *em_entry; + /* Size of the EM entry in 32B words (1- 4) */ + u8 data_size; + /* Flag to indicate if a matching entry (if found) should be replaced */ + bool replace; + /* Table index to write the EM entry being inserted */ + u32 entry_idx; + /* + * Table index to the EM record that can be used to + * create a new EM bucket, if the insertion results + * in an EM bucket chain's tail update. + */ + u32 bucket_idx; +}; + +/** + * MPC CFA command parameters specific to EM search operation + */ +struct cfa_mpc_em_search_params { + /* + * Pointer to the Exact Match entry to search. The + * EM Key in the entry is used for the search + */ + u8 *em_entry; + /* Size of the EM entry in 32B words (1- 4) */ + u8 data_size; +}; + +/** + * MPC CFA command parameters specific to EM delete operation + */ +struct cfa_mpc_em_delete_params { + /* Table index to the EM record to delete */ + u32 entry_idx; + /* + * Table index to the static bucket for the EM bucket chain. + * As part of EM Delete processing, the hw walks the EM bucket + * chain to determine if the entry_idx is part of the chain. + * If the entry_idx is found to be a part of the chain, it is + * deleted from the chain and the EM bucket is repacked. If the + * tail of the bucket has only one valid entry, then the delete + * operation results in a tail update and one free EM entry + */ + u32 bucket_idx; +}; + +/** + * MPC CFA command parameters specific to EM chain operation + */ +struct cfa_mpc_em_chain_params { + /* + * Table index that will form the chain + * pointer to the tail bucket in the EM bucket chain + */ + u32 entry_idx; + /* + * Table index to the static bucket for + * EM bucket chain to be updated. 
+ */ + u32 bucket_idx; +}; + +/** + * MPC CFA Command parameters for EM operations + */ +struct cfa_mpc_em_op_params { + /** Common parameters for EM update operations */ + /* + * Opaque value that will be returned in the MPC CFA + * Completion message. This can be used by the caller to associate + * completions with commands. + */ + u32 opaque; + /* + * Table Scope to address the cache line. For Thor2 + * the table scope goes for 0 - 31. + */ + u8 tbl_scope; + /** EM update operation specific params */ + union { + /** EM Search operation params */ + struct cfa_mpc_em_search_params search; + /** EM Insert operation params */ + struct cfa_mpc_em_insert_params insert; + /** EM Delete operation params */ + struct cfa_mpc_em_delete_params del; + /** EM Chain operation params */ + struct cfa_mpc_em_chain_params chain; + }; +}; + +/** + * MPC CFA Command completion status + */ +enum cfa_mpc_cmpl_status { + /* Command success */ + CFA_MPC_OK = 0, + /* Unsupported CFA opcode */ + CFA_MPC_UNSPRT_ERR = 1, + /* CFA command format error */ + CFA_MPC_FMT_ERR = 2, + /* SVIF-Table Scope error */ + CFA_MPC_SCOPE_ERR = 3, + /* Address error: Only used if EM command or TABLE_TYPE=EM */ + CFA_MPC_ADDR_ERR = 4, + /* Cache operation error */ + CFA_MPC_CACHE_ERR = 5, + /* EM_SEARCH or EM_DELETE did not find a matching EM entry */ + CFA_MPC_EM_MISS = 6, + /* EM_INSERT found a matching EM entry and REPLACE=0 in the command */ + CFA_MPC_EM_DUPLICATE = 7, + /* EM_EVENT_COLLECTION_FAIL no events to return */ + CFA_MPC_EM_EVENT_COLLECTION_FAIL = 8, + /* + * EM_INSERT required a dynamic bucket to be added to the chain + * to successfully insert the EM entry, but the entry provided + * for use as dynamic bucket was invalid. (bucket_idx == 0) + */ + CFA_MPC_EM_ABORT = 9, +}; + +/** + * MPC Cache access command completion result + */ +struct cfa_mpc_cache_axs_result { + /* + * Opaque value returned in the completion message. 
This can + * be used by the caller to associate completions with commands. + */ + u32 opaque; + /* MPC Command completion status code */ + enum cfa_mpc_cmpl_status status; + /* + * Additional error information + * when status code is one of FMT, SCOPE, ADDR or CACHE error + */ + u32 error_data; + /* + * Pointer to buffer to copy read data to. + * Needs to be valid for READ, READ-CLEAR operations + * Not set for write and evict operations + */ + u8 *rd_data; + /* + * Size of the data buffer in Bytes. Should be at least + * data_size * 32 for MPC cache reads + */ + u16 data_len; +}; + +/** + * MPC EM search operation result + */ +struct cfa_mpc_em_search_result { + u32 bucket_num; /**< See CFA EAS */ + u32 num_entries; /**< See CFA EAS */ + /* Set to HASH[35:24] of the hash computed from the EM entry key. */ + u32 hash_msb; + /* + * If a match is found, this field is set + * to the table index of the matching EM entry + */ + u32 match_idx; + /* + * Table index to the static bucket determined by hashing the EM entry + * key + */ + u32 bucket_idx; +}; + +/** + * MPC EM insert operation result + */ +struct cfa_mpc_em_insert_result { + u32 bucket_num; /**< See CFA EAS */ + u32 num_entries; /**< See CFA EAS */ + /* Set to HASH[35:24] of the hash computed from the EM entry key. */ + u32 hash_msb; + /* + * If replace = 1 and a matching entry is found, this field is + * updated with the table index of the replaced entry. This table + * index is therefore free for use. 
+ */ + u32 match_idx; + /* + * Table index to the static bucket determined by hashing the EM entry + * key + */ + u32 bucket_idx; + /* Flag: Matching entry was found and replaced */ + u8 replaced : 1; + /* Flag: EM bucket chain was updated */ + u8 chain_update : 1; +}; + +/** + * MPC EM delete operation result + */ +struct cfa_mpc_em_delete_result { + u32 bucket_num; /**< See CFA EAS */ + u32 num_entries; /**< See CFA EAS */ + /* + * Table index to EM bucket tail BEFORE the delete command + * was processed with an OK or EM_MISS status. If chain update = 1, then + * this bucket can be freed + */ + u32 prev_tail; + /* + * Table index to EM bucket tail AFTER the delete command + * was processed with an OK or EM_MISS status. Same as prev_tail + * if chain_update = 0. + */ + u32 new_tail; + /* Flag: EM bucket chain was updated */ + u8 chain_update : 1; +}; + +/** + * MPC EM chain operation result + */ +struct cfa_mpc_em_chain_result { + u32 bucket_num; /**< See CFA EAS */ + u32 num_entries; /**< See CFA EAS */ +}; + +/** + * MPC EM operation completion result + */ +struct cfa_mpc_em_op_result { + /* + * Opaque value returned in the completion message. This can + * be used by the caller to associate completions with commands. 
+ */ + u32 opaque; + /* MPC Command completion status code */ + enum cfa_mpc_cmpl_status status; + /* + * Additional error information + * when status code is one of FMT, SCOPE, ADDR or CACHE error + */ + u32 error_data; + union { + /** EM Search specific results */ + struct cfa_mpc_em_search_result search; + /** EM Insert specific results */ + struct cfa_mpc_em_insert_result insert; + /** EM Delete specific results */ + struct cfa_mpc_em_delete_result del; + /** EM Chain specific results */ + struct cfa_mpc_em_chain_result chain; + }; +}; + +#define TFC_MPC_HDR_TYPE_EB 5 +#define TFC_MPC_HDR_TYPE_SB 0 +#define TFC_MPC_HDR_TYPE_OFFS 0x0 + +#define TFC_MPC_HDR_SET_TYPE(buf, val) \ + SET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_TYPE_OFFS)], (u32)(val), \ + TFC_MPC_HDR_TYPE_EB, \ + TFC_MPC_HDR_TYPE_SB) +#define TFC_MPC_HDR_GET_TYPE(buf) \ + GET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_TYPE_OFFS)], \ + TFC_MPC_HDR_TYPE_EB, \ + TFC_MPC_HDR_TYPE_SB) + +#define TFC_MPC_HDR_FLAGS_EB 15 +#define TFC_MPC_HDR_FLAGS_SB 6 +#define TFC_MPC_HDR_FLAGS_OFFS 0x0 + +#define TFC_MPC_HDR_SET_FLAGS(buf, val) \ + SET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_FLAGS_OFFS)], (u32)(val), \ + TFC_MPC_HDR_FLAGS_EB, \ + TFC_MPC_HDR_FLAGS_SB) +#define TFC_MPC_HDR_GET_FLAGS(buf) \ + GET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_FLAGS_OFFS)], \ + TFC_MPC_HDR_FLAGS_EB, \ + TFC_MPC_HDR_FLAGS_SB) + +#define TFC_MPC_HDR_LEN_EB 31 +#define TFC_MPC_HDR_LEN_SB 16 +#define TFC_MPC_HDR_LEN_OFFS 0x0 + +#define TFC_MPC_HDR_SET_LEN(buf, val) \ + SET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_LEN_OFFS)], (u32)(val), \ + TFC_MPC_HDR_LEN_EB, \ + TFC_MPC_HDR_LEN_SB) +#define TFC_MPC_HDR_GET_LEN(buf) \ + GET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_HDR_LEN_OFFS)], \ + TFC_MPC_HDR_LEN_EB, \ + TFC_MPC_HDR_LEN_SB) + +/** + * MPC header definition + */ +struct mpc_header { + u32 type_flags_len; + u32 opaque; + u64 unused; +}; + +#define TFC_MPC_CR_SHORT_DMA_DATA_LEN_EB 5 +#define TFC_MPC_CR_SHORT_DMA_DATA_LEN_SB 0 
+#define TFC_MPC_CR_SHORT_DMA_DATA_LEN_OFFS 0x0 + +#define TFC_MPC_CR_SHORT_DMA_DATA_SET_LEN(buf, val) \ + SET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_CR_SHORT_DMA_DATA_LEN_OFFS)], \ + (u32)(val), \ + TFC_MPC_CR_SHORT_DMA_DATA_LEN_EB, \ + TFC_MPC_CR_SHORT_DMA_DATA_LEN_SB) +#define TFC_MPC_CR_SHORT_DMA_DATA_GET_LEN(buf) \ + GET_BITFLD32(((u32 *)(buf))[O2I_4B(TFC_MPC_CR_SHORT_DMA_DATA_LEN_OFFS)], \ + TFC_MPC_CR_SHORT_DMA_DATA_LEN_EB, \ + TFC_MPC_CR_SHORT_DMA_DATA_LEN_SB) + +/* + * For successful completions of read and read-clear MPC CFA + * commands, the responses will contain this dma info structure + * following the cfa_mpc_read(|clr)_cmp structure and preceding + * the actual data read from the cache. + */ +struct mpc_cr_short_dma_data { + u32 dma_length; + u32 dma_addr0; + u32 dma_addr1; +}; + +/** + * Build MPC CFA Cache access command + * + * @param [in] opc MPC opcode + * + * @param [out] cmd_buff Command data buffer to write the command to + * + * @param [in/out] cmd_buff_len Pointer to command buffer size param + * Set by caller to indicate the input cmd_buff size. + * Set to the actual size of the command generated by the api. + * + * @param [in] parms Pointer to MPC cache access command parameters + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_build_cache_axs_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff, + u32 *cmd_buff_len, + struct cfa_mpc_cache_axs_params *parms); + +/** + * Parse MPC CFA Cache access command completion result + * + * @param [in] opc MPC cache access opcode + * + * @param [in] resp_buff Data buffer containing the response to parse + * + * @param [in] resp_buff_len Response buffer size + * + * @param [out] result Pointer to MPC cache access result object. This + * object will contain the fields parsed and extracted from the + * response buffer. 
+ * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_parse_cache_axs_resp(enum cfa_mpc_opcode opc, u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_cache_axs_result *result); + +/** + * Build MPC CFA EM operation command + * + * @param [in] opc MPC EM opcode + * + * @param [in] cmd_buff Command data buffer to write the command to + * + * @param [in/out] cmd_buff_len Pointer to command buffer size param + * Set by caller to indicate the input cmd_buff size. + * Set to the actual size of the command generated by the api. + * + * @param [in] parms Pointer to MPC cache access command parameters + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_build_em_op_cmd(enum cfa_mpc_opcode opc, u8 *cmd_buff, + u32 *cmd_buff_len, + struct cfa_mpc_em_op_params *parms); + +/** + * Parse MPC CFA EM operation command completion result + * + * @param [in] opc MPC cache access opcode + * + * @param [in] resp_buff Data buffer containing the response to parse + * + * @param [in] resp_buff_len Response buffer size + * + * @param [out] result Pointer to MPC EM operation result object. This + * object will contain the fields parsed and extracted from the + * response buffer. + * + * @return 0 on Success, negative errno on failure + */ +int cfa_mpc_parse_em_op_resp(enum cfa_mpc_opcode opc, u8 *resp_buff, + u32 resp_buff_len, + struct cfa_mpc_em_op_result *result); + +#endif /* _CFA_BLD_P70_MPC_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h new file mode 100644 index 000000000000..cc9d7aa9a149 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpc_defs.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _CFA_BLD_P70_MPC_DEFS_H_ +#define _CFA_BLD_P70_MPC_DEFS_H_ + +/* + * CFA phase 7.0 Action/Lookup cache option values for various accesses + * From EAS + */ +#define CACHE_READ_OPTION_NORMAL 0x0 +#define CACHE_READ_OPTION_EVICT 0x1 +#define CACHE_READ_OPTION_FAST_EVICT 0x2 +#define CACHE_READ_OPTION_DEBUG_LINE 0x4 +#define CACHE_READ_OPTION_DEBUG_TAG 0x5 + +/* + * Cache read and clear command expects the cache option bit 3 + * to be set, failing which the clear is not done. + */ +#define CACHE_READ_CLR_MASK (0x1U << 3) +#define CACHE_READ_CLR_OPTION_NORMAL \ + (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_NORMAL) +#define CACHE_READ_CLR_OPTION_EVICT \ + (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_EVICT) +#define CACHE_READ_CLR_OPTION_FAST_EVICT \ + (CACHE_READ_CLR_MASK | CACHE_READ_OPTION_FAST_EVICT) + +#define CACHE_WRITE_OPTION_WRITE_BACK 0x0 +#define CACHE_WRITE_OPTION_WRITE_THRU 0x1 + +#define CACHE_EVICT_OPTION_CLEAN_LINES 0x1 +#define CACHE_EVICT_OPTION_CLEAN_FAST_LINES 0x2 +#define CACHE_EVICT_OPTION_CLEAN_AND_FAST_LINES 0x3 +#define CACHE_EVICT_OPTION_LINE 0x4 +#define CACHE_EVICT_OPTION_SCOPE_ADDRESS 0x5 + +#define CFA_P70_CACHE_LINE_BYTES 32 +#define CFA_P70_CACHE_LINE_BITS (CFA_P70_CACHE_LINE_BYTES * BITS_PER_BYTE) + +/* EM/action cache access unit size in bytes */ +#define MPC_CFA_CACHE_ACCESS_UNIT_SIZE CFA_P70_CACHE_LINE_BYTES + +/** + * READ_CMD: This command reads 1-4 consecutive 32B words from the + * specified address within a table scope. + */ +#define READ_CMD_OPCODE_READ 0 + +#define READ_CMD_TABLE_TYPE_ACTION 0 +#define READ_CMD_TABLE_TYPE_EM 1 + +/** + * WRITE_CMD: This command writes 1-4 consecutive 32B words to the + * specified address within a table scope. 
+ */ +#define WRITE_CMD_OPCODE_WRITE 1 + +#define WRITE_CMD_TABLE_TYPE_ACTION 0 +#define WRITE_CMD_TABLE_TYPE_EM 1 + +/** + * READ_CLR_CMD: This command performs a read-modify-write to the + * specified 32B address using a 16b mask that specifies up to 16 16b + * words to clear before writing the data back. It returns the 32B data + * word read from cache (not the value written after the clear + * operation). + */ +#define READ_CLR_CMD_OPCODE_READ_CLR 2 + +#define READ_CLR_CMD_TABLE_TYPE_ACTION 0 +#define READ_CLR_CMD_TABLE_TYPE_EM 1 + +/** + * INVALIDATE_CMD: This command forces an explicit evict of 1-4 + * consecutive cache lines such that the next time the structure is used + * it will be re-read from its backing store location. + */ +#define INVALIDATE_CMD_OPCODE_INVALIDATE 5 + +#define INVALIDATE_CMD_TABLE_TYPE_ACTION 0 +#define INVALIDATE_CMD_TABLE_TYPE_EM 1 + +/** + * EM_SEARCH_CMD: This command supplies an exact match entry of 1-4 32B + * words to search for in the exact match table. CFA first computes the + * hash value of the key in the entry, and determines the static bucket + * address to search from the hash and the (EM_BUCKETS, EM_SIZE) for + * TABLE_SCOPE. It then searches that static bucket chain for an entry + * with a matching key (the LREC in the command entry is ignored). If a + * matching entry is found, CFA reports OK status in the completion. + * Otherwise, assuming no errors abort the search before it completes, + * it reports EM_MISS status. + */ +#define EM_SEARCH_CMD_OPCODE_EM_SEARCH 8 + +/** + * EM_INSERT_CMD: This command supplies an exact match entry of 1-4 32B + * words to insert in the exact match table. CFA first computes the hash + * value of the key in the entry, and determines the static bucket + * address to search from the hash and the (EM_BUCKETS, EM_SIZE) for + * TABLE_SCOPE. It then writes the 1-4 32B words of the exact match + * entry starting at the TABLE_INDEX location in the command. 
When the + * entry write completes, it searches the static bucket chain for an + * existing entry with a key matching the key in the insert entry (the + * LREC does not need to match). If a matching entry is found: * If + * REPLACE=0, the CFA aborts the insert and returns EM_DUPLICATE status. + * * If REPLACE=1, the CFA overwrites the matching entry with the new + * entry. REPLACED_ENTRY=1 in the completion in this case to signal that + * an entry was replaced. The location of the entry is provided in the + * completion. If no match is found, CFA adds the new entry to the + * lowest unused entry in the tail bucket. If the current tail bucket is + * full, this requires adding a new bucket to the tail. Then entry is + * then inserted at entry number 0. TABLE_INDEX2 provides the address of + * the new tail bucket, if needed. If set to 0, the insert is aborted + * and returns EM_ABORT status instead of adding a new bucket to the + * tail. CHAIN_UPD in the completion indicates whether a new bucket was + * added (1) or not (0). For locked scopes, if the read of the static + * bucket gives a locked scope miss error, indicating that the address + * is not in the cache, the static bucket is assumed empty. In this + * case, TAI creates a new bucket, setting entry 0 to the new entry + * fields and initializing all other fields to 0. It writes this new + * bucket to the static bucket address, which installs it in the cache. + */ +#define EM_INSERT_CMD_OPCODE_EM_INSERT 9 + +/** + * EM_DELETE_CMD: This command searches for an exact match entry index + * in the static bucket chain and deletes it if found. TABLE_INDEX give + * the entry index to delete and TABLE_INDEX2 gives the static bucket + * index. If a matching entry is found: * If the matching entry is the + * last valid entry in the tail bucket, its entry fields (HASH_MSBS and + * ENTRY_PTR) are set to 0 to delete the entry. 
* If the matching entry + * is not the last valid entry in the tail bucket, the entry fields from + * that last entry are moved to the matching entry, and the fields of + * that last entry are set to 0. * If any of the previous processing + * results in the tail bucket not having any valid entries, the tail + * bucket is the static bucket, the scope is a locked scope, and + * CHAIN_PTR=0, hardware evicts the static bucket from the cache and the + * completion signals this case with CHAIN_UPD=1. * If any of the + * previous processing results in the tail bucket not having any valid + * entries, and the tail bucket is not the static bucket, the tail + * bucket is removed from the chain. In this case, the penultimate + * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to + * unlink the tail bucket, and CHAIN_PTR set to that from the original + * tail bucket to preserve background chaining. The completion signals + * this case with CHAIN_UPD=1 and returns the index to the bucket + * removed so that software can de-allocate it. CFA returns OK status if + * the entry was successfully deleted. Otherwise, it returns EM_MISS + * status assuming there were no errors that caused processing to be + * aborted. + */ +#define EM_DELETE_CMD_OPCODE_EM_DELETE 10 + +/** + * EM_CHAIN_CMD: This command updates CHAIN_PTR in the tail bucket of a + * static bucket chain, supplying both the static bucket and the new + * CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR value and + * TABLE_INDEX2[23:0] is the static bucket. This command provides + * software a means to update background chaining coherently with other + * bucket updates. The value of CHAIN is unaffected (stays at 0). For + * locked scopes, if the static bucket is the tail bucket, it is empty + * (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the CHAIN_PTR + * is being set to 0), instead of updating the static bucket it is + * evicted from the cache. In this case, CHAIN_UPD=1 in the completion. 
+ */ +#define EM_CHAIN_CMD_OPCODE_EM_CHAIN 11 + +/** + * READ_CMP: When no errors, returns 1-4 consecutive 32B words from the + * TABLE_INDEX within the TABLE_SCOPE specified in the command, writing + * them to HOST_ADDRESS from the command. + */ +#define READ_CMP_TYPE_MID_PATH_SHORT 30 + +#define READ_CMP_STATUS_OK 0 +#define READ_CMP_STATUS_UNSPRT_ERR 1 +#define READ_CMP_STATUS_FMT_ERR 2 +#define READ_CMP_STATUS_SCOPE_ERR 3 +#define READ_CMP_STATUS_ADDR_ERR 4 +#define READ_CMP_STATUS_CACHE_ERR 5 + +#define READ_CMP_MP_CLIENT_TE_CFA 2 +#define READ_CMP_MP_CLIENT_RE_CFA 3 + +#define READ_CMP_OPCODE_READ 0 + +#define READ_CMP_TABLE_TYPE_ACTION 0 +#define READ_CMP_TABLE_TYPE_EM 1 + +/** + * WRITE_CMP: Returns status of the write of 1-4 consecutive 32B words + * starting at TABLE_INDEX in the table specified by (TABLE_TYPE, + * TABLE_SCOPE). + */ +#define WRITE_CMP_TYPE_MID_PATH_SHORT 30 + +#define WRITE_CMP_STATUS_OK 0 +#define WRITE_CMP_STATUS_UNSPRT_ERR 1 +#define WRITE_CMP_STATUS_FMT_ERR 2 +#define WRITE_CMP_STATUS_SCOPE_ERR 3 +#define WRITE_CMP_STATUS_ADDR_ERR 4 +#define WRITE_CMP_STATUS_CACHE_ERR 5 + +#define WRITE_CMP_MP_CLIENT_TE_CFA 2 +#define WRITE_CMP_MP_CLIENT_RE_CFA 3 + +#define WRITE_CMP_OPCODE_WRITE 1 + +#define WRITE_CMP_TABLE_TYPE_ACTION 0 +#define WRITE_CMP_TABLE_TYPE_EM 1 + +/** + * READ_CLR_CMP: When no errors, returns 1 32B word from TABLE_INDEX in + * the table specified by (TABLE_TYPE, TABLE_SCOPE). The data returned + * is the value prior to the clear. 
+ */ +#define READ_CLR_CMP_TYPE_MID_PATH_SHORT 30 + +#define READ_CLR_CMP_STATUS_OK 0 +#define READ_CLR_CMP_STATUS_UNSPRT_ERR 1 +#define READ_CLR_CMP_STATUS_FMT_ERR 2 +#define READ_CLR_CMP_STATUS_SCOPE_ERR 3 +#define READ_CLR_CMP_STATUS_ADDR_ERR 4 +#define READ_CLR_CMP_STATUS_CACHE_ERR 5 + +#define READ_CLR_CMP_MP_CLIENT_TE_CFA 2 +#define READ_CLR_CMP_MP_CLIENT_RE_CFA 3 + +#define READ_CLR_CMP_OPCODE_READ_CLR 2 + +#define READ_CLR_CMP_TABLE_TYPE_ACTION 0 +#define READ_CLR_CMP_TABLE_TYPE_EM 1 + +/** + * INVALIDATE_CMP: Returns status for INVALIDATE commands. + */ +#define INVALIDATE_CMP_TYPE_MID_PATH_SHORT 30 + +#define INVALIDATE_CMP_STATUS_OK 0 +#define INVALIDATE_CMP_STATUS_UNSPRT_ERR 1 +#define INVALIDATE_CMP_STATUS_FMT_ERR 2 +#define INVALIDATE_CMP_STATUS_SCOPE_ERR 3 +#define INVALIDATE_CMP_STATUS_ADDR_ERR 4 +#define INVALIDATE_CMP_STATUS_CACHE_ERR 5 + +#define INVALIDATE_CMP_MP_CLIENT_TE_CFA 2 +#define INVALIDATE_CMP_MP_CLIENT_RE_CFA 3 + +#define INVALIDATE_CMP_OPCODE_INVALIDATE 5 + +#define INVALIDATE_CMP_TABLE_TYPE_ACTION 0 +#define INVALIDATE_CMP_TABLE_TYPE_EM 1 + +/** + * EM_SEARCH_CMP: For OK status, returns the index of the matching entry + * found for the EM key supplied in the command. Returns EM_MISS status + * if no match was found. + */ +#define EM_SEARCH_CMP_TYPE_MID_PATH_LONG 31 + +#define EM_SEARCH_CMP_STATUS_OK 0 +#define EM_SEARCH_CMP_STATUS_UNSPRT_ERR 1 +#define EM_SEARCH_CMP_STATUS_FMT_ERR 2 +#define EM_SEARCH_CMP_STATUS_SCOPE_ERR 3 +#define EM_SEARCH_CMP_STATUS_ADDR_ERR 4 +#define EM_SEARCH_CMP_STATUS_CACHE_ERR 5 +#define EM_SEARCH_CMP_STATUS_EM_MISS 6 + +#define EM_SEARCH_CMP_MP_CLIENT_TE_CFA 2 +#define EM_SEARCH_CMP_MP_CLIENT_RE_CFA 3 + +#define EM_SEARCH_CMP_OPCODE_EM_SEARCH 8 + +/** + * EM_INSERT_CMP: OK status indicates that the exact match entry from + * the command was successfully inserted. 
EM_DUPLICATE status indicates + * that the insert was aborted because an entry with the same exact + * match key was found and REPLACE=0 in the command. EM_ABORT status + * indicates that no duplicate was found, the tail bucket in the chain + * was full, and TABLE_INDEX2=0. No changes are made to the database in + * this case. TABLE_INDEX is the starting address at which to insert the + * exact match entry (from the command). TABLE_INDEX2 is the address at + * which to insert a new bucket at the tail of the static bucket chain + * if needed (from the command). CHAIN_UPD=1 if a new bucket was added + * at this address. TABLE_INDEX3 is the static bucket address for the + * chain, determined from hashing the exact match entry. Software needs + * this address and TABLE_INDEX in order to delete the entry using an + * EM_DELETE command. TABLE_INDEX4 is the index of an entry found that + * had a matching exact match key to the command entry key. If no + * matching entry was found, it is set to 0. There are two cases when + * there is a matching entry, depending on REPLACE from the command: * + * REPLACE=0: EM_DUPLICATE status is reported and the insert is aborted. + * Software can use the static bucket address (TABLE_INDEX3[23:0]) and + * the matching entry (TABLE_INDEX4) in an EM_DELETE command if it + * wishes to explicitly delete the matching entry. * REPLACE=1: + * REPLACED_ENTRY=1 to signal that the entry at TABLE_INDEX4 was + * replaced by the insert entry. REPLACED_ENTRY will only be 1 if + * reporting OK status in this case. Software can de-allocate the entry + * at TABLE_INDEX4. 
+ */ +#define EM_INSERT_CMP_TYPE_MID_PATH_LONG 31 + +#define EM_INSERT_CMP_STATUS_OK 0 +#define EM_INSERT_CMP_STATUS_UNSPRT_ERR 1 +#define EM_INSERT_CMP_STATUS_FMT_ERR 2 +#define EM_INSERT_CMP_STATUS_SCOPE_ERR 3 +#define EM_INSERT_CMP_STATUS_ADDR_ERR 4 +#define EM_INSERT_CMP_STATUS_CACHE_ERR 5 +#define EM_INSERT_CMP_STATUS_EM_DUPLICATE 7 +#define EM_INSERT_CMP_STATUS_EM_ABORT 9 + +#define EM_INSERT_CMP_MP_CLIENT_TE_CFA 2 +#define EM_INSERT_CMP_MP_CLIENT_RE_CFA 3 + +#define EM_INSERT_CMP_OPCODE_EM_INSERT 9 + +/** + * EM_DELETE_CMP: OK status indicates that an ENTRY_PTR matching + * TABLE_INDEX was found in the static bucket chain specified and was + * therefore deleted. EM_MISS status indicates that no match was found. + * TABLE_INDEX is from the command. It is the index of the entry to + * delete. TABLE_INDEX2 is from the command. It is the static bucket + * address. TABLE_INDEX3 is the index of the tail bucket of the static + * bucket chain prior to processing the command. TABLE_INDEX4 is the + * index of the tail bucket of the static bucket chain after processing + * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the + * static bucket was the tail bucket, it became empty after the delete, + * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the + * static bucket has been evicted from the cache. Otherwise, if + * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was + * removed from the chain because it went empty. It can therefore be de- + * allocated. 
+ */ +#define EM_DELETE_CMP_TYPE_MID_PATH_LONG 31 + +#define EM_DELETE_CMP_STATUS_OK 0 +#define EM_DELETE_CMP_STATUS_UNSPRT_ERR 1 +#define EM_DELETE_CMP_STATUS_FMT_ERR 2 +#define EM_DELETE_CMP_STATUS_SCOPE_ERR 3 +#define EM_DELETE_CMP_STATUS_ADDR_ERR 4 +#define EM_DELETE_CMP_STATUS_CACHE_ERR 5 +#define EM_DELETE_CMP_STATUS_EM_MISS 6 + +#define EM_DELETE_CMP_MP_CLIENT_TE_CFA 2 +#define EM_DELETE_CMP_MP_CLIENT_RE_CFA 3 + +#define EM_DELETE_CMP_OPCODE_EM_DELETE 10 + +/** + * EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of the tail + * bucket was successfully updated. TABLE_INDEX is from the command. It + * is the value of the new CHAIN_PTR. TABLE_INDEX2 is from the command. + * TABLE_INDEX3 is the index of the tail bucket of the static bucket + * chain. + */ +#define EM_CHAIN_CMP_TYPE_MID_PATH_LONG 31 + +#define EM_CHAIN_CMP_STATUS_OK 0 +#define EM_CHAIN_CMP_STATUS_UNSPRT_ERR 1 +#define EM_CHAIN_CMP_STATUS_FMT_ERR 2 +#define EM_CHAIN_CMP_STATUS_SCOPE_ERR 3 +#define EM_CHAIN_CMP_STATUS_ADDR_ERR 4 +#define EM_CHAIN_CMP_STATUS_CACHE_ERR 5 + +#define EM_CHAIN_CMP_MP_CLIENT_TE_CFA 2 +#define EM_CHAIN_CMP_MP_CLIENT_RE_CFA 3 + +#define EM_CHAIN_CMP_OPCODE_EM_CHAIN 11 + +#endif /* _CFA_BLD_P70_MPC_DEFS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h new file mode 100644 index 000000000000..69b9100f0c6e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_bld_p70_mpcops.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _CFA_BLD_P70_MPCOPS_H_ +#define _CFA_BLD_P70_MPCOPS_H_ + +#include "cfa_types.h" +#include "cfa_bld_mpcops.h" + +int cfa_bld_p70_mpc_bind(enum cfa_ver hw_ver, struct cfa_bld_mpcinfo *mpcinfo); + +#endif /* _CFA_BLD_P70_MPCOPS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h new file mode 100644 index 000000000000..d4ce8b1523b9 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmds.h @@ -0,0 +1,1528 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ +#ifndef __CFA_P70_MPC_CMDS_H__ +#define __CFA_P70_MPC_CMDS_H__ + +#include "cfa_p70_mpc_common.h" + +/* + * CFA Table Read Command Record: + * + * This command reads 1-4 consecutive 32B words from the specified address + * within a table scope. + * Offset 31 0 + * 0x0 cache_option unused(1) data_size unused(3) + * table_scope unused(4) table_type opcode + * 0x4 unused(6) table_index + * 0x8 + * - + * 0xf host_address + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 0 READ + * This command reads 1-4 consecutive 32B words + * from the specified address within a table scope. + * + * table_type (Offset:0x0[11:8], Size: 4) + * This value selects the table type to be acted + * upon. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * data_size (Offset:0x0[26:24], Size: 3) + * Number of 32B units in access. If value is outside + * the range [1, 4], CFA aborts processing and reports + * FMT_ERR status. 
+ * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the table identified by + * (TABLE_TYPE, TABLE_SCOPE): + * + * host_address (Offset:0x8[31:0], Size: 32, Words: 2) + * The 64-bit host address to which to write the DMA + * data returned in the completion. The data will be + * written to the same function as the one that owns + * the SQ this command is read from. DATA_SIZE + * determines the maximum size of the data written. If + * HOST_ADDRESS[1:0] is not 0, CFA aborts processing + * and reports FMT_ERR status. 
+ */ +#define TFC_MPC_CMD_OPCODE_READ 0 + +#define TFC_MPC_CMD_TBL_RD_OPCODE_EB 7 +#define TFC_MPC_CMD_TBL_RD_OPCODE_SB 0 +#define TFC_MPC_CMD_TBL_RD_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RD_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RD_OPCODE_EB, \ + TFC_MPC_CMD_TBL_RD_OPCODE_SB) +#define TFC_MPC_CMD_TBL_RD_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_OPCODE_OFFS), \ + TFC_MPC_CMD_TBL_RD_OPCODE_EB, \ + TFC_MPC_CMD_TBL_RD_OPCODE_SB) + +#define TFC_MPC_CMD_TBL_RD_TABLE_TYPE_ACTION 0 +#define TFC_MPC_CMD_TBL_RD_TABLE_TYPE_EM 1 + +#define TFC_MPC_CMD_TBL_RD_TABLE_TYPE_EB 11 +#define TFC_MPC_CMD_TBL_RD_TABLE_TYPE_SB 8 +#define TFC_MPC_CMD_TBL_RD_TABLE_TYPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RD_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_TYPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RD_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_TYPE_SB) +#define TFC_MPC_CMD_TBL_RD_GET_TABLE_TYPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_TYPE_OFFS), \ + TFC_MPC_CMD_TBL_RD_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_TYPE_SB) + +#define TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RD_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_TBL_RD_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_TBL_RD_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_TBL_RD_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_TBL_RD_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RD_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_DATA_SIZE_OFFS), \ 
+ (u32)(val), \ + TFC_MPC_CMD_TBL_RD_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_RD_DATA_SIZE_SB) +#define TFC_MPC_CMD_TBL_RD_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_TBL_RD_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_RD_DATA_SIZE_SB) + +#define TFC_MPC_CMD_TBL_RD_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_TBL_RD_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_TBL_RD_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RD_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RD_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_RD_CACHE_OPTION_SB) +#define TFC_MPC_CMD_TBL_RD_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_TBL_RD_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_RD_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_TBL_RD_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_TBL_RD_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_TBL_RD_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_TBL_RD_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RD_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_INDEX_SB) +#define TFC_MPC_CMD_TBL_RD_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_TBL_RD_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_RD_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_0_OFFS 0x8 + +#define TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_0(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_0_OFFS), (u32)(val)) +#define TFC_MPC_CMD_TBL_RD_GET_HOST_ADDRESS_0(buf) \ + GET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_0_OFFS)) + +#define TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_1_OFFS 0xc + +#define TFC_MPC_CMD_TBL_RD_SET_HOST_ADDRESS_1(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_1_OFFS), (u32)(val)) +#define TFC_MPC_CMD_TBL_RD_GET_HOST_ADDRESS_1(buf) \ + GET_FLD32(TO_P32((buf), 
TFC_MPC_CMD_TBL_RD_HOST_ADDRESS_1_OFFS)) + +#define TFC_MPC_CMD_TBL_RD_SIZE 16 + +/* + * CFA Table Write Command Record: + * + * This command writes 1-4 consecutive 32B words to the specified address + * within a table scope. + * Offset 31 0 + * 0x0 cache_option unused(1) data_size unused(3) + * table_scope unused(3) write_through table_type opcode + * 0x4 unused(6) table_index + * 0x8 + * - + * 0xf unused(64) + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 1 WRITE + * This command writes 1-4 consecutive 32B words + * to the specified address within a table scope. + * + * table_type (Offset:0x0[11:8], Size: 4) + * This value selects the table type to be acted + * upon. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * write_through (Offset:0x0[12], Size: 1) + * Sets the OPTION field on the cache interface to + * use write-through for EM entry writes while + * processing EM_INSERT commands. For all other cases + * (including EM_INSERT bucket writes), the OPTION + * field is set by the CACHE_OPTION and CACHE_OPTION2 + * fields. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * data_size (Offset:0x0[26:24], Size: 3) + * Number of 32B units in access. If value is outside + * the range [1, 4], CFA aborts processing and reports + * FMT_ERR status. + * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. 
+ * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the table identified by + * (TABLE_TYPE, TABLE_SCOPE): + */ +#define TFC_MPC_CMD_OPCODE_WRITE 1 + +#define TFC_MPC_CMD_TBL_WR_OPCODE_EB 7 +#define TFC_MPC_CMD_TBL_WR_OPCODE_SB 0 +#define TFC_MPC_CMD_TBL_WR_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_OPCODE_EB, \ + TFC_MPC_CMD_TBL_WR_OPCODE_SB) +#define TFC_MPC_CMD_TBL_WR_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_OPCODE_OFFS), \ + TFC_MPC_CMD_TBL_WR_OPCODE_EB, \ + TFC_MPC_CMD_TBL_WR_OPCODE_SB) + +#define TFC_MPC_CMD_TBL_WR_TABLE_TYPE_ACTION 0 +#define TFC_MPC_CMD_TBL_WR_TABLE_TYPE_EM 1 + +#define TFC_MPC_CMD_TBL_WR_TABLE_TYPE_EB 11 +#define TFC_MPC_CMD_TBL_WR_TABLE_TYPE_SB 8 +#define TFC_MPC_CMD_TBL_WR_TABLE_TYPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_TABLE_TYPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_TYPE_SB) +#define TFC_MPC_CMD_TBL_WR_GET_TABLE_TYPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_TABLE_TYPE_OFFS), \ + TFC_MPC_CMD_TBL_WR_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_TYPE_SB) + +#define TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_EB 12 +#define TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_SB 12 +#define TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_WRITE_THROUGH(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_SB) +#define TFC_MPC_CMD_TBL_WR_GET_WRITE_THROUGH(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_OFFS), \ + 
TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_TBL_WR_WRITE_THROUGH_SB) + +#define TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_TBL_WR_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_TBL_WR_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_TBL_WR_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_TBL_WR_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_WR_DATA_SIZE_SB) +#define TFC_MPC_CMD_TBL_WR_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_TBL_WR_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_WR_DATA_SIZE_SB) + +#define TFC_MPC_CMD_TBL_WR_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_TBL_WR_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_TBL_WR_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_WR_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_WR_CACHE_OPTION_SB) +#define TFC_MPC_CMD_TBL_WR_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_TBL_WR_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_WR_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_TBL_WR_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_TBL_WR_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_TBL_WR_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_TBL_WR_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), 
TFC_MPC_CMD_TBL_WR_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_WR_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_INDEX_SB) +#define TFC_MPC_CMD_TBL_WR_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_WR_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_TBL_WR_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_WR_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_TBL_WR_SIZE 16 + +/* + * CFA Table Read-Clear Command Record: + * + * This command performs a read-modify-write to the specified 32B address using + * a 16b mask that specifies up to 16 16b words to clear before writing the data + * back. It returns the 32B data word read from cache (not the value written + * after the clear operation). + * Offset 31 0 + * 0x0 cache_option unused(1) data_size unused(3) + * table_scope unused(4) table_type opcode + * 0x4 unused(6) table_index + * 0x8 + * - + * 0xf host_address + * 0x10 unused(16) clear_mask + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 2 READ_CLR + * This command performs a read-modify-write to + * the specified 32B address using a 16b mask that + * specifies up to 16 16b words to clear. It + * returns the 32B data word prior to the clear + * operation. + * + * table_type (Offset:0x0[11:8], Size: 4) + * This value selects the table type to be acted + * upon. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * data_size (Offset:0x0[26:24], Size: 3) + * This field is no longer used. The READ_CLR command + * always reads (and does a mask-clear) on a single + * cache line. + * This field was added for SR2 A0 to avoid an + * ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see + * CUMULUS-17872). That issue was fixed in SR2 B0. 
+ * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the table identified by + * (TABLE_TYPE, TABLE_SCOPE): + * + * host_address (Offset:0x8[31:0], Size: 32, Words: 2) + * The 64-bit host address to which to write the DMA + * data returned in the completion. The data will be + * written to the same function as the one that owns + * the SQ this command is read from. DATA_SIZE + * determines the maximum size of the data written. If + * HOST_ADDRESS[1:0] is not 0, CFA aborts processing + * and reports FMT_ERR status. + * + * clear_mask (Offset:0x10[15:0], Size: 16) + * Specifies bits in 32B data word to clear. For + * x=0..15, when clear_mask[x]=1, data[x*16+15:x*16] + * is set to 0. 
+ */ +#define TFC_MPC_CMD_OPCODE_READ_CLR 2 + +#define TFC_MPC_CMD_TBL_RDCLR_OPCODE_EB 7 +#define TFC_MPC_CMD_TBL_RDCLR_OPCODE_SB 0 +#define TFC_MPC_CMD_TBL_RDCLR_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_OPCODE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_OPCODE_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_OPCODE_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_OPCODE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_OPCODE_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_ACTION 0 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_EM 1 + +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_EB 11 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_SB 8 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_TABLE_TYPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_TYPE_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_OFFS 0x0 + 
+#define TFC_MPC_CMD_TBL_RDCLR_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_RDCLR_DATA_SIZE_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_RDCLR_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_RDCLR_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_0_OFFS 0x8 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_0(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_0_OFFS), (u32)(val)) +#define TFC_MPC_CMD_TBL_RDCLR_GET_HOST_ADDRESS_0(buf) \ + GET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_0_OFFS)) + +#define TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_1_OFFS 0xc 
+ +#define TFC_MPC_CMD_TBL_RDCLR_SET_HOST_ADDRESS_1(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_1_OFFS), (u32)(val)) +#define TFC_MPC_CMD_TBL_RDCLR_GET_HOST_ADDRESS_1(buf) \ + GET_FLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_HOST_ADDRESS_1_OFFS)) + +#define TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_EB 15 +#define TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_SB 0 +#define TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_OFFS 0x10 + +#define TFC_MPC_CMD_TBL_RDCLR_SET_CLEAR_MASK(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_EB, \ + TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_SB) +#define TFC_MPC_CMD_TBL_RDCLR_GET_CLEAR_MASK(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_OFFS), \ + TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_EB, \ + TFC_MPC_CMD_TBL_RDCLR_CLEAR_MASK_SB) + +#define TFC_MPC_CMD_TBL_RDCLR_SIZE 20 + +/* + * CFA Invalidate Command Record: + * + * This command forces an explicit evict of 1-4 consecutive cache lines such + * that the next time the structure is used it will be re-read from its backing + * store location. + * Offset 31 0 + * 0x0 cache_option unused(1) data_size unused(3) + * table_scope unused(4) table_type opcode + * 0x4 unused(6) table_index + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 5 INVALIDATE + * This command invalidates 1-4 consecutively- + * addressed 32B words in the cache. + * + * table_type (Offset:0x0[11:8], Size: 4) + * This value selects the table type to be acted + * upon. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. 
+ * + * data_size (Offset:0x0[26:24], Size: 3) + * This value identifies the number of cache lines to + * invalidate. A FMT_ERR is reported if the value is + * not in the range of [1, 4]. + * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the table identified by + * (TABLE_TYPE, TABLE_SCOPE): + */ +#define TFC_MPC_CMD_OPCODE_INVALIDATE 5 + +#define TFC_MPC_CMD_TBL_INV_OPCODE_EB 7 +#define TFC_MPC_CMD_TBL_INV_OPCODE_SB 0 +#define TFC_MPC_CMD_TBL_INV_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_INV_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_OPCODE_EB, \ + TFC_MPC_CMD_TBL_INV_OPCODE_SB) +#define TFC_MPC_CMD_TBL_INV_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_OPCODE_OFFS), \ + TFC_MPC_CMD_TBL_INV_OPCODE_EB, \ + TFC_MPC_CMD_TBL_INV_OPCODE_SB) + +#define TFC_MPC_CMD_TBL_INV_TABLE_TYPE_ACTION 0 +#define TFC_MPC_CMD_TBL_INV_TABLE_TYPE_EM 1 + +#define TFC_MPC_CMD_TBL_INV_TABLE_TYPE_EB 11 +#define TFC_MPC_CMD_TBL_INV_TABLE_TYPE_SB 8 +#define TFC_MPC_CMD_TBL_INV_TABLE_TYPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_INV_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_TABLE_TYPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_TYPE_SB) +#define TFC_MPC_CMD_TBL_INV_GET_TABLE_TYPE(buf) \ + GET_BITFLD32(TO_P32((buf), 
TFC_MPC_CMD_TBL_INV_TABLE_TYPE_OFFS), \ + TFC_MPC_CMD_TBL_INV_TABLE_TYPE_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_TYPE_SB) + +#define TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_INV_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_TBL_INV_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_TBL_INV_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_TBL_INV_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_TBL_INV_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_INV_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_INV_DATA_SIZE_SB) +#define TFC_MPC_CMD_TBL_INV_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_TBL_INV_DATA_SIZE_EB, \ + TFC_MPC_CMD_TBL_INV_DATA_SIZE_SB) + +#define TFC_MPC_CMD_TBL_INV_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_TBL_INV_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_TBL_INV_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_TBL_INV_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_INV_CACHE_OPTION_SB) +#define TFC_MPC_CMD_TBL_INV_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_TBL_INV_CACHE_OPTION_EB, \ + TFC_MPC_CMD_TBL_INV_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_TBL_INV_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_TBL_INV_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_TBL_INV_TABLE_INDEX_OFFS 0x4 + +#define 
TFC_MPC_CMD_TBL_INV_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_TBL_INV_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_INDEX_SB) +#define TFC_MPC_CMD_TBL_INV_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_TBL_INV_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_TBL_INV_TABLE_INDEX_EB, \ + TFC_MPC_CMD_TBL_INV_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_TBL_INV_SIZE 8 + +/* + * CFA Event Collection Command Record: + * + * This command is used to read notification messages from the Host + * Notification Queue for TABLE_SCOPE in the command. + * Offset 31 0 + * 0x0 unused(5) data_size unused(3) table_scope + * unused(8) opcode + * 0x4 unused(32) + * 0x8 + * - + * 0xf host_address + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 6 EVENT_COLLECTION + * This command reads host notification messages + * from the lookup block connection tracking for a + * specified table scope. The command can specify + * the maximum number of messages returned: 4, 8, + * 12, or 16. The actual number returned may be + * fewer than the maximum depending on the number + * queued. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * data_size (Offset:0x0[26:24], Size: 3) + * This value identifies the maximum number of host + * notification messages that will be returned: + * 1 = 32B = up to 4 messages + * 2 = 64B = up to 8 messages + * 3 = 96B = up to 12 messages + * 4 = 128B = up to 16 messages + * + * host_address (Offset:0x8[31:0], Size: 32, Words: 2) + * The 64-bit host address to which to write the DMA + * data returned in the completion. The data will be + * written to the same function as the one that owns + * the SQ this command is read from. DATA_SIZE + * determines the maximum size of the data written. 
If + * HOST_ADDRESS[1:0] is not 0, CFA aborts processing + * and reports FMT_ERR status. + */ +#define TFC_MPC_CMD_OPCODE_EVENT_COLLECTION 6 + +#define TFC_MPC_CMD_EVT_COLL_OPCODE_EB 7 +#define TFC_MPC_CMD_EVT_COLL_OPCODE_SB 0 +#define TFC_MPC_CMD_EVT_COLL_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_EVT_COLL_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EVT_COLL_OPCODE_EB, \ + TFC_MPC_CMD_EVT_COLL_OPCODE_SB) +#define TFC_MPC_CMD_EVT_COLL_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_OPCODE_OFFS), \ + TFC_MPC_CMD_EVT_COLL_OPCODE_EB, \ + TFC_MPC_CMD_EVT_COLL_OPCODE_SB) + +#define TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_EVT_COLL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_EVT_COLL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EVT_COLL_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_EVT_COLL_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_EVT_COLL_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_EVT_COLL_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_EVT_COLL_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EVT_COLL_DATA_SIZE_EB, \ + TFC_MPC_CMD_EVT_COLL_DATA_SIZE_SB) +#define TFC_MPC_CMD_EVT_COLL_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_EVT_COLL_DATA_SIZE_EB, \ + TFC_MPC_CMD_EVT_COLL_DATA_SIZE_SB) + +#define TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_0_OFFS 0x8 + +#define TFC_MPC_CMD_EVT_COLL_SET_HOST_ADDRESS_0(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_0_OFFS), 
(u32)(val)) +#define TFC_MPC_CMD_EVT_COLL_GET_HOST_ADDRESS_0(buf) \ + GET_FLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_0_OFFS)) + +#define TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_1_OFFS 0xc + +#define TFC_MPC_CMD_EVT_COLL_SET_HOST_ADDRESS_1(buf, val) \ + SET_FLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_1_OFFS), (u32)(val)) +#define TFC_MPC_CMD_EVT_COLL_GET_HOST_ADDRESS_1(buf) \ + GET_FLD32(TO_P32((buf), TFC_MPC_CMD_EVT_COLL_HOST_ADDRESS_1_OFFS)) + +#define TFC_MPC_CMD_EVT_COLL_SIZE 16 + +/* + * CFA Exact Match Search Command Record: + * + * This command supplies an exact match entry of 1-4 32B words to search for in + * the exact match table. CFA first computes the hash value of the key in the + * entry, and determines the static bucket address to search from the hash and + * the (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. + * It then searches that static bucket chain for an entry with a matching key + * (the LREC in the command entry is ignored). + * If a matching entry is found, CFA reports OK status in the completion. + * Otherwise, assuming no errors abort the search before it completes, it + * reports EM_MISS status. + * Offset 31 0 + * 0x0 cache_option unused(1) data_size unused(3) + * table_scope unused(8) opcode + * 0x4 + * - + * 0xf unused(96) + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 8 EM_SEARCH + * This command supplies an exact match entry of + * 1-4 32B words to search for in the exact match + * table. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * data_size (Offset:0x0[26:24], Size: 3) + * Number of 32B units in access. If value is outside + * the range [1, 4], CFA aborts processing and reports + * FMT_ERR status. 
+ * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + */ +#define TFC_MPC_CMD_OPCODE_EM_SEARCH 8 + +#define TFC_MPC_CMD_EM_SEARCH_OPCODE_EB 7 +#define TFC_MPC_CMD_EM_SEARCH_OPCODE_SB 0 +#define TFC_MPC_CMD_EM_SEARCH_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_SEARCH_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_SEARCH_OPCODE_EB, \ + TFC_MPC_CMD_EM_SEARCH_OPCODE_SB) +#define TFC_MPC_CMD_EM_SEARCH_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_OPCODE_OFFS), \ + TFC_MPC_CMD_EM_SEARCH_OPCODE_EB, \ + TFC_MPC_CMD_EM_SEARCH_OPCODE_SB) + +#define TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_SEARCH_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_EM_SEARCH_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_SEARCH_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_SEARCH_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), 
TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_EB, \ + TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_SB) +#define TFC_MPC_CMD_EM_SEARCH_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_EB, \ + TFC_MPC_CMD_EM_SEARCH_DATA_SIZE_SB) + +#define TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_EM_SEARCH_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_SB) +#define TFC_MPC_CMD_EM_SEARCH_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_SEARCH_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_EM_SEARCH_SIZE 16 + +/* + * CFA Exact Match Insert Command Record: + * + * This command supplies an exact match entry of 1-4 32B words to insert in the + * exact match table. CFA first computes the hash value of the key in the entry, + * and determines the static bucket address to search from the hash and the + * (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. + * It then writes the 1-4 32B words of the exact match entry starting at the + * TABLE_INDEX location in the command. + * When the entry write completes, it searches the static bucket chain for an + * existing entry with a key matching the key in the insert entry (the LREC does + * not need to match). + * If a matching entry is found: + * If REPLACE=0, the CFA aborts the insert and returns EM_DUPLICATE status. + * If REPLACE=1, the CFA overwrites the matching entry with the new entry. + * REPLACED_ENTRY=1 in the completion in this case to signal that an entry was + * replaced. The location of the entry is provided in the completion. 
+ * If no match is found, CFA adds the new entry to the lowest unused entry in
+ * the tail bucket. If the current tail bucket is full, this requires adding a
+ * new bucket to the tail. The entry is then inserted at entry number 0.
+ * TABLE_INDEX2 provides the address of the new tail bucket, if needed. If set
+ * to 0, the insert is aborted and returns EM_ABORT status instead of adding a
+ * new bucket to the tail.
+ * CHAIN_UPD in the completion indicates whether a new bucket was added (1) or
+ * not (0).
+ * For locked scopes, if the read of the static bucket gives a locked scope
+ * miss error, indicating that the address is not in the cache, the static
+ * bucket is assumed empty. In this case, TAI creates a new bucket, setting
+ * entry 0 to the new entry fields and initializing all other fields to 0. It
+ * writes this new bucket to the static bucket address, which installs it in the
+ * cache.
+ * Offset 31 0
+ * 0x0 cache_option unused(1) data_size unused(3)
+ * table_scope unused(3) write_through unused(4) opcode
+ * 0x4 cache_option2 unused(2) table_index
+ * 0x8 replace unused(5) table_index2
+ * 0xc unused(32)
+ *
+ * opcode (Offset:0x0[7:0], Size: 8)
+ * This value selects the format for the mid-path
+ * command for the CFA.
+ * Value Enum Enumeration Description
+ * 9 EM_INSERT
+ * This command supplies an exact match entry of
+ * 1-4 32B words to be inserted into the exact
+ * match table.
+ *
+ * write_through (Offset:0x0[12], Size: 1)
+ * Sets the OPTION field on the cache interface to
+ * use write-through for EM entry writes while
+ * processing EM_INSERT commands. For all other cases
+ * (including EM_INSERT bucket writes), the OPTION
+ * field is set by the CACHE_OPTION and CACHE_OPTION2
+ * fields.
+ *
+ * table_scope (Offset:0x0[20:16], Size: 5)
+ * Table scope to access.
+ *
+ * data_size (Offset:0x0[26:24], Size: 3)
+ * Number of 32B units in access. If value is outside
+ * the range [1, 4], CFA aborts processing and reports
+ * FMT_ERR status.
+ *
+ * cache_option (Offset:0x0[31:28], Size: 4)
+ * Determines setting of OPTION field for all cache
+ * requests while processing any command other than
+ * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter
+ * commands, CACHE_OPTION sets the OPTION field for
+ * all read requests, and CACHE_OPTION2 sets it for
+ * all write requests.
+ * CFA does not support posted write requests.
+ * Therefore, for WRITE commands, CACHE_OPTION[1] must
+ * be set to 0. And for EM commands that send write
+ * requests (all but EM_SEARCH), CACHE_OPTION2[1] must
+ * be set to 0.
+ *
+ * table_index (Offset:0x4[25:0], Size: 26)
+ * A 32B index into the EM table identified by
+ * TABLE_SCOPE.
+ * Starting address to write exact match entry being
+ * inserted.
+ *
+ * cache_option2 (Offset:0x4[31:28], Size: 4)
+ * Determines setting of OPTION field for all cache
+ * write requests for EM_INSERT, EM_DELETE, and
+ * EM_CHAIN commands.
+ * CFA does not support posted write requests.
+ * Therefore, CACHE_OPTION2[1] must be set to 0.
+ *
+ * table_index2 (Offset:0x8[25:0], Size: 26)
+ * A 32B index into the EM table identified by
+ * TABLE_SCOPE.
+ * Only used when no duplicate entry is found and the
+ * tail bucket in the chain searched has no unused
+ * entries. In this case, TABLE_INDEX2 provides the
+ * index to the 32B dynamic bucket to add to the tail
+ * of the chain (it is the new tail bucket).
+ * In this case, the CFA first writes TABLE_INDEX2
+ * with a new bucket:
+ * Entry 0 of the bucket sets the HASH_MSBS computed
+ * from the hash and ENTRY_PTR to TABLE_INDEX.
+ * Entries 1-5 of the bucket set HASH_MSBS and
+ * ENTRY_PTR to 0.
+ * CHAIN=0 and CHAIN_PTR is set to CHAIN_PTR from the
+ * original tail bucket to maintain the background
+ * chaining.
+ * CFA then sets CHAIN=1 and CHAIN_PTR=TABLE_INDEX2
+ * in the original tail bucket to link the new bucket
+ * to the chain.
+ * CHAIN_UPD=1 in the completion to signal that the + * new bucket at TABLE_INDEX2 was added to the tail of + * the chain. + * + * replace (Offset:0x8[31], Size: 1) + * Only used if an entry is found whose key matches + * the exact match entry key in the command: + * REPLACE=0: The insert is aborted and EM_DUPLICATE + * status is returned, signaling that the insert + * failed. The index of the matching entry that + * blocked the insertion is returned in the + * completion. + * REPLACE=1: The matching entry is replaced with + * that from the command (ENTRY_PTR in the bucket is + * overwritten with TABLE_INDEX from the command). + * HASH_MSBS for the entry number never changes in + * this case since it had to match the new entry key + * HASH_MSBS to match. + * When an entry is replaced, REPLACED_ENTRY=1 in the + * completion and the index of the matching entry is + * returned in the completion so that software can de- + * allocate the entry. + */ +#define TFC_MPC_CMD_OPCODE_EM_INSERT 9 + +#define TFC_MPC_CMD_EM_INSERT_OPCODE_EB 7 +#define TFC_MPC_CMD_EM_INSERT_OPCODE_SB 0 +#define TFC_MPC_CMD_EM_INSERT_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_INSERT_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_OPCODE_EB, \ + TFC_MPC_CMD_EM_INSERT_OPCODE_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_OPCODE_OFFS), \ + TFC_MPC_CMD_EM_INSERT_OPCODE_EB, \ + TFC_MPC_CMD_EM_INSERT_OPCODE_SB) + +#define TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_EB 12 +#define TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_SB 12 +#define TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_OFFS 0x0 + +#define TFC_MPC_CMD_EM_INSERT_SET_WRITE_THROUGH(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_WRITE_THROUGH(buf) \ + 
GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_OFFS), \ + TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_INSERT_WRITE_THROUGH_SB) + +#define TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_INSERT_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_SCOPE_SB) + +#define TFC_MPC_CMD_EM_INSERT_DATA_SIZE_EB 26 +#define TFC_MPC_CMD_EM_INSERT_DATA_SIZE_SB 24 +#define TFC_MPC_CMD_EM_INSERT_DATA_SIZE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_INSERT_SET_DATA_SIZE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_DATA_SIZE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_DATA_SIZE_EB, \ + TFC_MPC_CMD_EM_INSERT_DATA_SIZE_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_DATA_SIZE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_DATA_SIZE_OFFS), \ + TFC_MPC_CMD_EM_INSERT_DATA_SIZE_EB, \ + TFC_MPC_CMD_EM_INSERT_DATA_SIZE_SB) + +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_EB 25 +#define 
TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_EB 31 +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_SB 28 +#define TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_OFFS 0x4 + +#define TFC_MPC_CMD_EM_INSERT_SET_CACHE_OPTION2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_CACHE_OPTION2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_OFFS), \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_INSERT_CACHE_OPTION2_SB) + +#define TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_EB 25 +#define TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_SB 0 +#define TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_OFFS 0x8 + +#define TFC_MPC_CMD_EM_INSERT_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_TABLE_INDEX2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_OFFS), \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_INSERT_TABLE_INDEX2_SB) + +#define TFC_MPC_CMD_EM_INSERT_REPLACE_EB 31 +#define TFC_MPC_CMD_EM_INSERT_REPLACE_SB 31 +#define TFC_MPC_CMD_EM_INSERT_REPLACE_OFFS 0x8 + +#define TFC_MPC_CMD_EM_INSERT_SET_REPLACE(buf, val) \ + SET_BITFLD32(TO_P32((buf), 
TFC_MPC_CMD_EM_INSERT_REPLACE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_INSERT_REPLACE_EB, \ + TFC_MPC_CMD_EM_INSERT_REPLACE_SB) +#define TFC_MPC_CMD_EM_INSERT_GET_REPLACE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_INSERT_REPLACE_OFFS), \ + TFC_MPC_CMD_EM_INSERT_REPLACE_EB, \ + TFC_MPC_CMD_EM_INSERT_REPLACE_SB) + +#define TFC_MPC_CMD_EM_INSERT_SIZE 16 + +/* + * CFA Exact Match Delete Command Record: + * + * This command searches for an exact match entry index in the static bucket + * chain and deletes it if found. TABLE_INDEX give the entry index to delete and + * TABLE_INDEX2 gives the static bucket index. If a matching entry is found: + * If the matching entry is the last valid entry in the tail bucket, its entry + * fields (HASH_MSBS and ENTRY_PTR) are set to 0 to delete the entry. + * If the matching entry is not the last valid entry in the tail bucket, the + * entry fields from that last entry are moved to the matching entry, and the + * fields of that last entry are set to 0. + * If any of the previous processing results in the tail bucket not having any + * valid entries, the tail bucket is the static bucket, the scope is a locked + * scope, and CHAIN_PTR=0, hardware evicts the static bucket from the cache and + * the completion signals this case with CHAIN_UPD=1. + * If any of the previous processing results in the tail bucket not having any + * valid entries, and the tail bucket is not the static bucket, the tail bucket + * is removed from the chain. In this case, the penultimate bucket in the chain + * becomes the tail bucket. It has CHAIN set to 0 to unlink the tail bucket, and + * CHAIN_PTR set to that from the original tail bucket to preserve background + * chaining. The completion signals this case with CHAIN_UPD=1 and returns the + * index to the bucket removed so that software can de-allocate it. + * CFA returns OK status if the entry was successfully deleted. 
Otherwise, it + * returns EM_MISS status assuming there were no errors that caused processing + * to be aborted. + * Offset 31 0 + * 0x0 cache_option unused(7) table_scope unused(3) + * write_through unused(4) opcode + * 0x4 cache_option2 unused(2) table_index + * 0x8 unused(6) table_index2 + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 10 EM_DELETE + * This command deletes an entry from the exact + * match table. CFA searches for the specified + * entry address in the bucket chain at the static + * bucket address given. + * + * write_through (Offset:0x0[12], Size: 1) + * Sets the OPTION field on the cache interface to + * use write-through for EM entry writes while + * processing EM_INSERT commands. For all other cases + * (inluding EM_INSERT bucket writes), the OPTION + * field is set by the CACHE_OPTION and CACHE_OPTION2 + * fields. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * Entry index to delete. + * + * cache_option2 (Offset:0x4[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * write requests for EM_INSERT, EM_DELETE, and + * EM_CHAIN commands. + * CFA does not support posted write requests. 
+ * Therefore, CACHE_OPTION2[1] must be set to 0. + * + * table_index2 (Offset:0x8[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * Static bucket address for bucket chain. + */ +#define TFC_MPC_CMD_OPCODE_EM_DELETE 10 + +#define TFC_MPC_CMD_EM_DELETE_OPCODE_EB 7 +#define TFC_MPC_CMD_EM_DELETE_OPCODE_SB 0 +#define TFC_MPC_CMD_EM_DELETE_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_DELETE_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_OPCODE_EB, \ + TFC_MPC_CMD_EM_DELETE_OPCODE_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_OPCODE_OFFS), \ + TFC_MPC_CMD_EM_DELETE_OPCODE_EB, \ + TFC_MPC_CMD_EM_DELETE_OPCODE_SB) + +#define TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_EB 12 +#define TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_SB 12 +#define TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_OFFS 0x0 + +#define TFC_MPC_CMD_EM_DELETE_SET_WRITE_THROUGH(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_WRITE_THROUGH(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_OFFS), \ + TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_DELETE_WRITE_THROUGH_SB) + +#define TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_DELETE_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_SCOPE_SB) + 
+#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_EB 31 +#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_SB 28 +#define TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_OFFS 0x4 + +#define TFC_MPC_CMD_EM_DELETE_SET_CACHE_OPTION2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_CACHE_OPTION2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_OFFS), \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_DELETE_CACHE_OPTION2_SB) + +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_EB 25 +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_SB 0 +#define TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_OFFS 0x8 + +#define 
TFC_MPC_CMD_EM_DELETE_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_SB) +#define TFC_MPC_CMD_EM_DELETE_GET_TABLE_INDEX2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_OFFS), \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_DELETE_TABLE_INDEX2_SB) + +#define TFC_MPC_CMD_EM_DELETE_SIZE 12 + +/* + * CFA Exact Match Chain Command Record: + * + * This command updates CHAIN_PTR in the tail bucket of a static bucket chain, + * supplying both the static bucket and the new CHAIN_PTR value. TABLE_INDEX is + * the new CHAIN_PTR value and TABLE_INDEX2[23:0] is the static bucket. + * This command provides software a means to update background chaining + * coherently with other bucket updates. The value of CHAIN is unaffected (stays + * at 0). + * For locked scopes, if the static bucket is the tail bucket, it is empty (all + * of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the CHAIN_PTR is being set + * to 0), instead of updating the static bucket it is evicted from the cache. In + * this case, CHAIN_UPD=1 in the completion. + * Offset 31 0 + * 0x0 cache_option unused(7) table_scope unused(3) + * write_through unused(4) opcode + * 0x4 cache_option2 unused(2) table_index + * 0x8 unused(6) table_index2 + * + * opcode (Offset:0x0[7:0], Size: 8) + * This value selects the format for the mid-path + * command for the CFA. + * Value Enum Enumeration Description + * 11 EM_CHAIN + * This command updates CHAIN_PTR in the tail + * bucket of a static bucket chain, supplying both + * the static bucket and the new CHAIN_PTR value. + * + * write_through (Offset:0x0[12], Size: 1) + * Sets the OPTION field on the cache interface to + * use write-through for EM entry writes while + * processing EM_INSERT commands. 
For all other cases + * (inluding EM_INSERT bucket writes), the OPTION + * field is set by the CACHE_OPTION and CACHE_OPTION2 + * fields. + * + * table_scope (Offset:0x0[20:16], Size: 5) + * Table scope to access. + * + * cache_option (Offset:0x0[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * requests while processing any command other than + * EM_INSERT, EM_DELETE, or EM_CHAIN. For these latter + * commands, CACHE_OPTION sets the OPTION field for + * all read requests, and CACHE_OPTION2 sets it for + * all write requests. + * CFA does not support posted write requests. + * Therefore, for WRITE commands, CACHE_OPTION[1] must + * be set to 0. And for EM commands that send write + * requests (all but EM_SEARCH), CACHE_OPTION2[1] must + * be set to 0. + * + * table_index (Offset:0x4[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * New CHAIN_PTR to write to tail bucket. + * + * cache_option2 (Offset:0x4[31:28], Size: 4) + * Determines setting of OPTION field for all cache + * write requests for EM_INSERT, EM_DELETE, and + * EM_CHAIN commands. + * CFA does not support posted write requests. + * Therefore, CACHE_OPTION2[1] must be set to 0. + * + * table_index2 (Offset:0x8[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * Static bucket address for bucket chain. 
+ */ +#define TFC_MPC_CMD_OPCODE_EM_CHAIN 11 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_EB 7 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_SB 0 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_OPCODE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_OPCODE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_OPCODE_SB) + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_EB 12 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_SB 12 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_OFFS 0x0 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_WRITE_THROUGH(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_WRITE_THROUGH(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_WRITE_THROUGH_SB) + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_EB 20 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_SB 16 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_OFFS 0x0 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_TABLE_SCOPE(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_SCOPE_SB) + +#define 
TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_EB 31 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_SB 28 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_OFFS 0x0 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_CACHE_OPTION(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION_SB) + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_EB 25 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_SB 0 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_OFFS 0x4 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_TABLE_INDEX(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX_SB) + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_EB 31 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_SB 28 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_OFFS 0x4 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_CACHE_OPTION2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_CACHE_OPTION2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_CACHE_OPTION2_SB) + +#define 
TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_EB 25 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_SB 0 +#define TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_OFFS 0x8 + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_OFFS), \ + (u32)(val), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_SB) +#define TFC_MPC_CMD_EM_MATCH_CHAIN_GET_TABLE_INDEX2(buf) \ + GET_BITFLD32(TO_P32((buf), TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_OFFS), \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_EB, \ + TFC_MPC_CMD_EM_MATCH_CHAIN_TABLE_INDEX2_SB) + +#define TFC_MPC_CMD_EM_MATCH_CHAIN_SIZE 12 + +#endif /* __CFA_P70_MPC_CMDS_H__ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h new file mode 100644 index 000000000000..1568c9e249c3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_cmpls.h @@ -0,0 +1,3294 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ +#ifndef __CFA_P70_MPC_CMPLS_H__ +#define __CFA_P70_MPC_CMPLS_H__ + +#include "cfa_p70_mpc_common.h" + +/* + * CFA Table Read Completion Record: + * + * When no errors, teturns 1-4 consecutive 32B words from the TABLE_INDEX + * within the TABLE_SCOPE specified in the command, writing them to HOST_ADDRESS + * from the command. + * Offset 63 0 + * 0x0 opaque dma_length opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * table_type unused(4) hash_msb unused(3) v + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . 
+ * Value Enum Enumeration Description + * 30 mid_path_short + * Mid Path Short Completion : Completion of a Mid + * Path Command. Length = 16B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One of more cache responses signaled an error + * while processing the command. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. 
+ * Value Enum Enumeration Description + * 0 READ + * This command reads 1-4 consecutive 32B words + * from the specified address within a table scope. + * + * dma_length (Offset:0x0[31:24], Size: 8) + * The length of the DMA that accompanies the + * completion in units of DWORDs (32b). Valid values + * are [0, 128]. A value of zero indicates that there + * is no DMA that accompanies the completion. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_type (Offset:0x8[23:20], Size: 4) + * TABLE_TYPE from the command. 
+ * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * TABLE_INDEX from the command. + * This structure is used to inform the host of an + * event within the NIC. + */ +#define TFC_MPC_TBL_RD_CMPL_TYPE_MID_PATH_SHORT 30 + +#define TFC_MPC_TBL_RD_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_RD_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_RD_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_TYPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TYPE_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_RD_CMPL_TYPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_RD_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_RD_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_RD_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_RD_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_RD_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_RD_CMPL_STATUS_CACHE_ERR 5 + +#define TFC_MPC_TBL_RD_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_RD_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_RD_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_STATUS_EB, \ + TFC_MPC_TBL_RD_CMPL_STATUS_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_RD_CMPL_STATUS_EB, \ + TFC_MPC_TBL_RD_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_RD_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_RD_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_RD_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_RD_CMPL_MP_CLIENT_SB 12 +#define 
TFC_MPC_TBL_RD_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_RD_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_RD_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_RD_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_READ 0 + +#define TFC_MPC_TBL_RD_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_RD_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_RD_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_RD_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_RD_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_RD_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_EB 31 +#define TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_SB 24 +#define TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_DMA_LENGTH(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_DMA_LENGTH(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_OFFS), \ + TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_RD_CMPL_DMA_LENGTH_SB) + +#define TFC_MPC_TBL_RD_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_RD_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_RD_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_RD_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_RD_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_OPAQUE_OFFS), 
\ + TFC_MPC_TBL_RD_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_RD_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_RD_CMPL_V_EB 0 +#define TFC_MPC_TBL_RD_CMPL_V_SB 0 +#define TFC_MPC_TBL_RD_CMPL_V_OFFS 0x8 + +#define TFC_MPC_TBL_RD_CMPL_SET_V(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_V_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_V_EB, \ + TFC_MPC_TBL_RD_CMPL_V_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_V(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_V_OFFS), \ + TFC_MPC_TBL_RD_CMPL_V_EB, \ + TFC_MPC_TBL_RD_CMPL_V_SB) + +#define TFC_MPC_TBL_RD_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_RD_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_RD_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_RD_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_RD_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_RD_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_RD_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_ACTION 0 +#define TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_EM 1 + +#define TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_EB 23 +#define TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_SB 20 +#define TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_OFFS 0x8 + +#define TFC_MPC_TBL_RD_CMPL_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_TABLE_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_OFFS), \ + TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_TYPE_SB) + +#define TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_RD_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_OFFS), \ + 
(u64)(val), \ + TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_RD_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_RD_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_RD_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_RD_CMPL_SIZE 16 + +/* + * CFA Table Write Completion Record: + * + * Returns status of the write of 1-4 consecutive 32B words starting at + * TABLE_INDEX in the table specified by (TABLE_TYPE, TABLE_SCOPE). + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * table_type unused(4) hash_msb unused(3) v + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 30 mid_path_short + * Mid Path Short Completion : Completion of a Mid + * Path Command. Length = 16B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. 
This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One of more cache responses signaled an error + * while processing the command. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 1 WRITE + * This command writes 1-4 consecutive 32B words + * to the specified address within a table scope. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. 
The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_type (Offset:0x8[23:20], Size: 4) + * TABLE_TYPE from the command. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * TABLE_INDEX from the command. + * This structure is used to inform the host of an + * event within the NIC. 
+ */ +#define TFC_MPC_TBL_WR_CMPL_TYPE_MID_PATH_SHORT 30 + +#define TFC_MPC_TBL_WR_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_WR_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_WR_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_WR_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_TYPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TYPE_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_WR_CMPL_TYPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_WR_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_WR_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_WR_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_WR_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_WR_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_WR_CMPL_STATUS_CACHE_ERR 5 + +#define TFC_MPC_TBL_WR_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_WR_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_WR_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_WR_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_STATUS_EB, \ + TFC_MPC_TBL_WR_CMPL_STATUS_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_WR_CMPL_STATUS_EB, \ + TFC_MPC_TBL_WR_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_WR_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_WR_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_WR_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_WR_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_WR_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_WR_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_WR_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_WR_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_WR_CMPL_MP_CLIENT_SB) + +#define 
TFC_MPC_CMD_OPCODE_WRITE 1 + +#define TFC_MPC_TBL_WR_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_WR_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_WR_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_WR_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_WR_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_WR_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_WR_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_WR_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_WR_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_WR_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_WR_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_WR_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_WR_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_WR_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_WR_CMPL_V_EB 0 +#define TFC_MPC_TBL_WR_CMPL_V_SB 0 +#define TFC_MPC_TBL_WR_CMPL_V_OFFS 0x8 + +#define TFC_MPC_TBL_WR_CMPL_SET_V(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_V_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_V_EB, \ + TFC_MPC_TBL_WR_CMPL_V_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_V(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_V_OFFS), \ + TFC_MPC_TBL_WR_CMPL_V_EB, \ + TFC_MPC_TBL_WR_CMPL_V_SB) + +#define TFC_MPC_TBL_WR_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_WR_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_WR_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_WR_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_WR_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_HASH_MSB_OFFS), \ + 
TFC_MPC_TBL_WR_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_WR_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_ACTION 0 +#define TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_EM 1 + +#define TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_EB 23 +#define TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_SB 20 +#define TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_OFFS 0x8 + +#define TFC_MPC_TBL_WR_CMPL_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_TABLE_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_OFFS), \ + TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_TYPE_SB) + +#define TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_WR_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_WR_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_WR_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_WR_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_WR_CMPL_SIZE 16 + +/* + * CFA Table Read-Clear Completion Record: + * + * When no errors, returns 1 32B word from 
TABLE_INDEX in the table specified + * by (TABLE_TYPE, TABLE_SCOPE). The data returned is the value prior to the + * clear. + * Offset 63 0 + * 0x0 opaque dma_length opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * table_type unused(4) hash_msb unused(3) v + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 30 mid_path_short + * Mid Path Short Completion : Completion of a Mid + * Path Command. Length = 16B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. 
+ * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One of more cache responses signaled an error + * while processing the command. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 2 READ_CLR + * This command performs a read-modify-write to + * the specified 32B address using a 16b mask that + * specifies up to 16 16b words to clear. It + * returns the 32B data word prior to the clear + * operation. + * + * dma_length (Offset:0x0[31:24], Size: 8) + * The length of the DMA that accompanies the + * completion in units of DWORDs (32b). Valid values + * are [0, 128]. A value of zero indicates that there + * is no DMA that accompanies the completion. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. 
+ * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_type (Offset:0x8[23:20], Size: 4) + * TABLE_TYPE from the command. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * TABLE_INDEX from the command. + * This structure is used to inform the host of an + * event within the NIC. 
+ */ +#define TFC_MPC_TBL_RDCLR_CMPL_TYPE_MID_PATH_SHORT 30 + +#define TFC_MPC_TBL_RDCLR_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_RDCLR_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_RDCLR_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_TYPE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TYPE_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_TYPE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_CACHE_ERR 5 + +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_RDCLR_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_STATUS_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_STATUS_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_STATUS_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_READ_CLR 2 + +#define TFC_MPC_TBL_RDCLR_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_RDCLR_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_RDCLR_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_EB 31 +#define TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_SB 24 +#define TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_DMA_LENGTH(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_DMA_LENGTH(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_DMA_LENGTH_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_V_EB 0 +#define TFC_MPC_TBL_RDCLR_CMPL_V_SB 0 +#define TFC_MPC_TBL_RDCLR_CMPL_V_OFFS 0x8 + +#define 
TFC_MPC_TBL_RDCLR_CMPL_SET_V(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_V_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_V_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_V_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_V(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_V_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_V_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_V_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_ACTION 0 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_EM 1 + +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_EB 23 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_SB 20 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_OFFS 0x8 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_TABLE_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_TYPE_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_EB, \ + 
TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_RDCLR_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_RDCLR_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_RDCLR_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_RDCLR_CMPL_SIZE 16 + +/* + * CFA Table Invalidate Completion Record: + * + * Returns status for INVALIDATE commands. + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * table_type unused(4) hash_msb unused(3) v + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 30 mid_path_short + * Mid Path Short Completion : Completion of a Mid + * Path Command. Length = 16B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. 
+ * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indicates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One or more cache responses signaled an error + * while processing the command. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 5 INVALIDATE + * This command invalidates 1-4 consecutively- + * addressed 32B words in the cache. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0.
+ * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_type (Offset:0x8[23:20], Size: 4) + * TABLE_TYPE from the command. + * Value Enum Enumeration Description + * 0 ACTION + * This command acts on the action table of the + * specified scope. + * 1 EM + * This command acts on the exact match table of + * the specified scope. + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * TABLE_INDEX from the command. + * This structure is used to inform the host of an + * event within the NIC. 
+ */ +#define TFC_MPC_TBL_INV_CMPL_TYPE_MID_PATH_SHORT 30 + +#define TFC_MPC_TBL_INV_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_INV_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_INV_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_INV_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_TYPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TYPE_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_INV_CMPL_TYPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_INV_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_INV_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_INV_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_INV_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_INV_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_INV_CMPL_STATUS_CACHE_ERR 5 + +#define TFC_MPC_TBL_INV_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_INV_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_INV_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_INV_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_STATUS_EB, \ + TFC_MPC_TBL_INV_CMPL_STATUS_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_INV_CMPL_STATUS_EB, \ + TFC_MPC_TBL_INV_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_INV_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_INV_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_INV_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_INV_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_INV_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_INV_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_INV_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_INV_CMPL_MP_CLIENT_EB, \ + 
TFC_MPC_TBL_INV_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_INVALIDATE 5 + +#define TFC_MPC_TBL_INV_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_INV_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_INV_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_INV_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_INV_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_INV_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_INV_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_INV_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_INV_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_INV_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_INV_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_INV_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_INV_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_INV_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_INV_CMPL_V_EB 0 +#define TFC_MPC_TBL_INV_CMPL_V_SB 0 +#define TFC_MPC_TBL_INV_CMPL_V_OFFS 0x8 + +#define TFC_MPC_TBL_INV_CMPL_SET_V(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_V_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_V_EB, \ + TFC_MPC_TBL_INV_CMPL_V_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_V(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_V_OFFS), \ + TFC_MPC_TBL_INV_CMPL_V_EB, \ + TFC_MPC_TBL_INV_CMPL_V_SB) + +#define TFC_MPC_TBL_INV_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_INV_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_INV_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_INV_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_INV_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_HASH_MSB(buf) \ 
+ GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_INV_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_INV_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_ACTION 0 +#define TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_EM 1 + +#define TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_EB 23 +#define TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_SB 20 +#define TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_OFFS 0x8 + +#define TFC_MPC_TBL_INV_CMPL_SET_TABLE_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_TABLE_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_OFFS), \ + TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_TYPE_SB) + +#define TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_INV_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_INV_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_INV_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_INV_CMPL_TABLE_INDEX_SB) + +#define 
TFC_MPC_TBL_INV_CMPL_SIZE 16 + +/* + * CFA Table Event Collection Completion Record: + * + * For OK status, returns 1-16 8B Host Notification Record for TABLE_SCOPE, + * where the maximum number is limited by DATA_SIZE from the command (see + * command for details). Returns EVENT_COLLECTION_FAIL status and no DMA data + * when there are no messages available. + * Offset 63 0 + * 0x0 opaque dma_length opcode mp_client status unused(2) + * type + * 0x8 unused(35) table_scope unused(8) hash_msb + * unused(3) v + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 30 mid_path_short + * Mid Path Short Completion : Completion of a Mid + * Path Command. Length = 16B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indicates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 8 EVENT_COLLECTION_FAIL + * The TABLE_SCOPE had no host notification + * messages to return. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion.
+ * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 6 EVENT_COLLECTION + * This command reads host notification messages + * from the lookup block connection tracking for a + * specified table scope. The command can specify + * the maximum number of messages returned: 4, 8, + * 12, or 16. The actual number returned may be + * fewer than the maximum depending on the number + * queued. + * + * dma_length (Offset:0x0[31:24], Size: 8) + * The length of the DMA that accompanies the + * completion in units of DWORDs (32b). Valid values + * are [0, 128]. A value of zero indicates that there + * is no DMA that accompanies the completion. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. 
+ * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * This structure is used to inform the host of an + * event within the NIC. + */ +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_MID_PATH_SHORT 30 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_EVENT_COLLECTION_FAIL 8 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_EVENT_COLLECTION 6 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_EB 31 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_SB 24 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_DMA_LENGTH(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_SB) +#define 
TFC_MPC_TBL_EVENT_COLL_CMPL_GET_DMA_LENGTH(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_DMA_LENGTH_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_V_EB 0 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_V_SB 0 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_V_OFFS 0x8 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_V(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_V_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_V_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_V_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_V(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_V_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_V_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_V_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_HASH_MSB_SB) + +#define 
TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_EVENT_COLL_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EVENT_COLL_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_EVENT_COLL_CMPL_SIZE 16 + +/* + * CFA Table EM Search Completion Record: + * + * For OK status, returns the index of the matching entry found for the EM key + * supplied in the command. Returns EM_MISS status if no match was found. + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * unused(8) hash_msb unused(3) v1 + * 0x10 unused(38) table_index2 + * 0x18 unused(21) num_entries bkt_num unused(31) v2 + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 31 mid_path_long + * Mid Path Long Completion : Completion of a Mid + * Path Command. Length = 32B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. 
(Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indicates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One or more cache responses signaled an error + * while processing the command. + * 6 EM_MISS + * No matching entry found. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 8 EM_SEARCH + * This command supplies an exact match entry of + * 1-4 32B words to search for in the exact match + * table. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v1 (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0.
+ * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * For OK status, gives ENTRY_PTR[25:0] of the + * matching entry found. Otherwise, set to 0. + * + * table_index2 (Offset:0x10[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * If the hash is computed (no errors during initial + * processing of the command), TABLE_INDEX2[23:0] is + * the static bucket address determined from the hash + * of the exact match entry key in the command and the + * (EM_SIZE, EM_BUCKETS) configuration for TABLE_SCOPE + * of the command. Bits 25:24 in this case are set to + * 0. For any other status, it is always 0. 
+ * + * v2 (Offset:0x18[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * bkt_num (Offset:0x18[39:32], Size: 8) + * BKT_NUM is the bucket number in chain of the tail + * bucket after finishing processing the command, + * except when the command stops processing before the + * tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following + * describes the cases where BKT_NUM and NUM_ENTRIES + * are not for the tail bucket after finishing + * processing of the command: + * For UNSPRT_ERR, FMT_ERR, SCOPE_ERR, or ADDR_ERR + * completion status, BKT_NUM will be set to 0. + * For CACHE_ERR completion status, BKT_NUM will be + * set to the bucket number that was last read without + * error. If ERR=1 in the response to the static + * bucket read, BKT_NUM and NUM_ENTRIES are set to 0. + * The static bucket is number 0, BKT_NUM increments + * for each new bucket in the chain, and saturates at + * 255. Therefore, if the value is 255, BKT_NUM may or + * may not be accurate. In this case, though, + * NUM_ENTRIES will still be the correct value as + * described above for the bucket. + * For OK status, which indicates a matching entry + * was found, BKT_NUM and NUM_ENTRIES are for the + * bucket containing the match entry, which may or may + * not be the tail bucket. For EM_MISS status, the + * values are always for the tail bucket. + * + * num_entries (Offset:0x18[42:40], Size: 3) + * See BKT_NUM description. + * This structure is used to inform the host of an + * event within the NIC. 
+ */ +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_MID_PATH_LONG 31 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_CACHE_ERR 5 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_EM_MISS 6 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_EM_SEARCH 8 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V1_EB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V1_SB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V1_OFFS 0x8 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_V1(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_V1_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V1_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_V1(buf) \ + 
GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_V1_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V1_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_EB, \ 
+ TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_EB 25 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_SB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_OFFS 0x10 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_TABLE_INDEX2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_TABLE_INDEX2_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V2_EB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V2_SB 0 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_V2_OFFS 0x18 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_V2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_V2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V2_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_V2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_V2_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_V2_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_EB 39 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_SB 32 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_OFFS 0x18 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_BKT_NUM(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_BKT_NUM(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_BKT_NUM_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_EB 42 +#define TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_SB 40 +#define 
TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_OFFS 0x18 + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SET_NUM_ENTRIES(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_SB) +#define TFC_MPC_TBL_EM_SEARCH_CMPL_GET_NUM_ENTRIES(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_OFFS), \ + TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_SEARCH_CMPL_NUM_ENTRIES_SB) + +#define TFC_MPC_TBL_EM_SEARCH_CMPL_SIZE 32 + +/* + * CFA Table EM Insert Completion Record: + * + * OK status indicates that the exact match entry from the command was + * successfully inserted. + * EM_DUPLICATE status indicates that the insert was aborted because an entry + * with the same exact match key was found and REPLACE=0 in the command. + * EM_ABORT status indicates that no duplicate was found, the tail bucket in + * the chain was full, and TABLE_INDEX2=0. No changes are made to the database + * in this case. + * TABLE_INDEX is the starting address at which to insert the exact match entry + * (from the command). + * TABLE_INDEX2 is the address at which to insert a new bucket at the tail of + * the static bucket chain if needed (from the command). CHAIN_UPD=1 if a new + * bucket was added at this address. + * TABLE_INDEX3 is the static bucket address for the chain, determined from + * hashing the exact match entry. Software needs this address and TABLE_INDEX in + * order to delete the entry using an EM_DELETE command. + * TABLE_INDEX4 is the index of an entry found that had a matching exact match + * key to the command entry key. If no matching entry was found, it is set to 0. + * There are two cases when there is a matching entry, depending on REPLACE from + * the command: + * REPLACE=0: EM_DUPLICATE status is reported and the insert is aborted. 
+ * Software can use the static bucket address (TABLE_INDEX3[23:0]) and the + * matching entry (TABLE_INDEX4) in an EM_DELETE command if it wishes to + * explicity delete the matching entry. + * REPLACE=1: REPLACED_ENTRY=1 to signal that the entry at TABLE_INDEX4 was + * replaced by the insert entry. REPLACED_ENTRY will only be 1 if reporting OK + * status in this case. Software can de-allocate the entry at TABLE_INDEX4. + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * unused(8) hash_msb unused(3) v1 + * 0x10 unused(6) table_index3 unused(6) table_index2 + * 0x18 unused(19) replaced_entry chain_upd num_entries + * bkt_num unused(5) table_index4 v2 + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 31 mid_path_long + * Mid Path Long Completion : Completion of a Mid + * Path Command. Length = 32B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field. + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. 
+ * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One of more cache responses signaled an error + * while processing the command. + * 7 EM_DUPLICATE + * Found an entry with a key that matches the + * entry to insert and the command has REPLACE=0. + * The new entry was not inserted. + * 9 EM_ABORT + * For insert commands, TABLE_INDEX2 provides the + * address at which to add a new bucket if the tail + * bucket of the chain is full and no duplicate was + * found. If TABLE_INDEX2=0, the insert is aborted + * (no changes are made to the database) and this + * status is returned. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 9 EM_INSERT + * This command supplies an exact match entry of + * 1-4 32B words to be inserted into the exact + * match table. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v1 (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. 
+ * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX from the command, which is the + * starting address at which to insert the exact match + * entry. + * + * table_index2 (Offset:0x10[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX2 from the command, which is the index + * for the new tail bucket to add if needed + * (CHAIN_UPD=1 if it was used). + * + * table_index3 (Offset:0x10[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. 
+ * If the hash is computed (no errors during initial + * processing of the command), TABLE_INDEX2[23:0] is + * the static bucket address determined from the hash + * of the exact match entry key in the command and the + * (EM_SIZE, EM_BUCKETS) configuration for TABLE_SCOPE + * of the command. Bits 25:24 in this case are set to + * 0. + * For any other status, it is always 0. + * + * v2 (Offset:0x18[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * table_index4 (Offset:0x18[26:1], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * ENTRY_PTR of matching entry found. Set to 0 if no + * matching entry found. If REPLACED_ENTRY=1, that + * indicates a matching entry was found and REPLACE=1 + * in the command. In this case, the matching entry + * was replaced by the new entry in the command and + * this index can therefore by de-allocated. + * + * bkt_num (Offset:0x18[39:32], Size: 8) + * BKT_NUM is the bucket number in chain of the tail + * bucket after finishing processing the command, + * except when the command stops processing before the + * tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following + * describes the cases where BKT_NUM and NUM_ENTRIES + * are not for the tail bucket after finishing + * processing of the command: + * For UNSPRT_ERR, FMT_ERR, SCOPE_ERR, or ADDR_ERR + * completion status, BKT_NUM will be set to 0. + * For CACHE_ERR completion status, BKT_NUM will be + * set to the bucket number that was last read without + * error. If ERR=1 in the response to the static + * bucket read, BKT_NUM and NUM_ENTRIES are set to 0. + * The static bucket is number 0, BKT_NUM increments + * for each new bucket in the chain, and saturates at + * 255. Therefore, if the value is 255, BKT_NUM may or + * may not be accurate. 
In this case, though, + * NUM_ENTRIES will still be the correct value as + * described above for the bucket. + * For EM_DUPLICATE status, which indicates a + * matching entry was found and prevented the insert, + * BKT_NUM and NUM_ENTRIES are for the bucket + * containing the match entry, which may or may not be + * the tail bucket. For OK and EM_ABORT status, the + * values are always for the tail bucket. For + * EM_ABORT, NUM_ENTRIES will always be 6 since the + * tail bucket is full. + * + * num_entries (Offset:0x18[42:40], Size: 3) + * See BKT_NUM description. + * + * chain_upd (Offset:0x18[43], Size: 1) + * Specifies if the chain was updated while + * processing the command: + * Set to 1 when a new bucket is added to the tail of + * the static bucket chain at TABLE_INDEX2. This + * occurs if and only if the insert requires adding a + * new entry and the tail bucket is full. If set to 0, + * TABLE_INDEX2 was not used and is therefore still + * free. + * When the CFA updates the static bucket chain by + * adding a bucket during inserts or removing one + * during deletes, it always sets CHAIN=0 in the new + * tail bucket and sets CHAIN_PTR to that of the + * original tail bucket. This is done to preserve the + * background chaining. EM_CHAIN provides a means to + * coherently update the CHAIN_PTR in the tail bucket + * separately if desired. + * + * replaced_entry (Offset:0x18[44], Size: 1) + * Set to 1 if a matching entry was found and + * REPLACE=1 in command. In the case, the entry + * starting at TABLE_INDEX4 was replaced and can + * therefore be de-allocated. Otherwise, this flag is + * set to 0. + * This structure is used to inform the host of an + * event within the NIC. 
+ */ +#define TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_MID_PATH_LONG 31 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_CACHE_ERR 5 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_EM_DUPLICATE 7 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_EM_ABORT 9 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_MP_CLIENT(buf, val) \ + 
SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_EM_INSERT 9 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_V1_EB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_V1_SB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_V1_OFFS 0x8 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_V1(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_V1_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_V1_SB) +#define 
TFC_MPC_TBL_EM_INSERT_CMPL_GET_V1(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_V1_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_V1_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_OFFS), \ + 
TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_EB 25 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_SB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_OFFS 0x10 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX2_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_EB 57 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_SB 32 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_OFFS 0x10 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TABLE_INDEX3(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX3(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX3_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_V2_EB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_V2_SB 0 +#define TFC_MPC_TBL_EM_INSERT_CMPL_V2_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_V2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_V2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_V2_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_V2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_V2_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_V2_SB) + +#define 
TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_EB 26 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_SB 1 +#define TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_TABLE_INDEX4(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_TABLE_INDEX4(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_TABLE_INDEX4_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_EB 39 +#define TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_SB 32 +#define TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_BKT_NUM(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_BKT_NUM(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_BKT_NUM_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_EB 42 +#define TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_SB 40 +#define TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_NUM_ENTRIES(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_NUM_ENTRIES(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_NUM_ENTRIES_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_EB 43 +#define TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_SB 
43 +#define TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_CHAIN_UPD(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_CHAIN_UPD(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_CHAIN_UPD_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_EB 44 +#define TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_SB 44 +#define TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_OFFS 0x18 + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SET_REPLACED_ENTRY(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_SB) +#define TFC_MPC_TBL_EM_INSERT_CMPL_GET_REPLACED_ENTRY(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_OFFS), \ + TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_EB, \ + TFC_MPC_TBL_EM_INSERT_CMPL_REPLACED_ENTRY_SB) + +#define TFC_MPC_TBL_EM_INSERT_CMPL_SIZE 32 + +/* + * CFA Table EM Delete Completion Record: + * + * OK status indicates that an ENTRY_PTR matching TABLE_INDEX was found in the + * static bucket chain specified and was therefore deleted. EM_MISS status + * indicates that no match was found. + * TABLE_INDEX is from the command. It is the index of the entry to delete. + * TABLE_INDEX2 is from the command. It is the static bucket address. + * TABLE_INDEX3 is the index of the tail bucket of the static bucket chain + * prior to processing the command. + * TABLE_INDEX4 is the index of the tail bucket of the static bucket chain + * after processing the command. 
+ * If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the static bucket was the + * tail bucket, it became empty after the delete, the scope is a locked scope, + * and CHAIN_PTR was 0. In this case, the static bucket has been evicted from + * the cache. + * Otherwise, if CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 + * was removed from the chain because it went empty. It can therefore be de- + * allocated. + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * unused(8) hash_msb unused(3) v1 + * 0x10 unused(6) table_index3 unused(6) table_index2 + * 0x18 unused(20) chain_upd num_entries bkt_num unused(5) + * table_index4 v2 + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . + * Value Enum Enumeration Description + * 31 mid_path_long + * Mid Path Long Completion : Completion of a Mid + * Path Command. Length = 32B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field.) + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indicates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. 
+ * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One or more cache responses signaled an error + * while processing the command. + * 6 EM_MISS + * No matching entry found. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. + * Value Enum Enumeration Description + * 10 EM_DELETE + * This command deletes an entry from the exact + * match table. CFA searches for the specified + * entry address in the bucket chain at the static + * bucket address given. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v1 (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. 
+ * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX from the command, which is the index + * of the entry to delete. + * + * table_index2 (Offset:0x10[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX2 from the command. + * + * table_index3 (Offset:0x10[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * For OK or EM_MISS status, the index of the tail + * bucket of the chain prior to processing the + * command. If CHAIN_UPD=1, the bucket was removed and + * this index can be de-allocated. For other status + * values, it is set to 0. + * + * v2 (Offset:0x18[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * table_index4 (Offset:0x18[26:1], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. 
+ * For OK or EM_MISS status, the index of the tail + * bucket of the chain after processing the command. If + * CHAIN_UPD=0 (always for EM_MISS status), it is + * always equal to TABLE_INDEX3 as the chain was not + * updated. For other status values, it is set to 0. + * + * bkt_num (Offset:0x18[39:32], Size: 8) + * BKT_NUM is the bucket number in chain of the tail + * bucket after finishing processing the command, + * except when the command stops processing before the + * tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following + * describes the cases where BKT_NUM and NUM_ENTRIES + * are not for the tail bucket after finishing + * processing of the command: + * For UNSPRT_ERR, FMT_ERR, SCOPE_ERR, or ADDR_ERR + * completion status, BKT_NUM will be set to 0. + * For CACHE_ERR completion status, BKT_NUM will be + * set to the bucket number that was last read without + * error. If ERR=1 in the response to the static + * bucket read, BKT_NUM and NUM_ENTRIES are set to 0. + * The static bucket is number 0, BKT_NUM increments + * for each new bucket in the chain, and saturates at + * 255. Therefore, if the value is 255, BKT_NUM may or + * may not be accurate. In this case, though, + * NUM_ENTRIES will still be the correct value as + * described above for the bucket. + * For OK status, BKT_NUM and NUM_ENTRIES will be for + * the tail bucket after processing. + * + * num_entries (Offset:0x18[42:40], Size: 3) + * See BKT_NUM description. + * + * chain_upd (Offset:0x18[43], Size: 1) + * Specifies if the chain was updated while + * processing the command: + * Set to 1 when a bucket is removed from the static + * bucket chain. This occurs if after the delete, the + * tail bucket is a dynamic bucket and no longer has + * any valid entries. In this case, software should + * de-allocate the dynamic bucket at TABLE_INDEX3. + * It is also set to 1 when the static bucket is + * evicted, which only occurs for locked scopes. 
See + * the EM_DELETE command description for details. + * When the CFA updates the static bucket chain by + * adding a bucket during inserts or removing one + * during deletes, it always sets CHAIN=0 in the new + * tail bucket and sets CHAIN_PTR to that of the + * original tail bucket. This is done to preserve the + * background chaining. EM_CHAIN provides a means to + * coherently update the CHAIN_PTR in the tail bucket + * separately if desired. + * This structure is used to inform the host of an + * event within the NIC. + */ +#define TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_MID_PATH_LONG 31 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_CACHE_ERR 5 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_EM_MISS 6 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_STATUS(buf) \ + 
GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_EM_DELETE 10 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_V1_EB 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_V1_SB 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_V1_OFFS 0x8 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_V1(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_V1_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_V1_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_V1(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_V1_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_V1_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_EB 57 +#define 
TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_EB 25 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_SB 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_OFFS 0x10 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX2_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_EB 57 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_SB 32 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_OFFS 0x10 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TABLE_INDEX3(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX3(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX3_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_V2_EB 0 +#define TFC_MPC_TBL_EM_DELETE_CMPL_V2_SB 0 +#define 
TFC_MPC_TBL_EM_DELETE_CMPL_V2_OFFS 0x18 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_V2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_V2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_V2_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_V2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_V2_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_V2_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_EB 26 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_SB 1 +#define TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_OFFS 0x18 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_TABLE_INDEX4(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_TABLE_INDEX4(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_TABLE_INDEX4_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_EB 39 +#define TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_SB 32 +#define TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_OFFS 0x18 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_BKT_NUM(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_BKT_NUM(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_BKT_NUM_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_EB 42 +#define TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_SB 40 +#define TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_OFFS 0x18 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_NUM_ENTRIES(buf, val) \ + SET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_NUM_ENTRIES(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_NUM_ENTRIES_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_EB 43 +#define TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_SB 43 +#define TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_OFFS 0x18 + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SET_CHAIN_UPD(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_SB) +#define TFC_MPC_TBL_EM_DELETE_CMPL_GET_CHAIN_UPD(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_OFFS), \ + TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_DELETE_CMPL_CHAIN_UPD_SB) + +#define TFC_MPC_TBL_EM_DELETE_CMPL_SIZE 32 + +/* + * CFA Table EM Chain Completion Record: + * + * OK status indicates that the CHAIN_PTR of the tail bucket was successfully + * updated. + * TABLE_INDEX is from the command. It is the value of the new CHAIN_PTR. + * TABLE_INDEX2 is from the command. + * TABLE_INDEX3 is the index of the tail bucket of the static bucket chain. + * Offset 63 0 + * 0x0 opaque unused(8) opcode mp_client status unused(2) + * type + * 0x8 unused(6) table_index unused(3) table_scope + * unused(8) hash_msb unused(3) v1 + * 0x10 unused(6) table_index3 unused(6) table_index2 + * 0x18 unused(20) chain_upd num_entries bkt_num unused(31) + * v2 + * + * type (Offset:0x0[5:0], Size: 6) + * This field indicates the exact type of the + * completion. By convention, the LSB identifies the + * length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B + * records **(EXCEPT no_op!!!!)** . 
+ * Value Enum Enumeration Description + * 31 mid_path_long + * Mid Path Long Completion : Completion of a Mid + * Path Command. Length = 32B + * + * status (Offset:0x0[11:8], Size: 4) + * The command processing status. + * Value Enum Enumeration Description + * 0 OK + * Completed without error. + * 1 UNSPRT_ERR + * The CFA OPCODE is an unsupported value. + * 2 FMT_ERR + * Indicates a CFA command formatting error. This + * error can occur on any of the supported CFA + * commands. + * Error conditions: + * DATA_SIZE[2:0] outside range of [1, 4]. (Does + * not apply to READ_CLR, EM_DELETE, or EM_CHAIN + * commands as they do not have a DATA_SIZE field) + * HOST_ADDRESS[1:0] != 0 (Only applies to READ, + * READ_CLR, and EVENT_COLLECTION as other commands + * do not have a HOST_ADDRESS field.) + * 3 SCOPE_ERR + * Access to TABLE_SCOPE is disabled for the SVIF. + * Indicates that the bit indexed by (SVIF, + * TABLE_SCOPE) in the TAI_SVIF_SCOPE memory is set + * to 0. + * 4 ADDR_ERR + * This error can only occur for commands having + * TABLE_TYPE present and set to EM and not having + * any of the previous errors, or for any of the + * EM* commands, for which a TABLE_TYPE of EM is + * implied. + * It indicates that an EM address (TABLE_INDEX*) + * in the command is invalid based on (EM_BUCKETS, + * EM_SIZE) parameters configured for TABLE_SCOPE. + * All addresses must be in the range [0, + * EM_SIZE). Static bucket addresses must be within + * the range determined by EM_BUCKETS. Dynamic + * bucket addresses and entries must be outside of + * the static bucket range. + * 5 CACHE_ERR + * One or more cache responses signaled an error + * while processing the command. + * + * mp_client (Offset:0x0[15:12], Size: 4) + * This field represents the Mid-Path client that + * generated the completion. + * Value Enum Enumeration Description + * 2 TE_CFA + * TE-CFA + * 3 RE_CFA + * RE-CFA + * + * opcode (Offset:0x0[23:16], Size: 8) + * OPCODE from the command. 
+ * Value Enum Enumeration Description + * 11 EM_CHAIN + * This command updates CHAIN_PTR in the tail + * bucket of a static bucket chain, supplying both + * the static bucket and the new CHAIN_PTR value. + * + * opaque (Offset:0x0[63:32], Size: 32) + * This is a copy of the opaque field from the mid + * path BD of this command. + * + * v1 (Offset:0x8[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * hash_msb (Offset:0x8[15:4], Size: 12) + * For EM_SEARCH and EM_INSERT commands without + * errors that abort the command processing prior to + * the hash computation, set to HASH[35:24] of the + * hash computed from the exact match entry key in the + * command. + * For all other cases, set to 0 except for the + * following error conditions, which carry debug + * information in this field as shown by error status + * below: + * FMT_ERR: + * Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. + * If HOST_ADDRESS or DATA_SIZE field not + * present they are set to 0. + * SCOPE_ERR: + * Set to {1'b0, SVIF[10:0]}. + * ADDR_ERR: + * Only possible when TABLE_TYPE=EM or for EM* + * commands + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, + * DATA_SIZE[2:0]} + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error + * TABLE_INDEX[1]=1 if TABLE_INDEX2 had an error + * TABLE_INDEX[0]=1 if TABLE_INDEX had an error + * TABLE_INDEX[n]=0 if the completion does not + * have the corresponding TABLE_INDEX field above. + * CACHE_ERR: + * Set to {9'd0, DATA_SIZE[2:0]} + * + * table_scope (Offset:0x8[28:24], Size: 5) + * TABLE_SCOPE from the command. + * + * table_index (Offset:0x8[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX from the command, which is the new + * CHAIN_PTR for the tail bucket of the static bucket + * chain. 
+ * + * table_index2 (Offset:0x10[25:0], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * TABLE_INDEX2 from the command. + * + * table_index3 (Offset:0x10[57:32], Size: 26) + * A 32B index into the EM table identified by + * TABLE_SCOPE. + * For OK status, the index of the tail bucket of the + * chain. Otherwise, set to 0. + * + * v2 (Offset:0x18[0], Size: 1) + * This value is written by the NIC such that it will + * be different for each pass through the completion + * queue. The even passes will write 1. The odd passes + * will write 0. + * + * bkt_num (Offset:0x18[39:32], Size: 8) + * BKT_NUM is the bucket number in chain of the tail + * bucket after finishing processing the command, + * except when the command stops processing before the + * tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following + * describes the cases where BKT_NUM and NUM_ENTRIES + * are not for the tail bucket after finishing + * processing of the command: + * For UNSPRT_ERR, FMT_ERR, SCOPE_ERR, or ADDR_ERR + * completion status, BKT_NUM will be set to 0. + * For CACHE_ERR completion status, BKT_NUM will be + * set to the bucket number that was last read without + * error. If ERR=1 in the response to the static + * bucket read, BKT_NUM and NUM_ENTRIES are set to 0. + * The static bucket is number 0, BKT_NUM increments + * for each new bucket in the chain, and saturates at + * 255. Therefore, if the value is 255, BKT_NUM may or + * may not be accurate. In this case, though, + * NUM_ENTRIES will still be the correct value as + * described above for the bucket. + * For OK status, BKT_NUM and NUM_ENTRIES will be for + * the tail bucket. + * + * num_entries (Offset:0x18[42:40], Size: 3) + * See BKT_NUM description. 
+ * + * chain_upd (Offset:0x18[43], Size: 1) + * Set to 1 when the scope is a locked scope, the + * tail bucket is the static bucket, the bucket is + * empty (all of its ENTRY_PTR values are 0), and + * TABLE_INDEX=0 in the command. In this case, the + * static bucket is evicted. For all other cases, it + * is set to 0. + * When the CFA updates the static bucket chain by + * adding a bucket during inserts or removing one + * during deletes, it always sets CHAIN=0 in the new + * tail bucket and sets CHAIN_PTR to that of the + * original tail bucket. This is done to preserve the + * background chaining. EM_CHAIN provides a means to + * coherently update the CHAIN_PTR in the tail bucket + * separately if desired. + * This structure is used to inform the host of an + * event within the NIC. + */ +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_MID_PATH_LONG 31 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_EB 5 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_SB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_TYPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_TYPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TYPE_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_OK 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_UNSPRT_ERR 1 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_FMT_ERR 2 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_SCOPE_ERR 3 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_ADDR_ERR 4 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_CACHE_ERR 5 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_EB 11 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_SB 8 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_OFFS 0x0 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_STATUS(buf, val) \ + SET_BITFLD64(TO_P64((buf), 
TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_STATUS(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_STATUS_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_TE_CFA 2 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_RE_CFA 3 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_EB 15 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_SB 12 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_OFFS 0x0 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_MP_CLIENT(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_MP_CLIENT(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_MP_CLIENT_SB) + +#define TFC_MPC_CMD_OPCODE_EM_CHAIN 11 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_EB 23 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_SB 16 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_OPCODE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_OPCODE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPCODE_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_EB 63 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_SB 32 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_OFFS 0x0 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_OPAQUE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_OFFS), \ + (u64)(val), \ + 
TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_OPAQUE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_OPAQUE_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V1_EB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V1_SB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V1_OFFS 0x8 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_V1(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_V1_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V1_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_V1(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_V1_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V1_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V1_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_EB 15 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_SB 4 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_OFFS 0x8 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_HASH_MSB(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_HASH_MSB(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_HASH_MSB_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_EB 28 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_SB 24 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_OFFS 0x8 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_TABLE_SCOPE(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_TABLE_SCOPE(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_OFFS), \ + 
TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_SCOPE_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_EB 57 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_SB 32 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_OFFS 0x8 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_TABLE_INDEX(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_TABLE_INDEX(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_EB 25 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_SB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_OFFS 0x10 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_TABLE_INDEX2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_TABLE_INDEX2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX2_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_EB 57 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_SB 32 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_OFFS 0x10 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_TABLE_INDEX3(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_TABLE_INDEX3(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_EB, \ + 
TFC_MPC_TBL_EM_CHAIN_CMPL_TABLE_INDEX3_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V2_EB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V2_SB 0 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_V2_OFFS 0x18 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_V2(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_V2_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V2_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_V2(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_V2_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V2_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_V2_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_EB 39 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_SB 32 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_OFFS 0x18 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_BKT_NUM(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_BKT_NUM(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_BKT_NUM_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_EB 42 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_SB 40 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_OFFS 0x18 + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SET_NUM_ENTRIES(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_NUM_ENTRIES(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_NUM_ENTRIES_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_EB 43 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_SB 43 +#define TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_OFFS 0x18 + +#define 
TFC_MPC_TBL_EM_CHAIN_CMPL_SET_CHAIN_UPD(buf, val) \ + SET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_OFFS), \ + (u64)(val), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_SB) +#define TFC_MPC_TBL_EM_CHAIN_CMPL_GET_CHAIN_UPD(buf) \ + GET_BITFLD64(TO_P64((buf), TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_OFFS), \ + TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_EB, \ + TFC_MPC_TBL_EM_CHAIN_CMPL_CHAIN_UPD_SB) + +#define TFC_MPC_TBL_EM_CHAIN_CMPL_SIZE 32 + +#endif /* __CFA_P70_MPC_CMPLS_H__ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h new file mode 100644 index 000000000000..217e288befc0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_common.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ +#ifndef __CFA_P70_MPC_COMMON_H__ +#define __CFA_P70_MPC_COMMON_H__ + +/* Convert a u8* ptr + offset to a u32* ptr */ +#define TO_P32(buf, offs) ((u32 *)((buf) + (offs))) +/* Convert a u8* ptr + offset to a u64* ptr */ +#define TO_P64(buf, offs) ((u64 *)((buf) + (offs))) + +static inline u32 MASK_32_W(u8 eb, u8 sb) +{ + return ((1UL << ((eb - sb) + 1)) - 1); +} + +static inline u32 MASK_32(u8 eb, u8 sb) +{ + return (((1UL << ((eb - sb) + 1)) - 1) << sb); +} + +static inline u32 GET_BITFLD32(u32 *fld, u8 eb, u8 sb) +{ + return ((*fld >> sb) & MASK_32_W(eb, sb)); +} + +static inline void SET_BITFLD32(u32 *fld, u32 val, u8 eb, u8 sb) +{ + *fld &= ~MASK_32(eb, sb); + *fld |= ((val << sb) & MASK_32(eb, sb)); +} + +static inline u32 GET_FLD32(u32 *fld) +{ + return *fld; +} + +static inline void SET_FLD32(u32 *fld, u32 val) +{ + *fld = val; +} + +static inline u64 MASK_64_W(u8 eb, u8 sb) +{ + return ((1ULL << ((eb - sb) + 1)) - 1); +} + +static inline u64 MASK_64(u8 eb, u8 sb) +{ +return 
(((1ULL << ((eb - sb) + 1)) - 1) << sb); +} + +static inline u64 GET_BITFLD64(u64 *fld, u8 eb, u8 sb) +{ + return ((*fld >> sb) & MASK_64_W(eb, sb)); +} + +static inline void SET_BITFLD64(u64 *fld, u64 val, u8 eb, u8 sb) +{ + *fld &= ~MASK_64(eb, sb); + *fld |= ((val << sb) & MASK_64(eb, sb)); +} + +static inline u64 GET_FLD64(u64 *fld) +{ + return *fld; +} + +static inline void SET_FLD64(u64 *fld, u64 val) +{ + *fld = val; +} + +#endif /* __CFA_P70_MPC_COMMON_H__ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h new file mode 100644 index 000000000000..363091b87767 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_ids.h @@ -0,0 +1,1170 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + * + * Date: 09/29/22 11:50:38 + * + * Note: This file is scripted generated by ./cfa_header_gen.py. + * DO NOT modify this file manually !!!! + * + */ + +#ifndef _CFA_P70_MPC_FIELD_IDS_H_ +#define _CFA_P70_MPC_FIELD_IDS_H_ + +/* clang-format off */ + +/** + * Field IDS for READ_CMD: This command reads 1-4 consecutive 32B words + * from the specified address within a table scope. + */ +enum cfa_p70_mpc_read_cmd_fields { + CFA_P70_MPC_READ_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_P70_MPC_READ_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. */ + CFA_P70_MPC_READ_CMD_TABLE_SCOPE_FLD = 2, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_P70_MPC_READ_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. 
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_READ_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_P70_MPC_READ_CMD_TABLE_INDEX_FLD = 5, + /* + * The 64-bit host address to which to write the DMA data returned in + * the completion. The data will be written to the same function as the + * one that owns the SQ this command is read from. DATA_SIZE determines + * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0, + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_P70_MPC_READ_CMD_HOST_ADDRESS_FLD = 6, + CFA_P70_MPC_READ_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for WRITE_CMD: This command writes 1-4 consecutive 32B + * words to the specified address within a table scope. + */ +enum cfa_p70_mpc_write_cmd_fields { + CFA_P70_MPC_WRITE_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_P70_MPC_WRITE_CMD_TABLE_TYPE_FLD = 1, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_P70_MPC_WRITE_CMD_WRITE_THROUGH_FLD = 2, + /* Table scope to access. */ + CFA_P70_MPC_WRITE_CMD_TABLE_SCOPE_FLD = 3, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. 
+ */ + CFA_P70_MPC_WRITE_CMD_DATA_SIZE_FLD = 4, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_WRITE_CMD_CACHE_OPTION_FLD = 5, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_P70_MPC_WRITE_CMD_TABLE_INDEX_FLD = 6, + CFA_P70_MPC_WRITE_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for READ_CLR_CMD: This command performs a read-modify-write + * to the specified 32B address using a 16b mask that specifies up to 16 + * 16b words to clear before writing the data back. It returns the 32B + * data word read from cache (not the value written after the clear + * operation). + */ +enum cfa_p70_mpc_read_clr_cmd_fields { + CFA_P70_MPC_READ_CLR_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_P70_MPC_READ_CLR_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. */ + CFA_P70_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD = 2, + /* + * This field is no longer used. The READ_CLR command always reads (and + * does a mask-clear) on a single cache line. This field was added for + * SR2 A0 to avoid an ADDR_ERR when TABLE_INDEX=0 and TABLE_TYPE=EM (see + * CUMULUS-17872). That issue was fixed in SR2 B0. + */ + CFA_P70_MPC_READ_CLR_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. 
CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_READ_CLR_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_P70_MPC_READ_CLR_CMD_TABLE_INDEX_FLD = 5, + /* + * The 64-bit host address to which to write the DMA data returned in + * the completion. The data will be written to the same function as the + * one that owns the SQ this command is read from. DATA_SIZE determines + * the maximum size of the data written. If HOST_ADDRESS[1:0] is not 0, + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_P70_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD = 6, + /* + * Specifies bits in 32B data word to clear. For x=0..15, when + * clear_mask[x]=1, data[x*16+15:x*16] is set to 0. + */ + CFA_P70_MPC_READ_CLR_CMD_CLEAR_MASK_FLD = 7, + CFA_P70_MPC_READ_CLR_CMD_MAX_FLD = 8, +}; + +/** + * Field IDS for INVALIDATE_CMD: This command forces an explicit evict + * of 1-4 consecutive cache lines such that the next time the structure + * is used it will be re-read from its backing store location. + */ +enum cfa_p70_mpc_invalidate_cmd_fields { + CFA_P70_MPC_INVALIDATE_CMD_OPAQUE_FLD = 0, + /* This value selects the table type to be acted upon. */ + CFA_P70_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD = 1, + /* Table scope to access. */ + CFA_P70_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD = 2, + /* + * This value identifies the number of cache lines to invalidate. A + * FMT_ERR is reported if the value is not in the range of [1, 4]. + */ + CFA_P70_MPC_INVALIDATE_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. 
+ * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the table identified by (TABLE_TYPE, TABLE_SCOPE): + */ + CFA_P70_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD = 5, + CFA_P70_MPC_INVALIDATE_CMD_MAX_FLD = 6, +}; + +/** + * Field IDS for EM_SEARCH_CMD: This command supplies an exact match + * entry of 1-4 32B words to search for in the exact match table. CFA + * first computes the hash value of the key in the entry, and determines + * the static bucket address to search from the hash and the + * (EM_BUCKETS, EM_SIZE) for TABLE_SCOPE. It then searches that static + * bucket chain for an entry with a matching key (the LREC in the + * command entry is ignored). If a matching entry is found, CFA reports + * OK status in the completion. Otherwise, assuming no errors abort the + * search before it completes, it reports EM_MISS status. + */ +enum cfa_p70_mpc_em_search_cmd_fields { + CFA_P70_MPC_EM_SEARCH_CMD_OPAQUE_FLD = 0, + /* Table scope to access. */ + CFA_P70_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD = 1, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_P70_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. 
And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD = 3, + CFA_P70_MPC_EM_SEARCH_CMD_MAX_FLD = 4, +}; + +/** + * Field IDS for EM_INSERT_CMD: This command supplies an exact match + * entry of 1-4 32B words to insert in the exact match table. CFA first + * computes the hash value of the key in the entry, and determines the + * static bucket address to search from the hash and the (EM_BUCKETS, + * EM_SIZE) for TABLE_SCOPE. It then writes the 1-4 32B words of the + * exact match entry starting at the TABLE_INDEX location in the + * command. When the entry write completes, it searches the static + * bucket chain for an existing entry with a key matching the key in the + * insert entry (the LREC does not need to match). If a matching entry + * is found: * If REPLACE=0, the CFA aborts the insert and returns + * EM_DUPLICATE status. * If REPLACE=1, the CFA overwrites the matching + * entry with the new entry. REPLACED_ENTRY=1 in the completion in this + * case to signal that an entry was replaced. The location of the entry + * is provided in the completion. If no match is found, CFA adds the new + * entry to the lowest unused entry in the tail bucket. If the current + * tail bucket is full, this requires adding a new bucket to the tail. + * Then entry is then inserted at entry number 0. TABLE_INDEX2 provides + * the address of the new tail bucket, if needed. If set to 0, the + * insert is aborted and returns EM_ABORT status instead of adding a new + * bucket to the tail. CHAIN_UPD in the completion indicates whether a + * new bucket was added (1) or not (0). For locked scopes, if the read + * of the static bucket gives a locked scope miss error, indicating that + * the address is not in the cache, the static bucket is assumed empty. + * In this case, TAI creates a new bucket, setting entry 0 to the new + * entry fields and initializing all other fields to 0. 
It writes this + * new bucket to the static bucket address, which installs it in the + * cache. + */ +enum cfa_p70_mpc_em_insert_cmd_fields { + CFA_P70_MPC_EM_INSERT_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_P70_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. */ + CFA_P70_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD = 2, + /* + * Number of 32B units in access. If value is outside the range [1, 4], + * CFA aborts processing and reports FMT_ERR status. + */ + CFA_P70_MPC_EM_INSERT_CMD_DATA_SIZE_FLD = 3, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD = 4, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Starting + * address to write exact match entry being inserted. + */ + CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD = 5, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. + */ + CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD = 6, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Only used + * when no duplicate entry is found and the tail bucket in the chain + * searched has no unused entries. 
In this case, TABLE_INDEX2 provides + * the index to the 32B dynamic bucket to add to the tail of the chain + * (it is the new tail bucket). In this case, the CFA first writes + * TABLE_INDEX2 with a new bucket: * Entry 0 of the bucket sets the + * HASH_MSBS computed from the hash and ENTRY_PTR to TABLE_INDEX. * + * Entries 1-5 of the bucket set HASH_MSBS and ENTRY_PTR to 0. * CHAIN=0 + * and CHAIN_PTR is set to CHAIN_PTR from to original tail bucket to + * maintain the background chaining. CFA then sets CHAIN=1 and + * CHAIN_PTR=TABLE_INDEX2 in the original tail bucket to link the new + * bucket to the chain. CHAIN_UPD=1 in the completion to signal that the + * new bucket at TABLE_INDEX2 was added to the tail of the chain. + */ + CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD = 7, + /* + * Only used if an entry is found whose key matches the exact match + * entry key in the command: * REPLACE=0: The insert is aborted and + * EM_DUPLICATE status is returned, signaling that the insert failed. + * The index of the matching entry that blocked the insertion is + * returned in the completion. * REPLACE=1: The matching entry is + * replaced with that from the command (ENTRY_PTR in the bucket is + * overwritten with TABLE_INDEX from the command). HASH_MSBS for the + * entry number never changes in this case since it had to match the new + * entry key HASH_MSBS to match. When an entry is replaced, + * REPLACED_ENTRY=1 in the completion and the index of the matching + * entry is returned in the completion so that software can de-allocate + * the entry. + */ + CFA_P70_MPC_EM_INSERT_CMD_REPLACE_FLD = 8, + CFA_P70_MPC_EM_INSERT_CMD_MAX_FLD = 9, +}; + +/** + * Field IDS for EM_DELETE_CMD: This command searches for an exact match + * entry index in the static bucket chain and deletes it if found. + * TABLE_INDEX give the entry index to delete and TABLE_INDEX2 gives the + * static bucket index. 
If a matching entry is found: * If the matching + * entry is the last valid entry in the tail bucket, its entry fields + * (HASH_MSBS and ENTRY_PTR) are set to 0 to delete the entry. * If the + * matching entry is not the last valid entry in the tail bucket, the + * entry fields from that last entry are moved to the matching entry, + * and the fields of that last entry are set to 0. * If any of the + * previous processing results in the tail bucket not having any valid + * entries, the tail bucket is the static bucket, the scope is a locked + * scope, and CHAIN_PTR=0, hardware evicts the static bucket from the + * cache and the completion signals this case with CHAIN_UPD=1. * If any + * of the previous processing results in the tail bucket not having any + * valid entries, and the tail bucket is not the static bucket, the tail + * bucket is removed from the chain. In this case, the penultimate + * bucket in the chain becomes the tail bucket. It has CHAIN set to 0 to + * unlink the tail bucket, and CHAIN_PTR set to that from the original + * tail bucket to preserve background chaining. The completion signals + * this case with CHAIN_UPD=1 and returns the index to the bucket + * removed so that software can de-allocate it. CFA returns OK status if + * the entry was successfully deleted. Otherwise, it returns EM_MISS + * status assuming there were no errors that caused processing to be + * aborted. + */ +enum cfa_p70_mpc_em_delete_cmd_fields { + CFA_P70_MPC_EM_DELETE_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_P70_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. 
*/ + CFA_P70_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD = 3, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Entry index + * to delete. + */ + CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD = 4, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. + */ + CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD = 5, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Static + * bucket address for bucket chain. + */ + CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD = 6, + CFA_P70_MPC_EM_DELETE_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for EM_CHAIN_CMD: This command updates CHAIN_PTR in the + * tail bucket of a static bucket chain, supplying both the static + * bucket and the new CHAIN_PTR value. TABLE_INDEX is the new CHAIN_PTR + * value and TABLE_INDEX2[23:0] is the static bucket. This command + * provides software a means to update background chaining coherently + * with other bucket updates. The value of CHAIN is unaffected (stays at + * 0). For locked scopes, if the static bucket is the tail bucket, it is + * empty (all of its ENTRY_PTR values are 0), and TABLE_INDEX=0 (the + * CHAIN_PTR is being set to 0), instead of updating the static bucket + * it is evicted from the cache. In this case, CHAIN_UPD=1 in the + * completion. 
+ */ +enum cfa_p70_mpc_em_chain_cmd_fields { + CFA_P70_MPC_EM_CHAIN_CMD_OPAQUE_FLD = 0, + /* + * Sets the OPTION field on the cache interface to use write-through for + * EM entry writes while processing EM_INSERT commands. For all other + * cases (inluding EM_INSERT bucket writes), the OPTION field is set by + * the CACHE_OPTION and CACHE_OPTION2 fields. + */ + CFA_P70_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD = 1, + /* Table scope to access. */ + CFA_P70_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD = 2, + /* + * Determines setting of OPTION field for all cache requests while + * processing any command other than EM_INSERT, EM_DELETE, or EM_CHAIN. + * For these latter commands, CACHE_OPTION sets the OPTION field for all + * read requests, and CACHE_OPTION2 sets it for all write requests. CFA + * does not support posted write requests. Therefore, for WRITE + * commands, CACHE_OPTION[1] must be set to 0. And for EM commands that + * send write requests (all but EM_SEARCH), CACHE_OPTION2[1] must be set + * to 0. + */ + CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD = 3, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. New + * CHAIN_PTR to write to tail bucket. + */ + CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD = 4, + /* + * Determines setting of OPTION field for all cache write requests for + * EM_INSERT, EM_DELETE, and EM_CHAIN commands. CFA does not support + * posted write requests. Therefore, CACHE_OPTION2[1] must be set to 0. + */ + CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD = 5, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. Static + * bucket address for bucket chain. + */ + CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD = 6, + CFA_P70_MPC_EM_CHAIN_CMD_MAX_FLD = 7, +}; + +/** + * Field IDS for READ_CMP: When no errors, teturns 1-4 consecutive 32B + * words from the TABLE_INDEX within the TABLE_SCOPE specified in the + * command, writing them to HOST_ADDRESS from the command. 
+ */ +enum cfa_p70_mpc_read_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_READ_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_READ_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_READ_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_READ_CMP_OPCODE_FLD = 3, + /* + * The length of the DMA that accompanies the completion in units of + * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates + * that there is no DMA that accompanies the completion. + */ + CFA_P70_MPC_READ_CMP_DMA_LENGTH_FLD = 4, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_READ_CMP_OPAQUE_FLD = 5, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_READ_CMP_V_FLD = 6, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. 
* + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_READ_CMP_HASH_MSB_FLD = 7, + /* TABLE_TYPE from the command. */ + CFA_P70_MPC_READ_CMP_TABLE_TYPE_FLD = 8, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_READ_CMP_TABLE_SCOPE_FLD = 9, + /* TABLE_INDEX from the command. */ + CFA_P70_MPC_READ_CMP_TABLE_INDEX_FLD = 10, + CFA_P70_MPC_READ_CMP_MAX_FLD = 11, +}; + +/** + * Field IDS for WRITE_CMP: Returns status of the write of 1-4 + * consecutive 32B words starting at TABLE_INDEX in the table specified + * by (TABLE_TYPE, TABLE_SCOPE). + */ +enum cfa_p70_mpc_write_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_WRITE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_WRITE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_WRITE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_WRITE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_WRITE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. 
+ */ + CFA_P70_MPC_WRITE_CMP_V_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_WRITE_CMP_HASH_MSB_FLD = 6, + /* TABLE_TYPE from the command. */ + CFA_P70_MPC_WRITE_CMP_TABLE_TYPE_FLD = 7, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_WRITE_CMP_TABLE_SCOPE_FLD = 8, + /* TABLE_INDEX from the command. */ + CFA_P70_MPC_WRITE_CMP_TABLE_INDEX_FLD = 9, + CFA_P70_MPC_WRITE_CMP_MAX_FLD = 10, +}; + +/** + * Field IDS for READ_CLR_CMP: When no errors, returns 1 32B word from + * TABLE_INDEX in the table specified by (TABLE_TYPE, TABLE_SCOPE). The + * data returned is the value prior to the clear. + */ +enum cfa_p70_mpc_read_clr_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_READ_CLR_CMP_TYPE_FLD = 0, + /* The command processing status. 
*/ + CFA_P70_MPC_READ_CLR_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_READ_CLR_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_READ_CLR_CMP_OPCODE_FLD = 3, + /* + * The length of the DMA that accompanies the completion in units of + * DWORDs (32b). Valid values are [0, 128]. A value of zero indicates + * that there is no DMA that accompanies the completion. + */ + CFA_P70_MPC_READ_CLR_CMP_DMA_LENGTH_FLD = 4, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_READ_CLR_CMP_OPAQUE_FLD = 5, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_READ_CLR_CMP_V_FLD = 6, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_READ_CLR_CMP_HASH_MSB_FLD = 7, + /* TABLE_TYPE from the command. 
*/ + CFA_P70_MPC_READ_CLR_CMP_TABLE_TYPE_FLD = 8, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD = 9, + /* TABLE_INDEX from the command. */ + CFA_P70_MPC_READ_CLR_CMP_TABLE_INDEX_FLD = 10, + CFA_P70_MPC_READ_CLR_CMP_MAX_FLD = 11, +}; + +/** + * Field IDS for INVALIDATE_CMP: Returns status for INVALIDATE commands. + */ +enum cfa_p70_mpc_invalidate_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_INVALIDATE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_INVALIDATE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_INVALIDATE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_INVALIDATE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_INVALIDATE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_INVALIDATE_CMP_V_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. 
* + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_INVALIDATE_CMP_HASH_MSB_FLD = 6, + /* TABLE_TYPE from the command. */ + CFA_P70_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD = 7, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD = 8, + /* TABLE_INDEX from the command. */ + CFA_P70_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD = 9, + CFA_P70_MPC_INVALIDATE_CMP_MAX_FLD = 10, +}; + +/** + * Field IDS for EM_SEARCH_CMP: For OK status, returns the index of the + * matching entry found for the EM key supplied in the command. Returns + * EM_MISS status if no match was found. + */ +enum cfa_p70_mpc_em_search_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_EM_SEARCH_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_EM_SEARCH_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_EM_SEARCH_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_EM_SEARCH_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. 
+ */ + CFA_P70_MPC_EM_SEARCH_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_EM_SEARCH_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK + * status, gives ENTRY_PTR[25:0] of the matching entry found. Otherwise, + * set to 0. + */ + CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. If the hash + * is computed (no errors during initial processing of the command), + * TABLE_INDEX2[23:0] is the static bucket address determined from the + * hash of the exact match entry key in the command and the (EM_SIZE, + * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24 + * in this case are set to 0. For any other status, it is always 0. 
+ */ + CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD = 9, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_SEARCH_CMP_V2_FLD = 10, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_P70_MPC_EM_SEARCH_CMP_BKT_NUM_FLD = 11, + /* See BKT_NUM description. */ + CFA_P70_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD = 12, + CFA_P70_MPC_EM_SEARCH_CMP_MAX_FLD = 13, +}; + +/** + * Field IDS for EM_INSERT_CMP: OK status indicates that the exact match + * entry from the command was successfully inserted. EM_DUPLICATE status + * indicates that the insert was aborted because an entry with the same + * exact match key was found and REPLACE=0 in the command. EM_ABORT + * status indicates that no duplicate was found, the tail bucket in the + * chain was full, and TABLE_INDEX2=0. No changes are made to the + * database in this case. 
TABLE_INDEX is the starting address at which + * to insert the exact match entry (from the command). TABLE_INDEX2 is + * the address at which to insert a new bucket at the tail of the static + * bucket chain if needed (from the command). CHAIN_UPD=1 if a new + * bucket was added at this address. TABLE_INDEX3 is the static bucket + * address for the chain, determined from hashing the exact match entry. + * Software needs this address and TABLE_INDEX in order to delete the + * entry using an EM_DELETE command. TABLE_INDEX4 is the index of an + * entry found that had a matching exact match key to the command entry + * key. If no matching entry was found, it is set to 0. There are two + * cases when there is a matching entry, depending on REPLACE from the + * command: * REPLACE=0: EM_DUPLICATE status is reported and the insert + * is aborted. Software can use the static bucket address + * (TABLE_INDEX3[23:0]) and the matching entry (TABLE_INDEX4) in an + * EM_DELETE command if it wishes to explicitly delete the matching + * entry. * REPLACE=1: REPLACED_ENTRY=1 to signal that the entry at + * TABLE_INDEX4 was replaced by the insert entry. REPLACED_ENTRY will + * only be 1 if reporting OK status in this case. Software can de- + * allocate the entry at TABLE_INDEX4. + */ +enum cfa_p70_mpc_em_insert_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_EM_INSERT_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_EM_INSERT_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_EM_INSERT_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_EM_INSERT_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. 
+ */ + CFA_P70_MPC_EM_INSERT_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_INSERT_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_EM_INSERT_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the starting address at which to insert + * the exact match entry. + */ + CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command, which is the index for the new tail bucket to add + * if needed (CHAIN_UPD=1 if it was used). + */ + CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. 
If the hash + * is computed (no errors during initial processing of the command), + * TABLE_INDEX2[23:0] is the static bucket address determined from the + * hash of the exact match entry key in the command and the (EM_SIZE, + * EM_BUCKETS) configuration for TABLE_SCOPE of the command. Bits 25:24 + * in this case are set to 0. For any other status, it is always 0. + */ + CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_INSERT_CMP_V2_FLD = 11, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. ENTRY_PTR of + * matching entry found. Set to 0 if no matching entry found. If + * REPLACED_ENTRY=1, that indicates a matching entry was found and + * REPLACE=1 in the command. In this case, the matching entry was + * replaced by the new entry in the command and this index can therefore + * be de-allocated. + */ + CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD = 12, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. 
In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_P70_MPC_EM_INSERT_CMP_BKT_NUM_FLD = 13, + /* See BKT_NUM description. */ + CFA_P70_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD = 14, + /* + * Specifies if the chain was updated while processing the command: Set + * to 1 when a new bucket is added to the tail of the static bucket + * chain at TABLE_INDEX2. This occurs if and only if the insert requires + * adding a new entry and the tail bucket is full. If set to 0, + * TABLE_INDEX2 was not used and is therefore still free. + */ + CFA_P70_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD = 15, + /* + * Set to 1 if a matching entry was found and REPLACE=1 in command. In + * the case, the entry starting at TABLE_INDEX4 was replaced and can + * therefore be de-allocated. Otherwise, this flag is set to 0. + */ + CFA_P70_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD = 16, + CFA_P70_MPC_EM_INSERT_CMP_MAX_FLD = 17, +}; + +/** + * Field IDS for EM_DELETE_CMP: OK status indicates that an ENTRY_PTR + * matching TABLE_INDEX was found in the static bucket chain specified + * and was therefore deleted. EM_MISS status indicates that no match was + * found. TABLE_INDEX is from the command. It is the index of the entry + * to delete. TABLE_INDEX2 is from the command. It is the static bucket + * address. TABLE_INDEX3 is the index of the tail bucket of the static + * bucket chain prior to processing the command. TABLE_INDEX4 is the + * index of the tail bucket of the static bucket chain after processing + * the command. If CHAIN_UPD=1 and TABLE_INDEX4==TABLE_INDEX2, the + * static bucket was the tail bucket, it became empty after the delete, + * the scope is a locked scope, and CHAIN_PTR was 0. In this case, the + * static bucket has been evicted from the cache. Otherwise, if + * CHAIN_UPD=1, the original tail bucket given by TABLE_INDEX3 was + * removed from the chain because it went empty. It can therefore be de- + * allocated. 
+ */ +enum cfa_p70_mpc_em_delete_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_EM_DELETE_CMP_TYPE_FLD = 0, + /* The command processing status. */ + CFA_P70_MPC_EM_DELETE_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_EM_DELETE_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_EM_DELETE_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_EM_DELETE_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_DELETE_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. 
* CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_EM_DELETE_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the index of the entry to delete. + */ + CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command. + */ + CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK or + * EM_MISS status, the index of the tail bucket of the chain prior to + * processing the command. If CHAIN_UPD=1, the bucket was removed and + * this index can be de-allocated. For other status values, it is set to + * 0. + */ + CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_DELETE_CMP_V2_FLD = 11, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK or + * EM_MISS status, the index of the tail bucket of the chain after + * processing the command. If CHAIN_UPD=0 (always for EM_MISS status), it is + * always equal to TABLE_INDEX3 as the chain was not updated. For other + * status values, it is set to 0. + */ + CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD = 12, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. 
* + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_P70_MPC_EM_DELETE_CMP_BKT_NUM_FLD = 13, + /* See BKT_NUM description. */ + CFA_P70_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD = 14, + /* + * Specifies if the chain was updated while processing the command: Set + * to 1 when a bucket is removed from the static bucket chain. This + * occurs if after the delete, the tail bucket is a dynamic bucket and + * no longer has any valid entries. In this case, software should de- + * allocate the dynamic bucket at TABLE_INDEX3. It is also set to 1 when + * the static bucket is evicted, which only occurs for locked scopes. + * See the EM_DELETE command description for details. + */ + CFA_P70_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD = 15, + CFA_P70_MPC_EM_DELETE_CMP_MAX_FLD = 16, +}; + +/** + * Field IDS for EM_CHAIN_CMP: OK status indicates that the CHAIN_PTR of + * the tail bucket was successfully updated. TABLE_INDEX is from the + * command. It is the value of the new CHAIN_PTR. TABLE_INDEX2 is from + * the command. TABLE_INDEX3 is the index of the tail bucket of the + * static bucket chain. + */ +enum cfa_p70_mpc_em_chain_cmp_fields { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records **(EXCEPT + * no_op!!!!)** . + */ + CFA_P70_MPC_EM_CHAIN_CMP_TYPE_FLD = 0, + /* The command processing status. 
*/ + CFA_P70_MPC_EM_CHAIN_CMP_STATUS_FLD = 1, + /* + * This field represents the Mid-Path client that generated the + * completion. + */ + CFA_P70_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD = 2, + /* OPCODE from the command. */ + CFA_P70_MPC_EM_CHAIN_CMP_OPCODE_FLD = 3, + /* + * This is a copy of the opaque field from the mid path BD of this + * command. + */ + CFA_P70_MPC_EM_CHAIN_CMP_OPAQUE_FLD = 4, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_CHAIN_CMP_V1_FLD = 5, + /* + * For EM_SEARCH and EM_INSERT commands without errors that abort the + * command processing prior to the hash computation, set to HASH[35:24] + * of the hash computed from the exact match entry key in the command. + * For all other cases, set to 0 except for the following error + * conditions, which carry debug information in this field as shown by + * error status below: * FMT_ERR: - Set to {7'd0, HOST_ADDRESS[1:0], + * DATA_SIZE[2:0]}. - If HOST_ADDRESS or DATA_SIZE field not present + * they are set to 0. * SCOPE_ERR: - Set to {1'b0, SVIF[10:0]}. * + * ADDR_ERR: - Only possible when TABLE_TYPE=EM or for EM* commands - + * Set to {1'b0, TABLE_INDEX[2:0], 5'd0, DATA_SIZE[2:0]} - + * TABLE_INDEX[2]=1 if TABLE_INDEX3 had an error - TABLE_INDEX[1]=1 if + * TABLE_INDEX2 had an error - TABLE_INDEX[0]=1 if TABLE_INDEX had an + * error - TABLE_INDEX[n]=0 if the completion does not have the + * corresponding TABLE_INDEX field above. * CACHE_ERR: - Set to {9'd0, + * DATA_SIZE[2:0]} + */ + CFA_P70_MPC_EM_CHAIN_CMP_HASH_MSB_FLD = 6, + /* TABLE_SCOPE from the command. */ + CFA_P70_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD = 7, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX + * from the command, which is the new CHAIN_PTR for the tail bucket of + * the static bucket chain. 
+ */ + CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD = 8, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. TABLE_INDEX2 + * from the command. + */ + CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD = 9, + /* + * A 32B index into the EM table identified by TABLE_SCOPE. For OK + * status, the index of the tail bucket of the chain. Otherwise, set to + * 0. + */ + CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD = 10, + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + CFA_P70_MPC_EM_CHAIN_CMP_V2_FLD = 11, + /* + * BKT_NUM is the bucket number in chain of the tail bucket after + * finishing processing the command, except when the command stops + * processing before the tail bucket. NUM_ENTRIES is the number of valid + * entries in the BKT_NUM bucket. The following describes the cases + * where BKT_NUM and NUM_ENTRIES are not for the tail bucket after + * finishing processing of the command: * For UNSPRT_ERR, FMT_ERR, + * SCOPE_ERR, or ADDR_ERR completion status, BKT_NUM will be set to 0. * + * For CACHE_ERR completion status, BKT_NUM will be set to the bucket + * number that was last read without error. If ERR=1 in the response to + * the static bucket read, BKT_NUM and NUM_ENTRIES are set to 0. The + * static bucket is number 0, BKT_NUM increments for each new bucket in + * the chain, and saturates at 255. Therefore, if the value is 255, + * BKT_NUM may or may not be accurate. In this case, though, NUM_ENTRIES + * will still be the correct value as described above for the bucket. + */ + CFA_P70_MPC_EM_CHAIN_CMP_BKT_NUM_FLD = 12, + /* See BKT_NUM description. */ + CFA_P70_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD = 13, + /* + * Set to 1 when the scope is a locked scope, the tail bucket is the + * static bucket, the bucket is empty (all of its ENTRY_PTR values are + * 0), and TABLE_INDEX=0 in the command. 
In this case, the static bucket + * is evicted. For all other cases, it is set to 0. + */ + CFA_P70_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD = 14, + CFA_P70_MPC_EM_CHAIN_CMP_MAX_FLD = 15, +}; + +/* clang-format on */ + +#endif /* _CFA_P70_MPC_FIELD_IDS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h new file mode 100644 index 000000000000..d1f3880e1432 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/mpc/include/cfa_p70_mpc_field_mapping.h @@ -0,0 +1,768 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + * + * Date: 09/29/22 11:50:38 + * + * Note: This file is scripted generated by ./cfa_header_gen.py. + * DO NOT modify this file manually !!!! + * + */ + +#ifndef _CFA_P70_MPC_FIELD_MAPPING_H_ +#define _CFA_P70_MPC_FIELD_MAPPING_H_ + +/* clang-format off */ +/** Device specific Field ID mapping structure */ +struct field_mapping { + bool valid; + u16 mapping; +}; + +/** + * Global to device field id mapping for READ_CMD + */ +struct field_mapping cfa_p70_mpc_read_cmd_gbl_to_dev + [CFA_BLD_MPC_READ_CMD_MAX_FLD] = { + [CFA_BLD_MPC_READ_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD] = { 
+ .valid = true, + .mapping = CFA_P70_MPC_READ_CMD_HOST_ADDRESS_FLD, + }, +}; + +/** + * Global to device field id mapping for WRITE_CMD + */ +struct field_mapping cfa_p70_mpc_write_cmd_gbl_to_dev + [CFA_BLD_MPC_WRITE_CMD_MAX_FLD] = { + [CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_WRITE_THROUGH_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_WRITE_THROUGH_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMD_TABLE_INDEX_FLD, + }, +}; + +/** + * Global to device field id mapping for READ_CLR_CMD + */ +struct field_mapping cfa_p70_mpc_read_clr_cmd_gbl_to_dev + [CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD] = { + [CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_TABLE_INDEX_FLD, + }, + 
[CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMD_CLEAR_MASK_FLD, + }, +}; + +/** + * Global to device field id mapping for INVALIDATE_CMD + */ +struct field_mapping cfa_p70_mpc_invalidate_cmd_gbl_to_dev + [CFA_BLD_MPC_INVALIDATE_CMD_MAX_FLD] = { + [CFA_BLD_MPC_INVALIDATE_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMD_TABLE_INDEX_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_SEARCH_CMD + */ +struct field_mapping cfa_p70_mpc_em_search_cmd_gbl_to_dev + [CFA_BLD_MPC_EM_SEARCH_CMD_MAX_FLD] = { + [CFA_BLD_MPC_EM_SEARCH_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMD_CACHE_OPTION_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_INSERT_CMD + */ +struct field_mapping cfa_p70_mpc_em_insert_cmd_gbl_to_dev + 
[CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD] = { + [CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_WRITE_THROUGH_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_DATA_SIZE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_CACHE_OPTION2_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_TABLE_INDEX2_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMD_REPLACE_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_DELETE_CMD + */ +struct field_mapping cfa_p70_mpc_em_delete_cmd_gbl_to_dev + [CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD] = { + [CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_WRITE_THROUGH_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD, + }, + 
[CFA_BLD_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_CACHE_OPTION2_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_CHAIN_CMD + */ +struct field_mapping cfa_p70_mpc_em_chain_cmd_gbl_to_dev + [CFA_BLD_MPC_EM_CHAIN_CMD_MAX_FLD] = { + [CFA_BLD_MPC_EM_CHAIN_CMD_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_WRITE_THROUGH_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_CACHE_OPTION2_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMD_TABLE_INDEX2_FLD, + }, +}; + +/** + * Global to device field id mapping for READ_CMP + */ +struct field_mapping cfa_p70_mpc_read_cmp_gbl_to_dev + [CFA_BLD_MPC_READ_CMP_MAX_FLD] = { + [CFA_BLD_MPC_READ_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_READ_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_READ_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_READ_CMP_DMA_LENGTH_FLD] = { + .valid = true, + .mapping = 
CFA_P70_MPC_READ_CMP_DMA_LENGTH_FLD, + }, + [CFA_BLD_MPC_READ_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_READ_CMP_V_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_V_FLD, + }, + [CFA_BLD_MPC_READ_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_READ_CMP_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_READ_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CMP_TABLE_INDEX_FLD, + }, +}; + +/** + * Global to device field id mapping for WRITE_CMP + */ +struct field_mapping cfa_p70_mpc_write_cmp_gbl_to_dev + [CFA_BLD_MPC_WRITE_CMP_MAX_FLD] = { + [CFA_BLD_MPC_WRITE_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_V_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_V_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_WRITE_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_WRITE_CMP_TABLE_INDEX_FLD, + }, 
+}; + +/** + * Global to device field id mapping for READ_CLR_CMP + */ +struct field_mapping cfa_p70_mpc_read_clr_cmp_gbl_to_dev + [CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD] = { + [CFA_BLD_MPC_READ_CLR_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_DMA_LENGTH_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_DMA_LENGTH_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_V_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_V_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_READ_CLR_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_READ_CLR_CMP_TABLE_INDEX_FLD, + }, +}; + +/** + * Global to device field id mapping for INVALIDATE_CMP + */ +struct field_mapping cfa_p70_mpc_invalidate_cmp_gbl_to_dev + [CFA_BLD_MPC_INVALIDATE_CMP_MAX_FLD] = { + [CFA_BLD_MPC_INVALIDATE_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = 
CFA_P70_MPC_INVALIDATE_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_V_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_V_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_TYPE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_INVALIDATE_CMP_TABLE_INDEX_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_SEARCH_CMP + */ +struct field_mapping cfa_p70_mpc_em_search_cmp_gbl_to_dev + [CFA_BLD_MPC_EM_SEARCH_CMP_MAX_FLD] = { + [CFA_BLD_MPC_EM_SEARCH_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_V1_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_V1_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = 
CFA_P70_MPC_EM_SEARCH_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_TABLE_INDEX2_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_V2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_V2_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_BKT_NUM_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_BKT_NUM_FLD, + }, + [CFA_BLD_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_SEARCH_CMP_NUM_ENTRIES_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_INSERT_CMP + */ +struct field_mapping cfa_p70_mpc_em_insert_cmp_gbl_to_dev + [CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD] = { + [CFA_BLD_MPC_EM_INSERT_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_V1_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_V1_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = 
CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX2_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_V2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_V2_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_TABLE_INDEX4_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_BKT_NUM_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD, + }, + [CFA_BLD_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_INSERT_CMP_REPLACED_ENTRY_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_DELETE_CMP + */ +struct field_mapping cfa_p70_mpc_em_delete_cmp_gbl_to_dev + [CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD] = { + [CFA_BLD_MPC_EM_DELETE_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_V1_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_V1_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping 
= CFA_P70_MPC_EM_DELETE_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX2_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX3_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_V2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_V2_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_TABLE_INDEX4_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_BKT_NUM_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_BKT_NUM_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_NUM_ENTRIES_FLD, + }, + [CFA_BLD_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_DELETE_CMP_CHAIN_UPD_FLD, + }, +}; + +/** + * Global to device field id mapping for EM_CHAIN_CMP + */ +struct field_mapping cfa_p70_mpc_em_chain_cmp_gbl_to_dev + [CFA_BLD_MPC_EM_CHAIN_CMP_MAX_FLD] = { + [CFA_BLD_MPC_EM_CHAIN_CMP_TYPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TYPE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_STATUS_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_STATUS_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_MP_CLIENT_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_OPCODE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_OPCODE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_OPAQUE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_OPAQUE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_V1_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_V1_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_HASH_MSB_FLD] = { + .valid = true, + .mapping = 
CFA_P70_MPC_EM_CHAIN_CMP_HASH_MSB_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_SCOPE_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX2_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_TABLE_INDEX3_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_V2_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_V2_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_BKT_NUM_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_BKT_NUM_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_NUM_ENTRIES_FLD, + }, + [CFA_BLD_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD] = { + .valid = true, + .mapping = CFA_P70_MPC_EM_CHAIN_CMP_CHAIN_UPD_FLD, + }, +}; + +/* clang-format on */ + +#endif /* _CFA_P70_MPC_FIELD_MAPPING_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/cfa_tim.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/cfa_tim.c new file mode 100644 index 000000000000..64404b0a24fa --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/cfa_tim.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "bnxt_compat.h" +#include "cfa_util.h" +#include "cfa_types.h" +#include "cfa_tim.h" + +static u32 cfa_tim_size(u8 max_tbl_scopes, u8 max_regions) +{ + return (sizeof(struct cfa_tim) + + (max_tbl_scopes * max_regions * CFA_DIR_MAX) * sizeof(void *)); +} + +int cfa_tim_query(u8 max_tbl_scopes, u8 max_regions, + u32 *tim_db_size) +{ + if (!tim_db_size) { + netdev_err(NULL, "tim_db_size = %p\n", tim_db_size); + return -EINVAL; + } + + *tim_db_size = cfa_tim_size(max_tbl_scopes, max_regions); + + return 0; +} + +int cfa_tim_open(void *tim, u32 tim_db_size, u8 max_tbl_scopes, + u8 max_regions) +{ + struct cfa_tim *ctx = (struct cfa_tim *)tim; + + if (!tim) { + netdev_err(NULL, "tim = %p\n", tim); + return -EINVAL; + } + if (tim_db_size < cfa_tim_size(max_tbl_scopes, max_regions)) { + netdev_err(NULL, "max_tbl_scopes = %d, max_regions = %d\n", + max_tbl_scopes, max_regions); + return -EINVAL; + } + + memset(tim, 0, tim_db_size); + + ctx->signature = CFA_TIM_SIGNATURE; + ctx->max_tsid = max_tbl_scopes; + ctx->max_regions = max_regions; + ctx->tpm_tbl = (void **)(ctx + 1); + + return 0; +} + +int cfa_tim_close(void *tim) +{ + struct cfa_tim *ctx = (struct cfa_tim *)tim; + + if (!tim || ctx->signature != CFA_TIM_SIGNATURE) { + netdev_err(NULL, "tim = %p\n", tim); + return -EINVAL; + } + + memset(tim, 0, cfa_tim_size(ctx->max_tsid, ctx->max_regions)); + + return 0; +} + +int cfa_tim_tpm_inst_set(void *tim, u8 tsid, u8 region_id, + int dir, void *tpm_inst) +{ + struct cfa_tim *ctx = (struct cfa_tim *)tim; + + if (!tim || ctx->signature != CFA_TIM_SIGNATURE) { + netdev_err(NULL, "tim = %p\n", tim); + return -EINVAL; + } + + if (!(CFA_CHECK_UPPER_BOUNDS(tsid, ctx->max_tsid - 1) && + CFA_CHECK_UPPER_BOUNDS(region_id, ctx->max_regions - 1))) { + netdev_err(NULL, "tsid = %d, region_id = %d\n", tsid, region_id); + return -EINVAL; + } + + ctx->tpm_tbl[CFA_TIM_MAKE_INDEX(tsid, region_id, dir, + ctx->max_regions, ctx->max_tsid)] = 
tpm_inst; + return 0; +} + +int cfa_tim_tpm_inst_get(void *tim, u8 tsid, u8 region_id, + int dir, void **tpm_inst) +{ + struct cfa_tim *ctx = (struct cfa_tim *)tim; + + *tpm_inst = NULL; + + if (!tim || ctx->signature != CFA_TIM_SIGNATURE) { + netdev_err(NULL, "tim = %p\n", tim); + return -EINVAL; + } + + if (!(CFA_CHECK_UPPER_BOUNDS(tsid, ctx->max_tsid - 1) && + CFA_CHECK_UPPER_BOUNDS(region_id, ctx->max_regions - 1))) { + netdev_err(NULL, "tsid = %d, region_id = %d\n", tsid, region_id); + return -EINVAL; + } + + *tpm_inst = ctx->tpm_tbl[CFA_TIM_MAKE_INDEX(tsid, region_id, dir, ctx->max_regions, + ctx->max_tsid)]; + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/include/cfa_tim.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/include/cfa_tim.h new file mode 100644 index 000000000000..ab992e763382 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tim/include/cfa_tim.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _CFA_TIM_H_ +#define _CFA_TIM_H_ + +#define CFA_TIM_SIGNATURE 0xCFACEE11 + +/* + * Total index space is (MaxDir * MaxRegion * MaxTableScope), the + * following macro satisfies that: + * + * (Dir# * (MaxRegionSpace + MaxTableScope)) + + * (TableScope# * (MaxRegionSpace)) + + * Region# + * + * Examples: + * + * MaxD MaxR MaxT Total + * 2 1 1 2 + * + * Dir Region TableScope Index + * 0 0 0 0 + * 1 0 0 1 + * + * MaxD MaxR MaxT Total + * 2 2 1 4 + * + * Dir Region TableScope Index + * 0 0 0 0 + * 1 0 0 2 + * 0 1 0 1 + * 1 1 0 3 + * + * MaxD MaxR MaxT Total + * 2 2 3 12 + * + * Dir Region TableScope Index + * 0 0 0 0 + * 1 0 0 6 + * 0 1 0 1 + * 1 1 0 7 + * 0 0 1 2 + * 1 0 1 8 + * 0 1 1 3 + * 1 1 1 9 + * 0 0 2 4 + * 1 0 2 10 + * 0 1 2 5 + * 1 1 2 11 + * + */ +#define CFA_TIM_MAKE_INDEX(tsid, region, dir, max_regions, max_tsid) \ + (((dir) * (max_regions) * (max_tsid)) + ((tsid) * (max_regions)) + (region)) + +/** + * CFA Table Scope Instance Manager Database + * + * Structure used to store CFA Table Scope Instance Manager database info + */ +struct cfa_tim { + /* Signature of the CFA Table Scope Instance Manager Database */ + uint32_t signature; + /* Maximum number of Table Scope Ids */ + uint8_t max_tsid; + /* Maximum number of regions per Table Scope */ + uint8_t max_regions; + /* TPM instance table */ + void **tpm_tbl; +}; + +/** + * @addtogroup CFA_TIM CFA Table Scope Instance Manager + * \ingroup CFA_V3 + * The purpose of the CFA Table Scope Instance manager is to provide a + * centralized management of Table Scope Pool Manager instances. Each instance + * is identified by the Table Scope id and Region id. A caller can set and + * retrieve the instance handle using the Table Scope Id and Region Id. + * @{ + */ + +/** CFA Table Scope Instance Manager query DB size API + * + * This API returns the size of memory required for internal data structures to + * manage the table scope instances. 
+ * + * @param[in] max_tbl_scopes + * Maximum number of table scope ids available to manage. + * + * @param[in] max_regions + * Maximum number of regions per table scope. + * + * @param[out] tim_db_size + * Pointer to 32 bit integer to return the amount of memory required. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tim_query(uint8_t max_tbl_scopes, uint8_t max_regions, + uint32_t *tim_db_size); + +/** CFA Table Scope Instance Manager open API + * + * This API initializes the CFA Table Scope Instance Manager database + * + * @param[in] tim + * Pointer to the memory used for the CFA Table Scope Instance Manager + * Database. + * + * @param[in] tim_db_size + * The size of memory block pointed to by tim parameter. + * + * @param[in] max_tbl_scopes + * Maximum number of table scope ids available to manage. + * + * @param[in] max_regions + * Maximum number of regions per table scope. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tim_open(void *tim, uint32_t tim_db_size, uint8_t max_tbl_scopes, + uint8_t max_regions); + +/** CFA Table Scope Instance Manager close API + * + * This API resets the CFA Table Scope Instance Manager database + * + * @param[in] tim + * Pointer to the database memory for the Table Scope Instance Manager. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tim_close(void *tim); + +/** CFA Table Scope Instance Manager set instance API + * + * This API sets the TPM instance handle into TIM. + * + * @param[in] tim + * Pointer to the database memory for the Table Scope Instance Manager. + * + * @param[in] tsid + * The Table scope id of the instance. + * + * @param[in] region_id + * The region id of the instance. + * + * @param[in] dir + * The direction of the instance. + * + * @param[in] tpm_inst + * The handle of TPM instance. 
+ * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tim_tpm_inst_set(void *tim, uint8_t tsid, uint8_t region_id, + int dir, void *tpm_inst); + +/** CFA Table Scope Instance Manager get instance API + * + * This API gets the TPM instance handle from TIM. + * + * @param[in] tim + * Pointer to the database memory for the Table Scope Instance Manager. + * + * @param[in] tsid + * The Table scope id of the instance. + * + * @param[in] region_id + * The region id of the instance. + * + * @param[in] dir + * The direction of the instance. + * + * @param[out] tpm_inst + * Pointer to memory location to return the handle of TPM instance. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tim_tpm_inst_get(void *tim, uint8_t tsid, uint8_t region_id, + int dir, void **tpm_inst); + +/**@}*/ + +#endif /* _CFA_TIM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/cfa_tpm.c b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/cfa_tpm.c new file mode 100644 index 000000000000..7e3fdca3a52f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/cfa_tpm.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "bnxt_compat.h" +#include "cfa_util.h" +#include "cfa_tpm.h" +#include "bitalloc.h" + +static u32 cfa_tpm_size(u16 max_pools) +{ + return (sizeof(struct cfa_tpm) + BITALLOC_SIZEOF(max_pools) + + max_pools * sizeof(u16)); +} + +int cfa_tpm_query(u16 max_pools, u32 *tpm_db_size) +{ + if (!tpm_db_size) { + netdev_err(NULL, "tpm_db_size = %p\n", tpm_db_size); + return -EINVAL; + } + + if (!CFA_CHECK_BOUNDS(max_pools, CFA_TPM_MIN_POOLS, + CFA_TPM_MAX_POOLS)) { + netdev_err(NULL, "max_pools = %d\n", max_pools); + return -EINVAL; + } + + *tpm_db_size = cfa_tpm_size(max_pools); + + return 0; +} + +int cfa_tpm_open(void *tpm, u32 tpm_db_size, u16 max_pools) +{ + int i; + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm) { + netdev_err(NULL, "tpm = %p\n", tpm); + return -EINVAL; + } + + if (!(CFA_CHECK_BOUNDS(max_pools, CFA_TPM_MIN_POOLS, + CFA_TPM_MAX_POOLS) && + tpm_db_size >= cfa_tpm_size(max_pools))) { + netdev_err(NULL, "max_pools = %d tpm_db_size = %d\n", max_pools, tpm_db_size); + return -EINVAL; + } + + memset(tpm, 0, tpm_db_size); + + ctx->signature = CFA_TPM_SIGNATURE; + ctx->max_pools = max_pools; + ctx->pool_ba = (struct bitalloc *)(ctx + 1); + ctx->fid_tbl = (u16 *)((u8 *)ctx->pool_ba + + BITALLOC_SIZEOF(max_pools)); + + if (bnxt_ba_init(ctx->pool_ba, max_pools, true)) + return -EINVAL; + + for (i = 0; i < max_pools; i++) + ctx->fid_tbl[i] = CFA_INVALID_FID; + + return 0; +} + +int cfa_tpm_close(void *tpm) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p\n", tpm); + return -EINVAL; + } + + memset(tpm, 0, cfa_tpm_size(ctx->max_pools)); + + return 0; +} + +int cfa_tpm_alloc(void *tpm, u16 *pool_id) +{ + int rc; + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || !pool_id || + ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p, pool_id = %p\n", tpm, pool_id); + return -EINVAL; + } + + rc = 
bnxt_ba_alloc(ctx->pool_ba); + + if (rc < 0) + return -ENOMEM; + + *pool_id = rc; + + ctx->fid_tbl[rc] = CFA_INVALID_FID; + + return 0; +} + +int cfa_tpm_free(void *tpm, u16 pool_id) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p, pool_id = %d\n", tpm, pool_id); + return -EINVAL; + } + + if (ctx->fid_tbl[pool_id] != CFA_INVALID_FID) { + netdev_err(NULL, "A function (%d) is still using the pool (%d)\n", + ctx->fid_tbl[pool_id], pool_id); + return -EINVAL; + } + + return bnxt_ba_free(ctx->pool_ba, pool_id); +} + +int cfa_tpm_fid_add(void *tpm, u16 pool_id, u16 fid) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p, pool_id = %d\n", tpm, pool_id); + return -EINVAL; + } + + if (!bnxt_ba_inuse(ctx->pool_ba, pool_id)) { + netdev_err(NULL, "Pool id (%d) was not allocated\n", pool_id); + return -EINVAL; + } + + if (ctx->fid_tbl[pool_id] != CFA_INVALID_FID && + ctx->fid_tbl[pool_id] != fid) { + netdev_err(NULL, "A function id %d was already set to the pool %d\n", + fid, ctx->fid_tbl[pool_id]); + return -EINVAL; + } + + ctx->fid_tbl[pool_id] = fid; + + return 0; +} + +int cfa_tpm_fid_rem(void *tpm, u16 pool_id, u16 fid) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p, pool_id = %d\n", tpm, pool_id); + return -EINVAL; + } + + if (!bnxt_ba_inuse(ctx->pool_ba, pool_id)) { + netdev_err(NULL, "Pool id (%d) was not allocated\n", pool_id); + return -EINVAL; + } + + if (ctx->fid_tbl[pool_id] == CFA_INVALID_FID || + ctx->fid_tbl[pool_id] != fid) { + netdev_err(NULL, "The function id %d was not set to the pool %d\n", fid, pool_id); + return -EINVAL; + } + + ctx->fid_tbl[pool_id] = CFA_INVALID_FID; + + return 0; +} + +int cfa_tpm_srch_by_pool(void *tpm, u16 pool_id, u16 *fid) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if 
(!tpm || ctx->signature != CFA_TPM_SIGNATURE || !fid || + pool_id >= ctx->max_pools) { + netdev_err(NULL, "tpm = %p, pool_id = %d, fid = %p\n", tpm, pool_id, fid); + return -EINVAL; + } + + if (!bnxt_ba_inuse(ctx->pool_ba, pool_id)) { + netdev_err(NULL, "Pool id (%d) was not allocated\n", pool_id); + return -EINVAL; + } + + if (ctx->fid_tbl[pool_id] == CFA_INVALID_FID) { + netdev_err(NULL, "A function id was not set to the pool (%d)\n", pool_id); + return -EINVAL; + } + + *fid = ctx->fid_tbl[pool_id]; + + return 0; +} + +int cfa_tpm_srchm_by_fid(void *tpm, enum cfa_srch_mode srch_mode, u16 fid, + u16 *pool_id) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + u16 i; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE || !pool_id) { + netdev_err(NULL, "tpm = %p, pool_id = %p fid = %d\n", tpm, pool_id, fid); + return -EINVAL; + } + + if (srch_mode == CFA_SRCH_MODE_FIRST) + ctx->next_index = 0; + + for (i = ctx->next_index; i < ctx->max_pools; i++) { + if (ctx->fid_tbl[i] == fid) { + ctx->next_index = i + 1; + *pool_id = i; + return 0; + } + } + + ctx->next_index = ctx->max_pools; + + return -ENOENT; +} + +int cfa_tpm_pool_size_set(void *tpm, u8 pool_sz_exp) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE) { + netdev_err(NULL, "tpm = %p\n", tpm); + return -EINVAL; + } + + ctx->pool_sz_exp = pool_sz_exp; + + return 0; +} + +int cfa_tpm_pool_size_get(void *tpm, u8 *pool_sz_exp) +{ + struct cfa_tpm *ctx = (struct cfa_tpm *)tpm; + + if (!tpm || ctx->signature != CFA_TPM_SIGNATURE || !pool_sz_exp) { + netdev_err(NULL, "tpm = %p, pool_sz_exp = %p\n", tpm, pool_sz_exp); + return -EINVAL; + } + + *pool_sz_exp = ctx->pool_sz_exp; + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/include/cfa_tpm.h b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/include/cfa_tpm.h new file mode 100644 index 000000000000..6f52b82c3b12 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/hcapi/cfa_v3/tpm/include/cfa_tpm.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. + */ +#ifndef _CFA_TPM_H_ +#define _CFA_TPM_H_ + +#include "cfa_types.h" + +#define CFA_TPM_SIGNATURE 0xCFACF0CD + +#define CFA_TPM_MAX_POOLS 1040 +#define CFA_TPM_MIN_POOLS 1 + +#define CFA_INVALID_FID 0xffff + +/** + * CFA Table Scope Manager Pool Database + * + * Structure used to store CFA Table Scope Pool Manager database info + */ +struct cfa_tpm { + /* Signature of the CFA Table Scope Pool Manager Database */ + uint32_t signature; + /* Maximum number of pools */ + uint16_t max_pools; + /* Size of each pool, in powers of 2 */ + uint8_t pool_sz_exp; + /* Next index for search multiple by fid */ + uint16_t next_index; + /* Bitmap to keep track of pool usage */ + struct bitalloc *pool_ba; + /* Fid table */ + uint16_t *fid_tbl; +}; + +/** + * @addtogroup CFA_TPM CFA Table Scope Pool Manager + * \ingroup CFA_V3 + * The purpose of the CFA Table Scope pool manager is to provide a centralized + * management of Table Scope region pools. Each CFA TPM instance manages the + * pools belonging to one region. The Table Scope Pool Manager(TPM) keeps + * track of fids that are using the pools. + * @{ + */ + +/** CFA Table Scope Pool Manager query DB size API + * + * This API returns the size of memory required for internal data structures to + * manage the table scope pool ids, and user fids. + * + * @param[in] max_pools + * Maximum number of pool ids available to manage. + * + * @param[out] tpm_db_size + * Pointer to 32 bit integer to return the amount of memory required. 
+ * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_query(uint16_t max_pools, uint32_t *tpm_db_size); + +/** CFA Table Scope Pool Manager open API + * + * This API initializes the CFA Table Scope Pool Manager database + * + * @param[in] tpm + * Pointer to the memory used for the CFA Table Scope Pool Manager Database. + * + * @param[in] tpm_db_size + * The size of memory block pointed to by tpm parameter. + * + * @param[in] max_pools + * Maximum number of pool ids to manage. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_open(void *tpm, uint32_t tpm_db_size, uint16_t max_pools); + +/** CFA Table Scope Pool Manager close API + * + * This API resets the CFA Table Scope Pool Manager database + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_close(void *tpm); + +/** CFA Table Scope pool Manager alloc API + * + * This API allocates a pool Id. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[out] pool_id + * Pointer to memory location to return the allocated Pool Id. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_alloc(void *tpm, uint16_t *pool_id); + +/** CFA Table Scope Pool Manager free API + * + * This API frees a previously allocated Pool Id. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] pool_id + * Pool Id to be freed. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_free(void *tpm, uint16_t pool_id); + +/** CFA Table Scope Pool Manager add fid API + * + * This API adds an fid to a Pool Id. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] pool_id + * Pool Id to which the fid has to be added. 
+ * + * @param[in] fid + * Function id to be added. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_fid_add(void *tpm, uint16_t pool_id, uint16_t fid); + +/** CFA Table Scope Pool Manager remove fid API + * + * This API removes a previously added fid from a Pool Id. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] pool_id + * Pool Id from which the fid has to be removed. + * + * @param[in] fid + * Function id to be removed. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_fid_rem(void *tpm, uint16_t pool_id, uint16_t fid); + +/** CFA Table Scope Pool Manager search by pool id API + * + * This API searches for the fid that is added to the pool id. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] pool_id + * Pool id to be searched for. + * + * @param[out] fid + * Pointer to memory location to return the fid that is added + * to the Pool id.. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_srch_by_pool(void *tpm, uint16_t pool_id, uint16_t *fid); + +/** CFA Table Scope Pool Manager search by fid API + * + * This API searches for the Pool ids to which fid is added. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] srch_mode + * srch_mode indicates if the iteration is for the first match, which + * indicates the start of new iteration or for the next match. + * + * @param[in] fid + * Function id to be searched for. + * + * @param[out] pool_id + * Pointer to memory location to return the Pool Id to which fid is + * added. 
+ * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_srchm_by_fid(void *tpm, enum cfa_srch_mode srch_mode, uint16_t fid, + uint16_t *pool_id); + +/** CFA Table Scope Pool Manager set pool size API + * + * This API sets the pool size into TPM. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[in] pool_sz_exp + * The size of each pool in power of 2. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_pool_size_set(void *tpm, uint8_t pool_sz_exp); + +/** CFA Table Scope Pool Manager get pool size API + * + * This API returns the pool size from TPM. + * + * @param[in] tpm + * Pointer to the database memory for the Table Scope Pool Manager. + * + * @param[out] pool_sz_exp + * Pointer to memory location to return the pool size in power of 2. + * + * @return + * Returns 0 if successful, Error Code otherwise + */ +int cfa_tpm_pool_size_get(void *tpm, uint8_t *pool_sz_exp); + +/**@}*/ + +#endif /* _CFA_TPM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_resource_types.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_resource_types.h new file mode 100644 index 000000000000..ca52c7846dd9 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_resource_types.h @@ -0,0 +1,265 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Copyright(c) 2001-2021, Broadcom. All rights reserved. The + * term Broadcom refers to Broadcom Inc. and/or its subsidiaries. + * Proprietary and Confidential Information. + * + * This source file is the property of Broadcom Corporation, and + * may not be copied or distributed in any isomorphic form without + * the prior written consent of Broadcom Corporation. + * + * DO NOT MODIFY!!! This file is automatically generated. + */ + +#ifndef _CFA_RESOURCE_TYPES_H_ +#define _CFA_RESOURCE_TYPES_H_ +/* This is the constant used to define invalid CFA + * resource types across all devices. 
+ */ +#define CFA_RESOURCE_TYPE_INVALID 65535 + +/* L2 Context TCAM High priority entries */ +#define CFA_RESOURCE_TYPE_P59_L2_CTXT_TCAM_HIGH 0x0UL +/* L2 Context TCAM Low priority entries */ +#define CFA_RESOURCE_TYPE_P59_L2_CTXT_TCAM_LOW 0x1UL +/* L2 Context REMAP high priority entries */ +#define CFA_RESOURCE_TYPE_P59_L2_CTXT_REMAP_HIGH 0x2UL +/* L2 Context REMAP Low priority entries */ +#define CFA_RESOURCE_TYPE_P59_L2_CTXT_REMAP_LOW 0x3UL +/* Profile Func */ +#define CFA_RESOURCE_TYPE_P59_PROF_FUNC 0x4UL +/* Profile TCAM */ +#define CFA_RESOURCE_TYPE_P59_PROF_TCAM 0x5UL +/* Exact Match Profile Id */ +#define CFA_RESOURCE_TYPE_P59_EM_PROF_ID 0x6UL +/* Wildcard TCAM Profile Id */ +#define CFA_RESOURCE_TYPE_P59_WC_TCAM_PROF_ID 0x7UL +/* Wildcard TCAM */ +#define CFA_RESOURCE_TYPE_P59_WC_TCAM 0x8UL +/* Meter Profile */ +#define CFA_RESOURCE_TYPE_P59_METER_PROF 0x9UL +/* Meter */ +#define CFA_RESOURCE_TYPE_P59_METER 0xaUL +/* Meter */ +#define CFA_RESOURCE_TYPE_P59_MIRROR 0xbUL +/* Source Properties TCAM */ +#define CFA_RESOURCE_TYPE_P59_SP_TCAM 0xcUL +/* Exact Match Flexible Key Builder */ +#define CFA_RESOURCE_TYPE_P59_EM_FKB 0xdUL +/* Wildcard Flexible Key Builder */ +#define CFA_RESOURCE_TYPE_P59_WC_FKB 0xeUL +/* Table Scope */ +#define CFA_RESOURCE_TYPE_P59_TBL_SCOPE 0xfUL +/* L2 Func */ +#define CFA_RESOURCE_TYPE_P59_L2_FUNC 0x10UL +/* EPOCH 0 */ +#define CFA_RESOURCE_TYPE_P59_EPOCH0 0x11UL +/* EPOCH 1 */ +#define CFA_RESOURCE_TYPE_P59_EPOCH1 0x12UL +/* Metadata */ +#define CFA_RESOURCE_TYPE_P59_METADATA 0x13UL +/* Connection Tracking Rule TCAM */ +#define CFA_RESOURCE_TYPE_P59_CT_RULE_TCAM 0x14UL +/* Range Profile */ +#define CFA_RESOURCE_TYPE_P59_RANGE_PROF 0x15UL +/* Range */ +#define CFA_RESOURCE_TYPE_P59_RANGE 0x16UL +/* Link Aggregation */ +#define CFA_RESOURCE_TYPE_P59_LAG 0x17UL +/* VEB TCAM */ +#define CFA_RESOURCE_TYPE_P59_VEB_TCAM 0x18UL +#define CFA_RESOURCE_TYPE_P59_LAST CFA_RESOURCE_TYPE_P59_VEB_TCAM + +/* Meter */ +#define 
CFA_RESOURCE_TYPE_P58_METER 0x0UL +/* SRAM_Bank_0 */ +#define CFA_RESOURCE_TYPE_P58_SRAM_BANK_0 0x1UL +/* SRAM_Bank_1 */ +#define CFA_RESOURCE_TYPE_P58_SRAM_BANK_1 0x2UL +/* SRAM_Bank_2 */ +#define CFA_RESOURCE_TYPE_P58_SRAM_BANK_2 0x3UL +/* SRAM_Bank_3 */ +#define CFA_RESOURCE_TYPE_P58_SRAM_BANK_3 0x4UL +/* L2 Context TCAM High priority entries */ +#define CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH 0x5UL +/* L2 Context TCAM Low priority entries */ +#define CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW 0x6UL +/* L2 Context REMAP high priority entries */ +#define CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH 0x7UL +/* L2 Context REMAP Low priority entries */ +#define CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW 0x8UL +/* Profile Func */ +#define CFA_RESOURCE_TYPE_P58_PROF_FUNC 0x9UL +/* Profile TCAM */ +#define CFA_RESOURCE_TYPE_P58_PROF_TCAM 0xaUL +/* Exact Match Profile Id */ +#define CFA_RESOURCE_TYPE_P58_EM_PROF_ID 0xbUL +/* Wildcard Profile Id */ +#define CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID 0xcUL +/* Exact Match Record */ +#define CFA_RESOURCE_TYPE_P58_EM_REC 0xdUL +/* Wildcard TCAM */ +#define CFA_RESOURCE_TYPE_P58_WC_TCAM 0xeUL +/* Meter profile */ +#define CFA_RESOURCE_TYPE_P58_METER_PROF 0xfUL +/* Meter */ +#define CFA_RESOURCE_TYPE_P58_MIRROR 0x10UL +/* Exact Match Flexible Key Builder */ +#define CFA_RESOURCE_TYPE_P58_EM_FKB 0x11UL +/* Wildcard Flexible Key Builder */ +#define CFA_RESOURCE_TYPE_P58_WC_FKB 0x12UL +/* VEB TCAM */ +#define CFA_RESOURCE_TYPE_P58_VEB_TCAM 0x13UL +/* Metadata */ +#define CFA_RESOURCE_TYPE_P58_METADATA 0x14UL +/* Meter drop counter */ +#define CFA_RESOURCE_TYPE_P58_METER_DROP_CNT 0x15UL +#define CFA_RESOURCE_TYPE_P58_LAST CFA_RESOURCE_TYPE_P58_METER_DROP_CNT + +/* Multicast Group */ +#define CFA_RESOURCE_TYPE_P45_MCG 0x0UL +/* Encap 8 byte record */ +#define CFA_RESOURCE_TYPE_P45_ENCAP_8B 0x1UL +/* Encap 16 byte record */ +#define CFA_RESOURCE_TYPE_P45_ENCAP_16B 0x2UL +/* Encap 64 byte record */ +#define CFA_RESOURCE_TYPE_P45_ENCAP_64B 0x3UL +/* 
Source Property MAC */ +#define CFA_RESOURCE_TYPE_P45_SP_MAC 0x4UL +/* Source Property MAC and IPv4 */ +#define CFA_RESOURCE_TYPE_P45_SP_MAC_IPV4 0x5UL +/* Source Property MAC and IPv6 */ +#define CFA_RESOURCE_TYPE_P45_SP_MAC_IPV6 0x6UL +/* 64B Counters */ +#define CFA_RESOURCE_TYPE_P45_COUNTER_64B 0x7UL +/* Network Address Translation Port */ +#define CFA_RESOURCE_TYPE_P45_NAT_PORT 0x8UL +/* Network Address Translation IPv4 address */ +#define CFA_RESOURCE_TYPE_P45_NAT_IPV4 0x9UL +/* Meter */ +#define CFA_RESOURCE_TYPE_P45_METER 0xaUL +/* Flow State */ +#define CFA_RESOURCE_TYPE_P45_FLOW_STATE 0xbUL +/* Full Action Records */ +#define CFA_RESOURCE_TYPE_P45_FULL_ACTION 0xcUL +/* Action Record Format 0 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_0_ACTION 0xdUL +/* Action Record Ext Format 0 */ +#define CFA_RESOURCE_TYPE_P45_EXT_FORMAT_0_ACTION 0xeUL +/* Action Record Format 1 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_1_ACTION 0xfUL +/* Action Record Format 2 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_2_ACTION 0x10UL +/* Action Record Format 3 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_3_ACTION 0x11UL +/* Action Record Format 4 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_4_ACTION 0x12UL +/* Action Record Format 5 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_5_ACTION 0x13UL +/* Action Record Format 6 */ +#define CFA_RESOURCE_TYPE_P45_FORMAT_6_ACTION 0x14UL +/* L2 Context TCAM High priority entries */ +#define CFA_RESOURCE_TYPE_P45_L2_CTXT_TCAM_HIGH 0x15UL +/* L2 Context TCAM Low priority entries */ +#define CFA_RESOURCE_TYPE_P45_L2_CTXT_TCAM_LOW 0x16UL +/* L2 Context REMAP high priority entries */ +#define CFA_RESOURCE_TYPE_P45_L2_CTXT_REMAP_HIGH 0x17UL +/* L2 Context REMAP Low priority entries */ +#define CFA_RESOURCE_TYPE_P45_L2_CTXT_REMAP_LOW 0x18UL +/* Profile Func */ +#define CFA_RESOURCE_TYPE_P45_PROF_FUNC 0x19UL +/* Profile TCAM */ +#define CFA_RESOURCE_TYPE_P45_PROF_TCAM 0x1aUL +/* Exact Match Profile Id */ +#define CFA_RESOURCE_TYPE_P45_EM_PROF_ID 0x1bUL +/* Exact Match 
Record */ +#define CFA_RESOURCE_TYPE_P45_EM_REC 0x1cUL +/* Wildcard Profile Id */ +#define CFA_RESOURCE_TYPE_P45_WC_TCAM_PROF_ID 0x1dUL +/* Wildcard TCAM */ +#define CFA_RESOURCE_TYPE_P45_WC_TCAM 0x1eUL +/* Meter profile */ +#define CFA_RESOURCE_TYPE_P45_METER_PROF 0x1fUL +/* Meter */ +#define CFA_RESOURCE_TYPE_P45_MIRROR 0x20UL +/* Source Property TCAM */ +#define CFA_RESOURCE_TYPE_P45_SP_TCAM 0x21UL +/* VEB TCAM */ +#define CFA_RESOURCE_TYPE_P45_VEB_TCAM 0x22UL +/* Table Scope */ +#define CFA_RESOURCE_TYPE_P45_TBL_SCOPE 0x23UL +#define CFA_RESOURCE_TYPE_P45_LAST CFA_RESOURCE_TYPE_P45_TBL_SCOPE + +/* Multicast Group */ +#define CFA_RESOURCE_TYPE_P4_MCG 0x0UL +/* Encap 8 byte record */ +#define CFA_RESOURCE_TYPE_P4_ENCAP_8B 0x1UL +/* Encap 16 byte record */ +#define CFA_RESOURCE_TYPE_P4_ENCAP_16B 0x2UL +/* Encap 64 byte record */ +#define CFA_RESOURCE_TYPE_P4_ENCAP_64B 0x3UL +/* Source Property MAC */ +#define CFA_RESOURCE_TYPE_P4_SP_MAC 0x4UL +/* Source Property MAC and IPv4 */ +#define CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4 0x5UL +/* Source Property MAC and IPv6 */ +#define CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6 0x6UL +/* 64B Counters */ +#define CFA_RESOURCE_TYPE_P4_COUNTER_64B 0x7UL +/* Network Address Translation Port */ +#define CFA_RESOURCE_TYPE_P4_NAT_PORT 0x8UL +/* Network Address Translation IPv4 address */ +#define CFA_RESOURCE_TYPE_P4_NAT_IPV4 0x9UL +/* Meter */ +#define CFA_RESOURCE_TYPE_P4_METER 0xaUL +/* Flow State */ +#define CFA_RESOURCE_TYPE_P4_FLOW_STATE 0xbUL +/* Full Action Records */ +#define CFA_RESOURCE_TYPE_P4_FULL_ACTION 0xcUL +/* Action Record Format 0 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_0_ACTION 0xdUL +/* Action Record Ext Format 0 */ +#define CFA_RESOURCE_TYPE_P4_EXT_FORMAT_0_ACTION 0xeUL +/* Action Record Format 1 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_1_ACTION 0xfUL +/* Action Record Format 2 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_2_ACTION 0x10UL +/* Action Record Format 3 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_3_ACTION 0x11UL +/* Action 
Record Format 4 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_4_ACTION 0x12UL +/* Action Record Format 5 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_5_ACTION 0x13UL +/* Action Record Format 6 */ +#define CFA_RESOURCE_TYPE_P4_FORMAT_6_ACTION 0x14UL +/* L2 Context TCAM High priority entries */ +#define CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH 0x15UL +/* L2 Context TCAM Low priority entries */ +#define CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW 0x16UL +/* L2 Context REMAP high priority entries */ +#define CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH 0x17UL +/* L2 Context REMAP Low priority entries */ +#define CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW 0x18UL +/* Profile Func */ +#define CFA_RESOURCE_TYPE_P4_PROF_FUNC 0x19UL +/* Profile TCAM */ +#define CFA_RESOURCE_TYPE_P4_PROF_TCAM 0x1aUL +/* Exact Match Profile Id */ +#define CFA_RESOURCE_TYPE_P4_EM_PROF_ID 0x1bUL +/* Exact Match Record */ +#define CFA_RESOURCE_TYPE_P4_EM_REC 0x1cUL +/* Wildcard Profile Id */ +#define CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID 0x1dUL +/* Wildcard TCAM */ +#define CFA_RESOURCE_TYPE_P4_WC_TCAM 0x1eUL +/* Meter profile */ +#define CFA_RESOURCE_TYPE_P4_METER_PROF 0x1fUL +/* Meter */ +#define CFA_RESOURCE_TYPE_P4_MIRROR 0x20UL +/* Source Property TCAM */ +#define CFA_RESOURCE_TYPE_P4_SP_TCAM 0x21UL +/* Table Scope */ +#define CFA_RESOURCE_TYPE_P4_TBL_SCOPE 0x22UL +#define CFA_RESOURCE_TYPE_P4_LAST CFA_RESOURCE_TYPE_P4_TBL_SCOPE + +#endif /* _CFA_RESOURCE_TYPES_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.c new file mode 100644 index 000000000000..9babcd04a589 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.c @@ -0,0 +1,1855 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2021-2022 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "hcapi_cfa_defs.h" +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "tf_core.h" +#include "tf_session.h" +#include "tf_util.h" + +#include "cfa_tcam_mgr.h" +#include "cfa_tcam_mgr_device.h" +#include "cfa_tcam_mgr_hwop_msg.h" + +/* Thor */ +#include "cfa_tcam_mgr_p58.h" +/* Wh+, SR */ +#include "cfa_tcam_mgr_p4.h" + +#define TF_TCAM_SLICE_INVALID (-1) + +/* The following macros are for setting the entry status in a row entry. + * row is (struct cfa_tcam_mgr_table_rows_0 *) + */ +#define ROW_ENTRY_INUSE(row, entry) ((row)->entry_inuse & (1U << (entry))) +#define ROW_ENTRY_SET(row, entry) ((row)->entry_inuse |= (1U << (entry))) +#define ROW_ENTRY_CLEAR(row, entry) ((row)->entry_inuse &= ~(1U << (entry))) +#define ROW_INUSE(row) ((row)->entry_inuse != 0) + +static int physical_table_types[CFA_TCAM_MGR_TBL_TYPE_MAX] = { + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS] = + TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS] = + TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + [CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS] = + TF_TCAM_TBL_TYPE_PROF_TCAM, + [CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS] = + TF_TCAM_TBL_TYPE_WC_TCAM, + [CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS] = + TF_TCAM_TBL_TYPE_SP_TCAM, + [CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS] = + TF_TCAM_TBL_TYPE_CT_RULE_TCAM, + [CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS] = + TF_TCAM_TBL_TYPE_VEB_TCAM, +}; + +int cfa_tcam_mgr_get_phys_table_type(enum cfa_tcam_mgr_tbl_type type) +{ + WARN_ON(type >= CFA_TCAM_MGR_TBL_TYPE_MAX); + + return physical_table_types[type]; +} + +const char * +cfa_tcam_mgr_tbl_2_str(enum cfa_tcam_mgr_tbl_type tcam_type) +{ + switch (tcam_type) { + case CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM: + return "l2_ctxt_tcam_high AFM"; + case CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS: + return "l2_ctxt_tcam_high Apps"; + case CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM: + return "l2_ctxt_tcam_low AFM"; + case 
CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS: + return "l2_ctxt_tcam_low Apps"; + case CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM: + return "prof_tcam AFM"; + case CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS: + return "prof_tcam Apps"; + case CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM: + return "wc_tcam AFM"; + case CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS: + return "wc_tcam Apps"; + case CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM: + return "veb_tcam AFM"; + case CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS: + return "veb_tcam Apps"; + case CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM: + return "sp_tcam AFM"; + case CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS: + return "sp_tcam Apps"; + case CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM: + return "ct_rule_tcam AFM"; + case CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS: + return "ct_rule_tcam Apps"; + default: + return "Invalid tcam table type"; + } +} + +/* key_size and slice_width are in bytes */ +static int cfa_tcam_mgr_get_num_slices(unsigned int key_size, unsigned int slice_width) +{ + int num_slices = 0; + + if (!key_size) + return -EINVAL; + + num_slices = ((key_size - 1U) / slice_width) + 1U; + /* Round up to next highest power of 2 */ + /* This is necessary since, for example, 3 slices is not a valid entry + * width. + */ + num_slices--; + /* Repeat to maximum number of bits actually used */ + /* This fills in all the bits. */ + num_slices |= num_slices >> 1; + num_slices |= num_slices >> 2; + num_slices |= num_slices >> 4; + /* + * If the maximum number of slices that are supported by the HW + * increases, then additional shifts are needed. 
+ */ + num_slices++; + return num_slices; +} + +static struct cfa_tcam_mgr_entry_data *cfa_tcam_mgr_entry_get(struct + cfa_tcam_mgr_data + * tcam_mgr_data, + u16 id) +{ + if (id > tcam_mgr_data->cfa_tcam_mgr_max_entries) + return NULL; + + return &tcam_mgr_data->entry_data[id]; +} + +/* Insert an entry into the entry table */ +static int cfa_tcam_mgr_entry_insert(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, u16 id, + struct cfa_tcam_mgr_entry_data *entry) +{ + if (id > tcam_mgr_data->cfa_tcam_mgr_max_entries) + return -EINVAL; + + memcpy(&tcam_mgr_data->entry_data[id], entry, + sizeof(tcam_mgr_data->entry_data[id])); + + netdev_dbg(tfp->bp->dev, "Added entry %d to table\n", id); + + return 0; +} + +/* Delete an entry from the entry table */ +static int cfa_tcam_mgr_entry_delete(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, u16 id) +{ + if (id > tcam_mgr_data->cfa_tcam_mgr_max_entries) + return -EINVAL; + + memset(&tcam_mgr_data->entry_data[id], 0, + sizeof(tcam_mgr_data->entry_data[id])); + + netdev_dbg(tfp->bp->dev, + "Deleted entry %d from table\n", id); + + return 0; +} + +/* Returns the size of the row structure taking into account how many slices a + * TCAM supports. + */ +static int cfa_tcam_mgr_row_size_get(struct cfa_tcam_mgr_data *tcam_mgr_data, + enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + return sizeof(struct cfa_tcam_mgr_table_rows_0) + + (tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].max_slices * + sizeof(((struct cfa_tcam_mgr_table_rows_0 *)0)->entries[0])); +} + +static void *cfa_tcam_mgr_row_ptr_get(void *base, int index, int row_size) +{ + return (u8 *)base + (index * row_size); +} + +/* Searches a table to find the direction and type of an entry. 
*/ +static int cfa_tcam_mgr_entry_find_in_table(struct cfa_tcam_mgr_data + *tcam_mgr_data, + int id, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + struct cfa_tcam_mgr_table_data *table_data; + int max_slices, row_idx, row_size, slice; + struct cfa_tcam_mgr_table_rows_0 *row; + + table_data = &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + if (table_data->max_entries > 0 && + table_data->hcapi_type > 0) { + max_slices = table_data->max_slices; + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, dir, type); + for (row_idx = table_data->start_row; + row_idx <= table_data->end_row; + row_idx++) { + row = cfa_tcam_mgr_row_ptr_get(table_data->tcam_rows, + row_idx, row_size); + if (!ROW_INUSE(row)) + continue; + for (slice = 0; + slice < (max_slices / row->entry_size); + slice++) { + if (!ROW_ENTRY_INUSE(row, slice)) + continue; + if (row->entries[slice] == id) + return 0; + } + } + } + + return -ENOENT; +} + +/* Searches all the tables to find the direction and type of an entry. 
*/ +static int cfa_tcam_mgr_entry_find(struct cfa_tcam_mgr_data *tcam_mgr_data, + int id, enum tf_dir *tbl_dir, + enum cfa_tcam_mgr_tbl_type *tbl_type) +{ + enum cfa_tcam_mgr_tbl_type type; + int rc = -ENOENT; + enum tf_dir dir; + + for (dir = TF_DIR_RX; dir < + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables); + dir++) { + for (type = CFA_TCAM_MGR_TBL_TYPE_START; + type < + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir]); + type++) { + rc = cfa_tcam_mgr_entry_find_in_table(tcam_mgr_data, + id, dir, type); + if (!rc) { + *tbl_dir = dir; + *tbl_type = type; + return rc; + } + } + } + + return rc; +} + +static int cfa_tcam_mgr_row_is_entry_free(struct cfa_tcam_mgr_table_rows_0 *row, + int max_slices, int key_slices) +{ + int j; + + if (ROW_INUSE(row) && + row->entry_size == key_slices) { + for (j = 0; j < (max_slices / row->entry_size); j++) { + if (!ROW_ENTRY_INUSE(row, j)) + return j; + } + } + return -EINVAL; +} + +static int cfa_tcam_mgr_entry_move(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type, + int entry_id, + struct cfa_tcam_mgr_table_data *table_data, + int dest_row_index, int dest_row_slice, + struct cfa_tcam_mgr_table_rows_0 *dest_row, + int source_row_index, + struct cfa_tcam_mgr_table_rows_0 *source_row, + bool free_source_entry) +{ + struct cfa_tcam_mgr_get_parms gparms = { 0 }; + struct cfa_tcam_mgr_set_parms sparms = { 0 }; + struct cfa_tcam_mgr_free_parms fparms = { 0 }; + struct cfa_tcam_mgr_entry_data *entry; + u8 result[CFA_TCAM_MGR_MAX_KEY_SIZE]; + u8 mask[CFA_TCAM_MGR_MAX_KEY_SIZE]; + u8 key[CFA_TCAM_MGR_MAX_KEY_SIZE]; + int j, rc; + + entry = cfa_tcam_mgr_entry_get(tcam_mgr_data, entry_id); + if (!entry) + return -EINVAL; + + gparms.dir = dir; + gparms.type = type; + gparms.hcapi_type = table_data->hcapi_type; + gparms.key = key; + gparms.mask = mask; + gparms.result = result; + gparms.id = source_row->entries[entry->slice]; + gparms.key_size = sizeof(key); + gparms.result_size = 
sizeof(result); + + rc = cfa_tcam_mgr_entry_get_msg(tcam_mgr_data, tfp, &gparms, + source_row_index, + entry->slice * source_row->entry_size, + table_data->max_slices); + if (rc) + return rc; + + sparms.dir = dir; + sparms.type = type; + sparms.hcapi_type = table_data->hcapi_type; + sparms.key = key; + sparms.mask = mask; + sparms.result = result; + sparms.id = gparms.id; + sparms.key_size = gparms.key_size; + sparms.result_size = gparms.result_size; + + /* Slice in destination row not specified. Find first free slice. */ + if (dest_row_slice < 0) + for (j = 0; + j < (table_data->max_slices / dest_row->entry_size); + j++) { + if (!ROW_ENTRY_INUSE(dest_row, j)) { + dest_row_slice = j; + break; + } + } + + /* If no free slice found, return error. */ + if (dest_row_slice < 0) + return -EPERM; + + rc = cfa_tcam_mgr_entry_set_msg(tcam_mgr_data, tfp, &sparms, + dest_row_index, + dest_row_slice * dest_row->entry_size, + table_data->max_slices); + if (rc) + return rc; + + if (free_source_entry) { + fparms.dir = dir; + fparms.type = type; + fparms.hcapi_type = table_data->hcapi_type; + rc = cfa_tcam_mgr_entry_free_msg(tcam_mgr_data, + tfp, &fparms, + source_row_index, + entry->slice * + dest_row->entry_size, + table_data->row_width / + table_data->max_slices * + source_row->entry_size, + table_data->result_size, + table_data->max_slices); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: %s Failed to free ID:%d row:%d slice:%d rc:%d\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + gparms.id, source_row_index, entry->slice, + -rc); + } + } + + netdev_dbg(tfp->bp->dev, + "Moved entry:%d from row:%d slice:%d to row:%d slice:%d\n", + entry_id, source_row_index, entry->slice, dest_row_index, + dest_row_slice); + + ROW_ENTRY_SET(dest_row, dest_row_slice); + dest_row->entries[dest_row_slice] = entry_id; + ROW_ENTRY_CLEAR(source_row, entry->slice); + entry->row = dest_row_index; + entry->slice = dest_row_slice; + + cfa_tcam_mgr_rows_dump(tfp, dir, type); + + return 0; +} + 
+static int cfa_tcam_mgr_row_move(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type, + struct cfa_tcam_mgr_table_data *table_data, + int dest_row_index, + struct cfa_tcam_mgr_table_rows_0 *dest_row, + int source_row_index, + struct cfa_tcam_mgr_table_rows_0 *source_row) +{ + struct cfa_tcam_mgr_free_parms fparms = { 0 }; + int j, rc; + + dest_row->priority = source_row->priority; + dest_row->entry_size = source_row->entry_size; + dest_row->entry_inuse = 0; + + fparms.dir = dir; + fparms.type = type; + fparms.hcapi_type = table_data->hcapi_type; + + for (j = 0; + j < (table_data->max_slices / source_row->entry_size); + j++) { + if (ROW_ENTRY_INUSE(source_row, j)) { + cfa_tcam_mgr_entry_move(tcam_mgr_data, tfp, + dir, type, + source_row->entries[j], + table_data, + dest_row_index, j, dest_row, + source_row_index, source_row, + true); + } else { + /* Slice not in use, write an empty slice. */ + rc = cfa_tcam_mgr_entry_free_msg(tcam_mgr_data, + tfp, &fparms, + dest_row_index, + j * + dest_row->entry_size, + table_data->row_width / + table_data->max_slices * + dest_row->entry_size, + table_data->result_size, + table_data->max_slices); + if (rc) + return rc; + } + } + + return 0; +} + +/* Install entry into in-memory tables, not into TCAM (yet). 
*/ +static void cfa_tcam_mgr_row_entry_install(struct tf *tfp, + struct cfa_tcam_mgr_table_rows_0 + *row, + struct cfa_tcam_mgr_alloc_parms + *parms, + struct cfa_tcam_mgr_entry_data + *entry, + u16 id, int key_slices, + int row_index, int slice) +{ + if (slice == TF_TCAM_SLICE_INVALID) { + slice = 0; + row->entry_size = key_slices; + row->priority = parms->priority; + } + + ROW_ENTRY_SET(row, slice); + row->entries[slice] = id; + entry->row = row_index; + entry->slice = slice; + + netdev_dbg(tfp->bp->dev, + "Entry %d installed row:%d slice:%d prio:%d\n", + id, row_index, slice, row->priority); + cfa_tcam_mgr_rows_dump(tfp, parms->dir, parms->type); +} + +/* Finds an empty row that can be used and reserve for entry. If necessary, + * entries will be shuffled in order to make room. + */ +static struct cfa_tcam_mgr_table_rows_0 * +cfa_tcam_mgr_empty_row_alloc(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_alloc_parms *parms, + struct cfa_tcam_mgr_entry_data *entry, + u16 id, int key_slices) +{ + int to_row_idx, from_row_idx, slice, start_row, end_row; + struct cfa_tcam_mgr_table_rows_0 *tcam_rows; + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_table_rows_0 *from_row; + struct cfa_tcam_mgr_table_rows_0 *to_row; + struct cfa_tcam_mgr_table_rows_0 *row; + int i, max_slices, row_size; + int target_row = -1; + int empty_row = -1; + + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[parms->dir][parms->type]; + + start_row = table_data->start_row; + end_row = table_data->end_row; + max_slices = table_data->max_slices; + tcam_rows = table_data->tcam_rows; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + /* Note: The rows are ordered from highest priority to lowest priority. + * That is, the first row in the table will have the highest priority + * and the last row in the table will have the lowest priority. 
+ */ + + netdev_dbg(tfp->bp->dev, + "Trying to alloc space for entry with priority %d and width %d slices.\n", + parms->priority, key_slices); + + /* First check for partially used entries, but only if the key needs + * fewer slices than there are in a row. + */ + if (key_slices < max_slices) { + for (i = start_row; i <= end_row; i++) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (!ROW_INUSE(row)) + continue; + if (row->priority < parms->priority) + break; + if (row->priority > parms->priority) + continue; + slice = cfa_tcam_mgr_row_is_entry_free(row, + max_slices, + key_slices); + if (slice >= 0) { + cfa_tcam_mgr_row_entry_install(tfp, + row, parms, + entry, id, + key_slices, i, + slice); + return row; + } + } + } + + /* No partially used rows available. Find an empty row, if any. */ + + /* All max priority entries are placed in the beginning of the TCAM. It + * should not be necessary to shuffle any of these entries. All other + * priorities are placed from the end of the TCAM and may require + * shuffling. + */ + if (parms->priority == TF_TCAM_PRIORITY_MAX) { + /* Handle max priority first. */ + for (i = start_row; i <= end_row; i++) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (!ROW_INUSE(row)) { + cfa_tcam_mgr_row_entry_install(tfp, + row, parms, + entry, id, + key_slices, i, + TF_TCAM_SLICE_INVALID); + return row; + } + if (row->priority < parms->priority) { + /* No free entries before priority change, table is full. */ + return NULL; + } + } + /* No free entries found, table is full. */ + return NULL; + } + + /* Use the highest available entry */ + for (i = end_row; i >= start_row; i--) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (!ROW_INUSE(row)) { + empty_row = i; + break; + } + + if (row->priority > parms->priority && + target_row < 0) + target_row = i; + } + + if (empty_row < 0) { + /* No free entries found, table is full. 
*/ + return NULL; + } + + if (target_row < 0) { + /* Did not find a row with higher priority before unused row so + * just install new entry in empty_row. + */ + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, empty_row, row_size); + cfa_tcam_mgr_row_entry_install(tfp, row, parms, entry, id, + key_slices, empty_row, + TF_TCAM_SLICE_INVALID); + return row; + } + + to_row_idx = empty_row; + to_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, to_row_idx, row_size); + while (to_row_idx < target_row) { + from_row_idx = to_row_idx + 1; + from_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, from_row_idx, + row_size); + /* Find the highest row with the same priority as the initial + * source row (from_row). It's only necessary to copy one row + * of each priority. + */ + for (i = from_row_idx + 1; i <= target_row; i++) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (row->priority != from_row->priority) + break; + from_row_idx = i; + from_row = row; + } + cfa_tcam_mgr_row_move(tcam_mgr_data, tfp, parms->dir, + parms->type, + table_data, to_row_idx, to_row, + from_row_idx, from_row); + netdev_dbg(tfp->bp->dev, "Moved row %d to row %d.\n", + from_row_idx, to_row_idx); + + to_row = from_row; + to_row_idx = from_row_idx; + } + to_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, target_row, row_size); + memset(to_row, 0, row_size); + cfa_tcam_mgr_row_entry_install(tfp, to_row, parms, entry, id, + key_slices, target_row, + TF_TCAM_SLICE_INVALID); + + return row; +} + +/* This function will combine rows when possible to result in the fewest rows + * used necessary for the entries that are installed. 
+ */ +static void cfa_tcam_mgr_rows_combine(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_free_parms *parms, + struct cfa_tcam_mgr_table_data + *table_data, + int changed_row_index) +{ + int to_row_idx, from_row_idx, start_row, end_row, max_slices; + struct cfa_tcam_mgr_table_rows_0 *from_row = NULL; + struct cfa_tcam_mgr_table_rows_0 *tcam_rows; + struct cfa_tcam_mgr_table_rows_0 *to_row; + bool entry_moved = false; + int i, j, row_size; + + start_row = table_data->start_row; + end_row = table_data->end_row; + max_slices = table_data->max_slices; + tcam_rows = table_data->tcam_rows; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + + from_row_idx = changed_row_index; + from_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, from_row_idx, row_size); + + if (ROW_INUSE(from_row)) { + /* Row is still in partial use. See if remaining entry(s) can + * be moved to free up a row. + */ + for (i = 0; i < (max_slices / from_row->entry_size); i++) { + if (!ROW_ENTRY_INUSE(from_row, i)) + continue; + for (to_row_idx = end_row; + to_row_idx >= start_row; + to_row_idx--) { + to_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, + to_row_idx, + row_size); + if (!ROW_INUSE(to_row)) + continue; + if (to_row->priority > from_row->priority) + break; + if (to_row->priority != from_row->priority) + continue; + if (to_row->entry_size != from_row->entry_size) + continue; + if (to_row_idx == changed_row_index) + continue; + for (j = 0; + j < (max_slices / to_row->entry_size); + j++) { + if (!ROW_ENTRY_INUSE(to_row, j)) { + cfa_tcam_mgr_entry_move + (tcam_mgr_data, + tfp, + parms->dir, + parms->type, + from_row->entries[i], + table_data, + to_row_idx, + -1, to_row, + from_row_idx, + from_row, + true); + entry_moved = true; + break; + } + } + if (entry_moved) + break; + } + if (ROW_INUSE(from_row)) + entry_moved = false; + else + break; + } + } +} + +/* This function will ensure that all rows, except those of the highest + * priority, at 
the end of the table. When this function is finished, all the + * empty rows should be between the highest priority rows at the beginning of + * the table and the rest of the rows with lower priorities. + * + * Will need to free the row left newly empty as a result of moving. + * Return row to free to caller. If new_row_to_free < 0, then no new row to + * free. + */ +static void cfa_tcam_mgr_rows_compact(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_free_parms *parms, + struct cfa_tcam_mgr_table_data + *table_data, + int *new_row_to_free, + int changed_row_index) +{ + int to_row_idx = 0, from_row_idx = 0, start_row = 0, end_row = 0; + struct cfa_tcam_mgr_table_rows_0 *from_row = NULL; + struct cfa_tcam_mgr_table_rows_0 *tcam_rows; + struct cfa_tcam_mgr_table_rows_0 *to_row; + struct cfa_tcam_mgr_table_rows_0 *row; + int i, row_size, priority; + + *new_row_to_free = -1; + + start_row = table_data->start_row; + end_row = table_data->end_row; + tcam_rows = table_data->tcam_rows; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + + /* The row is no longer in use, so see if rows need to be moved in order + * to not leave any gaps. + */ + to_row_idx = changed_row_index; + to_row = cfa_tcam_mgr_row_ptr_get(tcam_rows, to_row_idx, row_size); + + priority = to_row->priority; + if (priority == TF_TCAM_PRIORITY_MAX) { + if (changed_row_index == end_row) + /* Nothing to move - the last row in the TCAM is being deleted. */ + return; + for (i = changed_row_index + 1; i <= end_row; i++) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (!ROW_INUSE(row)) + break; + + if (row->priority < priority) + break; + + from_row = row; + from_row_idx = i; + } + } else { + if (changed_row_index == start_row) + /* Nothing to move - the first row in the TCAM is being deleted. 
*/ + return; + for (i = changed_row_index - 1; i >= start_row; i--) { + row = cfa_tcam_mgr_row_ptr_get(tcam_rows, i, row_size); + if (!ROW_INUSE(row)) + break; + + if (row->priority > priority) { + /* Don't move the highest priority rows. */ + if (row->priority == TF_TCAM_PRIORITY_MAX) + break; + /* If from_row is NULL, that means that there + * were no rows of the deleted priority. + * Nothing to move yet. + * + * If from_row is not NULL, then it is the last + * row with the same priority and must be moved + * to fill the newly empty (by free or by move) + * row. + */ + if (from_row) { + cfa_tcam_mgr_row_move(tcam_mgr_data, + tfp, + parms->dir, + parms->type, + table_data, + to_row_idx, + to_row, + from_row_idx, + from_row); + netdev_dbg(tfp->bp->dev, + "Moved row %d to row %d.\n", from_row_idx, + to_row_idx); + *new_row_to_free = from_row_idx; + to_row = from_row; + to_row_idx = from_row_idx; + } + + priority = row->priority; + } + from_row = row; + from_row_idx = i; + } + } + + if (from_row) { + cfa_tcam_mgr_row_move(tcam_mgr_data, tfp, parms->dir, + parms->type, table_data, to_row_idx, + to_row, from_row_idx, from_row); + netdev_dbg(tfp->bp->dev, "Moved row %d to row %d.\n", + from_row_idx, to_row_idx); + *new_row_to_free = from_row_idx; + } +} + +/* This function is to set table limits for the logical TCAM tables. */ +static int cfa_tcam_mgr_table_limits_set(struct cfa_tcam_mgr_data + *tcam_mgr_data, struct tf *tfp, + struct cfa_tcam_mgr_init_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + unsigned int dir, type; + int start, stride; + + if (!parms) + return 0; + + for (dir = 0; dir < ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables); + dir++) + for (type = 0; + type < + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir]); + type++) { + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + /* If num_rows is zero, then TCAM Manager did not + * allocate any row storage for that table so cannot + * manage it. 
+ */ + if (!table_data->num_rows) + continue; + start = parms->resc[dir][type].start; + stride = parms->resc[dir][type].stride; + if (start % table_data->max_slices > 0) { + netdev_dbg(tfp->bp->dev, + "%s: %s Resrces(%d) not on row boundary\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + start); + netdev_dbg(tfp->bp->dev, + "%s: Start:%d, num slices:%d\n", + tf_dir_2_str(dir), start, + table_data->max_slices); + return -EINVAL; + } + if (stride % table_data->max_slices > 0) { + netdev_dbg(tfp->bp->dev, + "%s: %s Resrces(%d) not on row boundary.\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + stride); + netdev_dbg(tfp->bp->dev, + "%s: Stride:%d, num slices:%d\n", + tf_dir_2_str(dir), stride, + table_data->max_slices); + return -EINVAL; + } + if (!stride) { + table_data->start_row = 0; + table_data->end_row = 0; + table_data->max_entries = 0; + } else { + table_data->start_row = start / + table_data->max_slices; + table_data->end_row = table_data->start_row + + (stride / table_data->max_slices) - 1; + table_data->max_entries = + table_data->max_slices * + (table_data->end_row - + table_data->start_row + 1); + } + } + + return 0; +} + +static int cfa_tcam_mgr_bitmap_alloc(struct tf *tfp, struct cfa_tcam_mgr_data *tcam_mgr_data) +{ + unsigned long session_bmp_size; + unsigned long *session_bmp; + int max_entries; + + if (!tcam_mgr_data->cfa_tcam_mgr_max_entries) + return -EINVAL; + + max_entries = tcam_mgr_data->cfa_tcam_mgr_max_entries; + + session_bmp_size = (sizeof(unsigned long) * + (((max_entries - 1) / sizeof(unsigned long)) + 1)); + session_bmp = vzalloc(session_bmp_size); + if (!session_bmp) + return -ENOMEM; + + tcam_mgr_data->session_bmp = session_bmp; + tcam_mgr_data->session_bmp_size = max_entries; + + netdev_dbg(tfp->bp->dev, "session bitmap size is %lu\n", tcam_mgr_data->session_bmp_size); + + return 0; +} + +static void cfa_tcam_mgr_uninit(struct tf *tfp, + enum cfa_tcam_mgr_device_type type) +{ + switch (type) { + case 
CFA_TCAM_MGR_DEVICE_TYPE_WH: + case CFA_TCAM_MGR_DEVICE_TYPE_SR: + cfa_tcam_mgr_uninit_p4(tfp); + break; + case CFA_TCAM_MGR_DEVICE_TYPE_THOR: + cfa_tcam_mgr_uninit_p58(tfp); + break; + default: + netdev_dbg(tfp->bp->dev, "No such device %d\n", type); + return; + } +} + +int cfa_tcam_mgr_init(struct tf *tfp, enum cfa_tcam_mgr_device_type type, + struct cfa_tcam_mgr_init_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + unsigned int dir, tbl_type; + struct tf_session *tfs; + int rc; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + switch (type) { + case CFA_TCAM_MGR_DEVICE_TYPE_WH: + case CFA_TCAM_MGR_DEVICE_TYPE_SR: + rc = cfa_tcam_mgr_init_p4(tfp); + break; + case CFA_TCAM_MGR_DEVICE_TYPE_THOR: + rc = cfa_tcam_mgr_init_p58(tfp); + break; + default: + netdev_dbg(tfp->bp->dev, "No such device %d\n", type); + return -ENODEV; + } + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + rc = cfa_tcam_mgr_table_limits_set(tcam_mgr_data, tfp, parms); + if (rc) + return rc; + + /* Now calculate the max entries per table and global max entries based + * on the updated table limits. + */ + tcam_mgr_data->cfa_tcam_mgr_max_entries = 0; + for (dir = 0; dir < ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables); + dir++) + for (tbl_type = 0; + tbl_type < + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir]); + tbl_type++) { + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[dir] + [tbl_type]; + /* If num_rows is zero, then TCAM Manager did not + * allocate any row storage for that table so cannot + * manage it. 
+ */ + if (!table_data->num_rows) { + table_data->start_row = 0; + table_data->end_row = 0; + table_data->max_entries = 0; + } else if (table_data->end_row >= + table_data->num_rows) { + netdev_dbg(tfp->bp->dev, + "%s: %s End row is OOR(%d >= %d)\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str((enum cfa_tcam_mgr_tbl_type)type), + table_data->end_row, + table_data->num_rows); + return -EFAULT; + } else if (!table_data->max_entries && + !table_data->start_row && + !table_data->end_row) { + /* Nothing to do */ + } else { + table_data->max_entries = + table_data->max_slices * + (table_data->end_row - + table_data->start_row + 1); + } + tcam_mgr_data->cfa_tcam_mgr_max_entries += + table_data->max_entries; + } + + rc = cfa_tcam_mgr_bitmap_alloc(tfp, tcam_mgr_data); + if (rc) + return rc; + + rc = cfa_tcam_mgr_hwops_init(tcam_mgr_data, type); + if (rc) + return rc; + + if (parms) + parms->max_entries = tcam_mgr_data->cfa_tcam_mgr_max_entries; + + netdev_dbg(tfp->bp->dev, "Global TCAM table initialized\n"); + + return 0; +} + +int cfa_tcam_mgr_qcaps(struct tf *tfp, struct cfa_tcam_mgr_qcaps_parms *parms) +{ + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_session *tfs; + unsigned int type; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, + "No TCAM data created for session\n"); + return -EPERM; + } + + /* This code will indicate if TCAM Manager is managing a logical TCAM + * table or not. If not, then the physical TCAM will have to be + * accessed using the traditional methods. 
+ */ + parms->rx_tcam_supported = 0; + parms->tx_tcam_supported = 0; + for (type = 0; type < CFA_TCAM_MGR_TBL_TYPE_MAX; type++) { + if (tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [type].max_entries > 0 && + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [type].hcapi_type > 0) + parms->rx_tcam_supported |= + 1 << cfa_tcam_mgr_get_phys_table_type(type); + if (tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [type].max_entries > 0 && + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [type].hcapi_type > 0) + parms->tx_tcam_supported |= + 1 << cfa_tcam_mgr_get_phys_table_type(type); + } + + return 0; +} + +static int cfa_tcam_mgr_validate_tcam_cnt(struct tf *tfp, + struct cfa_tcam_mgr_data + *tcam_mgr_data, + u16 tcam_cnt[] + [CFA_TCAM_MGR_TBL_TYPE_MAX]) +{ + struct cfa_tcam_mgr_table_data *table_data; + unsigned int dir, type; + u16 requested_cnt; + + /* Validate session request */ + for (dir = 0; dir < ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables); + dir++) { + for (type = 0; + type < ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir]); + type++) { + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + requested_cnt = tcam_cnt[dir][type]; + /* Only check if table supported (max_entries > 0). 
*/ + if (table_data->max_entries > 0 && + requested_cnt > table_data->max_entries) { + netdev_err(tfp->bp->dev, + "%s: %s Requested %d, available %d\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + requested_cnt, + table_data->max_entries); + return -ENOSPC; + } + } + } + + return 0; +} + +static int cfa_tcam_mgr_free_entries(struct tf *tfp) +{ + struct cfa_tcam_mgr_free_parms free_parms; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_session *tfs; + int entry_id; + int rc; + + netdev_dbg(tfp->bp->dev, "Unbinding session\n"); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + memset(&free_parms, 0, sizeof(free_parms)); + /* Since we are freeing all pending TCAM entries (which is typically + * done during tcam_unbind), we don't know the type of each entry. + * So we set the type to MAX as a hint to cfa_tcam_mgr_free() to + * figure out the actual type. We need to set it through each + * iteration in the loop below; otherwise, the type determined for + * the first entry would be used for subsequent entries that may or + * may not be of the same type, resulting in errors. 
+ */ + for (entry_id = 0; entry_id < tcam_mgr_data->cfa_tcam_mgr_max_entries; + entry_id++) { + if (test_bit(entry_id, tcam_mgr_data->session_bmp)) { + clear_bit(entry_id, tcam_mgr_data->session_bmp); + + free_parms.id = entry_id; + free_parms.type = CFA_TCAM_MGR_TBL_TYPE_MAX; + cfa_tcam_mgr_free(tfp, &free_parms); + } + } + + return 0; +} + +int cfa_tcam_mgr_bind(struct tf *tfp, struct cfa_tcam_mgr_cfg_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + enum cfa_tcam_mgr_device_type device_type; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_dev_info *dev; + struct tf_session *tfs; + int prev_max_entries; + unsigned int type; + int start, stride; + unsigned int dir; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + switch (dev->type) { + case TF_DEVICE_TYPE_P4: + device_type = CFA_TCAM_MGR_DEVICE_TYPE_WH; + break; + case TF_DEVICE_TYPE_P5: + device_type = CFA_TCAM_MGR_DEVICE_TYPE_THOR; + break; + default: + netdev_dbg(tfp->bp->dev, "No such device %d\n", dev->type); + return -ENODEV; + } + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + rc = cfa_tcam_mgr_init(tfp, device_type, NULL); + if (rc) + return rc; + tcam_mgr_data = tfs->tcam_mgr_handle; + } + + if (parms->num_elements != + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir])) { + netdev_dbg(tfp->bp->dev, + "Element count:%d != table count:%zu\n", + parms->num_elements, + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir])); + return -EINVAL; + } + + /* Only managing one session. resv_res contains the resources allocated + * to this session by the resource manager. Update the limits on TCAMs. 
+ */ + for (dir = 0; dir < ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables); + dir++) { + for (type = 0; + type < + ARRAY_SIZE(tcam_mgr_data->cfa_tcam_mgr_tables[dir]); + type++) { + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + prev_max_entries = table_data->max_entries; + /* In AFM logical tables, max_entries is initialized to + * zero. These logical tables are not used when TCAM + * Manager is in the core so skip. + */ + if (!prev_max_entries) + continue; + start = parms->resv_res[dir][type].start; + stride = parms->resv_res[dir][type].stride; + if (start % table_data->max_slices > 0) { + netdev_dbg(tfp->bp->dev, + "%s: %s Resource:%d not on row boundary\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + start); + netdev_dbg(tfp->bp->dev, + "%s: Start:%d, num slices:%d\n", + tf_dir_2_str(dir), start, + table_data->max_slices); + cfa_tcam_mgr_free_entries(tfp); + return -EINVAL; + } + if (stride % table_data->max_slices > 0) { + netdev_dbg(tfp->bp->dev, + "%s: %s Resource:%d not on row boundary\n", + tf_dir_2_str(dir), + cfa_tcam_mgr_tbl_2_str(type), + stride); + netdev_dbg(tfp->bp->dev, + "%s: Stride:%d num slices:%d\n", + tf_dir_2_str(dir), stride, + table_data->max_slices); + cfa_tcam_mgr_free_entries(tfp); + return -EINVAL; + } + if (!stride) { + table_data->start_row = 0; + table_data->end_row = 0; + table_data->max_entries = 0; + } else { + table_data->start_row = start / + table_data->max_slices; + table_data->end_row = table_data->start_row + + (stride / table_data->max_slices) - 1; + table_data->max_entries = + table_data->max_slices * + (table_data->end_row - + table_data->start_row + 1); + } + tcam_mgr_data->cfa_tcam_mgr_max_entries += + (table_data->max_entries - prev_max_entries); + } + } + + rc = cfa_tcam_mgr_validate_tcam_cnt(tfp, tcam_mgr_data, + parms->tcam_cnt); + if (rc) { + cfa_tcam_mgr_free_entries(tfp); + return rc; + } + + cfa_tcam_mgr_tables_dump(tfp, TF_DIR_MAX, CFA_TCAM_MGR_TBL_TYPE_MAX); + return 0; +} + +int 
cfa_tcam_mgr_unbind(struct tf *tfp) +{ + enum cfa_tcam_mgr_device_type device_type; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp) + return -EINVAL; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + switch (dev->type) { + case TF_DEVICE_TYPE_P4: + device_type = CFA_TCAM_MGR_DEVICE_TYPE_WH; + break; + case TF_DEVICE_TYPE_P5: + device_type = CFA_TCAM_MGR_DEVICE_TYPE_THOR; + break; + default: + netdev_dbg(tfp->bp->dev, "No such device %d\n", dev->type); + return -ENODEV; + } + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return -EPERM; + } + + cfa_tcam_mgr_free_entries(tfp); + cfa_tcam_mgr_uninit(tfp, device_type); + + return 0; +} + +static int cfa_tcam_mgr_alloc_entry(struct tf *tfp, + struct cfa_tcam_mgr_data *tcam_mgr_data, + enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + u32 free_idx; + + free_idx = find_first_zero_bit(tcam_mgr_data->session_bmp, + tcam_mgr_data->session_bmp_size); + if (free_idx == tcam_mgr_data->session_bmp_size) { + netdev_dbg(tfp->bp->dev, "Table full (session)\n"); + return -ENOSPC; + } + + /* Set the bit in the bitmap. 
set_bit */ + set_bit(free_idx, tcam_mgr_data->session_bmp); + + return free_idx; +} + +static int cfa_tcam_mgr_free_entry(struct tf *tfp, + struct cfa_tcam_mgr_data *tcam_mgr_data, + unsigned int entry_id, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + if (entry_id >= tcam_mgr_data->session_bmp_size) + return -EINVAL; + + clear_bit(entry_id, tcam_mgr_data->session_bmp); + netdev_dbg(tfp->bp->dev, "Removed session from entry %d\n", entry_id); + + return 0; +} + +int cfa_tcam_mgr_alloc(struct tf *tfp, struct cfa_tcam_mgr_alloc_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct cfa_tcam_mgr_table_rows_0 *row; + struct cfa_tcam_mgr_entry_data entry; + struct tf_session *tfs; + int key_slices, rc; + int dir, tbl_type; + int new_entry_id; + + if (!tfp || !parms) + return -EINVAL; + + dir = parms->dir; + tbl_type = parms->type; + + if (dir >= TF_DIR_MAX) { + netdev_dbg(tfp->bp->dev, "Invalid direction: %d.\n", dir); + return -EINVAL; + } + + if (tbl_type >= CFA_TCAM_MGR_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "%s: Invalid table type: %d.\n", + tf_dir_2_str(dir), tbl_type); + return -EINVAL; + } + + if (parms->priority > TF_TCAM_PRIORITY_MAX) { + netdev_dbg(tfp->bp->dev, "%s: Priority (%u) out of range (%u -%u).\n", + tf_dir_2_str(dir), parms->priority, + TF_TCAM_PRIORITY_MIN, + TF_TCAM_PRIORITY_MAX); + } + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return -EPERM; + } + + table_data = &tcam_mgr_data->cfa_tcam_mgr_tables[dir][tbl_type]; + + if (!parms->key_size || + parms->key_size > table_data->row_width) { + netdev_dbg(tfp->bp->dev, + "%s: Invalid key size:%d (range 1-%d)\n", + tf_dir_2_str(dir), parms->key_size, + table_data->row_width); + return -EINVAL; + } + + /* Check global limits */ + if (table_data->used_entries >= + 
table_data->max_entries) { + netdev_dbg(tfp->bp->dev, "%s: %s Table full\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type)); + return -ENOSPC; + } + + /* There is room, now increment counts and allocate an entry. */ + new_entry_id = cfa_tcam_mgr_alloc_entry(tfp, tcam_mgr_data, parms->dir, + parms->type); + if (new_entry_id < 0) + return new_entry_id; + + memset(&entry, 0, sizeof(entry)); + entry.ref_cnt++; + + netdev_dbg(tfp->bp->dev, "Allocated entry ID %d.\n", new_entry_id); + + key_slices = cfa_tcam_mgr_get_num_slices(parms->key_size, + (table_data->row_width / + table_data->max_slices)); + + row = cfa_tcam_mgr_empty_row_alloc(tcam_mgr_data, tfp, parms, &entry, + new_entry_id, key_slices); + if (!row) { + netdev_dbg(tfp->bp->dev, "%s: %s Table full (HW)\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type)); + cfa_tcam_mgr_free_entry(tfp, tcam_mgr_data, new_entry_id, + parms->dir, parms->type); + return -ENOSPC; + } + + memcpy(&tcam_mgr_data->entry_data[new_entry_id], + &entry, + sizeof(tcam_mgr_data->entry_data[new_entry_id])); + table_data->used_entries += 1; + + cfa_tcam_mgr_entry_insert(tcam_mgr_data, tfp, new_entry_id, &entry); + + parms->id = new_entry_id; + + return 0; +} + +int cfa_tcam_mgr_free(struct tf *tfp, struct cfa_tcam_mgr_free_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct cfa_tcam_mgr_entry_data *entry; + struct cfa_tcam_mgr_table_rows_0 *row; + int row_size, rc, new_row_to_free; + struct tf_session *tfs; + u16 id; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return -EPERM; + } + + id = parms->id; + entry = cfa_tcam_mgr_entry_get(tcam_mgr_data, id); + if (!entry) { + netdev_dbg(tfp->bp->dev, "Entry %d not found\n", id); + return -EINVAL; + 
} + + if (!entry->ref_cnt) { + netdev_dbg(tfp->bp->dev, "Entry %d not in use\n", id); + return -EINVAL; + } + + /* If the TCAM type is CFA_TCAM_MGR_TBL_TYPE_MAX, that implies that the + * caller does not know the table or direction of the entry and TCAM + * Manager must search the tables to find out which table has the entry + * installed. + * + * This would be the case if RM has informed TCAM Mgr that an entry must + * be freed. Clients (sessions, AFM) should always know the type and + * direction of the table where an entry is installed. + */ + if (parms->type == CFA_TCAM_MGR_TBL_TYPE_MAX) { + /* Need to search for the entry in the tables */ + rc = cfa_tcam_mgr_entry_find(tcam_mgr_data, id, &parms->dir, + &parms->type); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Entry %d not in tables\n", id); + return rc; + } + netdev_dbg(tfp->bp->dev, "%s: id: %d dir: 0x%x type: 0x%x\n", + __func__, id, parms->dir, parms->type); + } + + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[parms->dir][parms->type]; + parms->hcapi_type = table_data->hcapi_type; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + + row = cfa_tcam_mgr_row_ptr_get(table_data->tcam_rows, entry->row, + row_size); + + entry->ref_cnt--; + parms->ref_cnt = entry->ref_cnt; + + cfa_tcam_mgr_free_entry(tfp, tcam_mgr_data, id, parms->dir, + parms->type); + + if (!entry->ref_cnt) { + netdev_dbg(tfp->bp->dev, "Freeing entry %d, row %d, slice %d.\n", + id, entry->row, entry->slice); + cfa_tcam_mgr_entry_free_msg(tcam_mgr_data, tfp, + parms, entry->row, + entry->slice * row->entry_size, + table_data->row_width / + table_data->max_slices * + row->entry_size, + table_data->result_size, + table_data->max_slices); + ROW_ENTRY_CLEAR(row, entry->slice); + + new_row_to_free = entry->row; + cfa_tcam_mgr_rows_combine(tcam_mgr_data, tfp, parms, + table_data, new_row_to_free); + + if (!ROW_INUSE(row)) { + cfa_tcam_mgr_rows_compact(tcam_mgr_data, tfp, + parms, table_data, + &new_row_to_free, + 
new_row_to_free); + if (new_row_to_free >= 0) + cfa_tcam_mgr_entry_free_msg(tcam_mgr_data, + tfp, parms, + new_row_to_free, 0, + table_data->row_width, + table_data->result_size, + table_data->max_slices); + } + + cfa_tcam_mgr_entry_delete(tcam_mgr_data, tfp, id); + table_data->used_entries -= 1; + netdev_dbg(tfp->bp->dev, "Freed entry %d.\n", id); + } else { + netdev_dbg(tfp->bp->dev, "Entry %d ref cnt = %d.\n", id, entry->ref_cnt); + } + + return 0; +} + +int cfa_tcam_mgr_set(struct tf *tfp, struct cfa_tcam_mgr_set_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct cfa_tcam_mgr_entry_data *entry; + struct cfa_tcam_mgr_table_rows_0 *row; + int entry_size_in_bytes; + struct tf_session *tfs; + int row_size; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return -EPERM; + } + + entry = cfa_tcam_mgr_entry_get(tcam_mgr_data, parms->id); + if (!entry) { + netdev_dbg(tfp->bp->dev, "Entry %d not found\n", parms->id); + return -EINVAL; + } + + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[parms->dir][parms->type]; + parms->hcapi_type = table_data->hcapi_type; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + row = cfa_tcam_mgr_row_ptr_get(table_data->tcam_rows, entry->row, + row_size); + + entry_size_in_bytes = table_data->row_width / + table_data->max_slices * + row->entry_size; + if (parms->key_size != entry_size_in_bytes) { + netdev_dbg(tfp->bp->dev, + "Key size(%d) is different from entry size(%d).\n", + parms->key_size, entry_size_in_bytes); + return -EINVAL; + } + + rc = cfa_tcam_mgr_entry_set_msg(tcam_mgr_data, tfp, parms, + entry->row, + entry->slice * row->entry_size, + table_data->max_slices); + if (rc) { + netdev_dbg(tfp->bp->dev, "Failed to set 
TCAM data.\n"); + return rc; + } + + netdev_dbg(tfp->bp->dev, "Set data for entry %d\n", parms->id); + + return 0; +} + +int cfa_tcam_mgr_get(struct tf *tfp, struct cfa_tcam_mgr_get_parms *parms) +{ + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct cfa_tcam_mgr_entry_data *entry; + struct cfa_tcam_mgr_table_rows_0 *row; + struct tf_session *tfs; + int row_size; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return -EPERM; + } + + entry = cfa_tcam_mgr_entry_get(tcam_mgr_data, parms->id); + if (!entry) { + netdev_dbg(tfp->bp->dev, "Entry %d not found.\n", parms->id); + return -EINVAL; + } + + table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[parms->dir][parms->type]; + parms->hcapi_type = table_data->hcapi_type; + + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, parms->dir, + parms->type); + row = cfa_tcam_mgr_row_ptr_get(table_data->tcam_rows, entry->row, + row_size); + + rc = cfa_tcam_mgr_entry_get_msg(tcam_mgr_data, tfp, parms, + entry->row, + entry->slice * row->entry_size, + table_data->max_slices); + if (rc) { + netdev_dbg(tfp->bp->dev, "Failed to read from TCAM.\n"); + return rc; + } + + return 0; +} + +void cfa_tcam_mgr_rows_dump(struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + struct cfa_tcam_mgr_table_rows_0 *table_row; + struct cfa_tcam_mgr_table_data *table_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_session *tfs; + bool row_found = false; + bool empty_row = false; + int i, row, row_size; + int rc; + + if (dir >= TF_DIR_MAX) { + netdev_dbg(tfp->bp->dev, "Must specify a valid direction (0-%d).\n", + TF_DIR_MAX - 1); + return; + } + if (type >= CFA_TCAM_MGR_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "Must specify a valid type (0-%d).\n", + 
CFA_TCAM_MGR_TBL_TYPE_MAX - 1); + return; + } + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return; + } + + table_data = &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + row_size = cfa_tcam_mgr_row_size_get(tcam_mgr_data, dir, type); + + netdev_dbg(tfp->bp->dev, "\nTCAM Rows:\n"); + netdev_dbg(tfp->bp->dev, + "Rows for direction %s, Logical table type %s\n", + tf_dir_2_str(dir), cfa_tcam_mgr_tbl_2_str(type)); + netdev_dbg(tfp->bp->dev, "Managed rows %d-%d\n", + table_data->start_row, table_data->end_row); + + netdev_dbg(tfp->bp->dev, "Index Pri Size Entry IDs\n"); + netdev_dbg(tfp->bp->dev, " Sl 0"); + for (i = 1; i < table_data->max_slices; i++) + netdev_dbg(tfp->bp->dev, " Sl %d", i); + netdev_dbg(tfp->bp->dev, "\n"); + for (row = table_data->start_row; row <= table_data->end_row; row++) { + table_row = cfa_tcam_mgr_row_ptr_get(table_data->tcam_rows, row, + row_size); + if (ROW_INUSE(table_row)) { + empty_row = false; + netdev_dbg(tfp->bp->dev, "%5u %5u %4u", row, + TF_TCAM_PRIORITY_MAX - table_row->priority - 1, + table_row->entry_size); + for (i = 0; + i < table_data->max_slices / table_row->entry_size; + i++) { + if (ROW_ENTRY_INUSE(table_row, i)) + netdev_dbg(tfp->bp->dev, " %5u", + table_row->entries[i]); + else + netdev_dbg(tfp->bp->dev, " x"); + } + netdev_dbg(tfp->bp->dev, "\n"); + row_found = true; + } else if (!empty_row) { + empty_row = true; + netdev_dbg(tfp->bp->dev, "\n"); + } + } + + if (!row_found) + netdev_dbg(tfp->bp->dev, "No rows in use.\n"); +} + +static void cfa_tcam_mgr_table_dump(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + struct cfa_tcam_mgr_table_data *table_data = + &tcam_mgr_data->cfa_tcam_mgr_tables[dir][type]; + + netdev_dbg(tfp->bp->dev, "%3s %-22s %5u %5u %5u %5u %6u %7u %2u\n", + tf_dir_2_str(dir), + 
cfa_tcam_mgr_tbl_2_str(type), table_data->row_width, + table_data->num_rows, table_data->start_row, + table_data->end_row, table_data->max_entries, + table_data->used_entries, table_data->max_slices); +} + +#define TABLE_DUMP_HEADER \ + "Dir Table Width Rows Start End " \ + "MaxEnt UsedEnt Slices\n" + +void cfa_tcam_mgr_tables_dump(struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type) +{ + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct tf_session *tfs; + int rc; + + netdev_dbg(tfp->bp->dev, "\nTCAM Table(s)\n"); + netdev_dbg(tfp->bp->dev, TABLE_DUMP_HEADER); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No TCAM data created for session\n"); + return; + } + + if (dir >= TF_DIR_MAX) { + /* Iterate over all directions */ + for (dir = 0; dir < TF_DIR_MAX; dir++) { + if (type >= CFA_TCAM_MGR_TBL_TYPE_MAX) { + /* Iterate over all types */ + for (type = 0; + type < CFA_TCAM_MGR_TBL_TYPE_MAX; + type++) { + cfa_tcam_mgr_table_dump(tcam_mgr_data, + tfp, dir, type); + } + } else { + /* Display a specific type */ + cfa_tcam_mgr_table_dump(tcam_mgr_data, tfp, + dir, type); + } + } + } else if (type >= CFA_TCAM_MGR_TBL_TYPE_MAX) { + /* Iterate over all types for a direction */ + for (type = 0; type < CFA_TCAM_MGR_TBL_TYPE_MAX; type++) + cfa_tcam_mgr_table_dump(tcam_mgr_data, tfp, dir, type); + } else { + /* Display a specific direction and type */ + cfa_tcam_mgr_table_dump(tcam_mgr_data, tfp, dir, type); + } +} + +#define ENTRY_DUMP_HEADER "Entry RefCnt Row Slice\n" + +void cfa_tcam_mgr_entries_dump(struct tf *tfp) +{ + struct cfa_tcam_mgr_data *tcam_mgr_data; + struct cfa_tcam_mgr_entry_data *entry; + bool entry_found = false; + struct tf_session *tfs; + u16 id; + int rc; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return; + + tcam_mgr_data = tfs->tcam_mgr_handle; + if (!tcam_mgr_data) { + netdev_dbg(tfp->bp->dev, "No 
TCAM data created for session\n"); + return; + } + + netdev_dbg(tfp->bp->dev, "\nGlobal Maximum Entries: %d\n\n", + tcam_mgr_data->cfa_tcam_mgr_max_entries); + netdev_dbg(tfp->bp->dev, "TCAM Entry Table:\n"); + for (id = 0; id < tcam_mgr_data->cfa_tcam_mgr_max_entries; id++) { + if (tcam_mgr_data->entry_data[id].ref_cnt > 0) { + entry = &tcam_mgr_data->entry_data[id]; + if (!entry_found) + netdev_dbg(tfp->bp->dev, ENTRY_DUMP_HEADER); + netdev_dbg(tfp->bp->dev, "%5u %5u %5u %5u", id, entry->ref_cnt, + entry->row, entry->slice); + netdev_dbg(tfp->bp->dev, "\n"); + entry_found = true; + } + } + + if (!entry_found) + netdev_dbg(tfp->bp->dev, "No entries found.\n"); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.h new file mode 100644 index 000000000000..fc7c3b4716b0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr.h @@ -0,0 +1,297 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. + */ + +#ifndef _CFA_TCAM_MGR_H_ +#define _CFA_TCAM_MGR_H_ + +#include +#include "bnxt_hsi.h" +#include "tf_core.h" + +/* The TCAM module provides processing of Internal TCAM types. 
*/ + +#ifndef TF_TCAM_MAX_SESSIONS +#define TF_TCAM_MAX_SESSIONS 16 +#endif + +#define ENTRY_ID_INVALID 65535 + +#define TF_TCAM_PRIORITY_MIN 0 +#define TF_TCAM_PRIORITY_MAX 65535 + +#define CFA_TCAM_MGR_TBL_TYPE_START 0 + +/* Logical TCAM tables */ +enum cfa_tcam_mgr_tbl_type { + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM = + CFA_TCAM_MGR_TBL_TYPE_START, + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS, + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM, + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS, + CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM, + CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS, + CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM, + CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS, + CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM, + CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS, + CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM, + CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS, + CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM, + CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS, + CFA_TCAM_MGR_TBL_TYPE_MAX +}; + +enum cfa_tcam_mgr_device_type { + CFA_TCAM_MGR_DEVICE_TYPE_WH = 0, /* Whitney+ */ + CFA_TCAM_MGR_DEVICE_TYPE_SR, /* Stingray */ + CFA_TCAM_MGR_DEVICE_TYPE_THOR, /* Thor */ + CFA_TCAM_MGR_DEVICE_TYPE_MAX /* Maximum */ +}; + +/** + * TCAM Manager initialization parameters + * + * @resc: TCAM resources reserved type element is not used. + * @max_entries: maximum number of entries available. + */ +struct cfa_tcam_mgr_init_parms { + struct tf_rm_resc_entry resc[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX]; + u32 max_entries; +}; + +/** + * TCAM Manager initialization parameters + * + * These are bitmasks. Set if TCAM Manager is managing a logical TCAM. + * Each bitmask is indexed by logical TCAM table ID. 
+ */ +struct cfa_tcam_mgr_qcaps_parms { + u32 rx_tcam_supported; + u32 tx_tcam_supported; +}; + +/** + * TCAM Manager configuration parameters + * + * @num_elements: Number of tcam types in each of the configuration arrays + * @tcam_cnt: Session resource allocations + * @resv_res: TCAM Locations reserved + */ +struct cfa_tcam_mgr_cfg_parms { + u16 num_elements; + u16 tcam_cnt[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX]; + struct tf_rm_resc_entry (*resv_res)[CFA_TCAM_MGR_TBL_TYPE_MAX]; +}; + +/** + * TCAM Manager allocation parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @hcapi_type: Type of HCAPI + * @key_size: key size (bytes) + * @priority: Priority of entry requested (definition TBD) + * @id: Id of allocated entry or found entry (if search_enable) + */ +struct cfa_tcam_mgr_alloc_parms { + enum tf_dir dir; + enum cfa_tcam_mgr_tbl_type type; + u16 hcapi_type; + u16 key_size; + u16 priority; + u16 id; +}; + +/** + * TCAM Manager free parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation. If the type is not known, set the + * type to CFA_TCAM_MGR_TBL_TYPE_MAX. + * @hcapi_type: Type of HCAPI + * @id: Entry ID to free + * @ref_cnt: Reference count after free, only valid if session has been + * created with shadow_copy. 
+ */ +struct cfa_tcam_mgr_free_parms { + enum tf_dir dir; + enum cfa_tcam_mgr_tbl_type type; + u16 hcapi_type; + u16 id; + u16 ref_cnt; +}; + +/** + * TCAM Manager set parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to set + * @hcapi_type: Type of HCAPI + * @id: Entry ID to write to + * @key: array containing key + * @mask: array containing mask fields + * @key_size: key size (bytes) + * @result: array containing result + * @result_size: result size (bytes) + */ +struct cfa_tcam_mgr_set_parms { + enum tf_dir dir; + enum cfa_tcam_mgr_tbl_type type; + u16 hcapi_type; + u16 id; + u8 *key; + u8 *mask; + u16 key_size; + u8 *result; + u16 result_size; +}; + +/** + * TCAM Manager get parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @hcapi_type: Type of HCAPI + * @id: Entry ID to read + * @key: array containing key + * @mask: array containing mask fields + * @key_size: key size (bytes) + * @result: array containing result + * @result_size: result size (bytes) + */ +struct cfa_tcam_mgr_get_parms { + enum tf_dir dir; + enum cfa_tcam_mgr_tbl_type type; + u16 hcapi_type; + u16 id; + u8 *key; + u8 *mask; + u16 key_size; + u8 *result; + u16 result_size; +}; + +const char *cfa_tcam_mgr_tbl_2_str(enum cfa_tcam_mgr_tbl_type tcam_type); + +/** + * Initializes the TCAM Manager + * + * @type: Device type + * + * Returns + * - (0) if successful. + * - (<0) on failure. + */ +int cfa_tcam_mgr_init(struct tf *tfp, enum cfa_tcam_mgr_device_type type, + struct cfa_tcam_mgr_init_parms *parms); + +/** + * Returns the physical TCAM table that a logical TCAM table uses. + * + * @type: Logical table type + * + * Returns + * - (tf_tcam_tbl_type) if successful. + * - (<0) on failure. + */ +int cfa_tcam_mgr_get_phys_table_type(enum cfa_tcam_mgr_tbl_type type); + +/** + * Queries the capabilities of TCAM Manager. 
+ * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters to be returned + * + * Returns + * - (0) if successful. + * - (<0) on failure. + */ +int cfa_tcam_mgr_qcaps(struct tf *tfp, struct cfa_tcam_mgr_qcaps_parms *parms); + +/** + * Initializes the TCAM module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int cfa_tcam_mgr_bind(struct tf *tfp, struct cfa_tcam_mgr_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int cfa_tcam_mgr_unbind(struct tf *tfp); + +/** + * Allocates the requested tcam type from the internal RM DB. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int cfa_tcam_mgr_alloc(struct tf *tfp, struct cfa_tcam_mgr_alloc_parms *parms); + +/** + * Frees the requested table type and returns it to the DB. If shadow + * DB is enabled it is searched first and if found the element refcount + * is decremented. If refcount goes to 0 then it is returned to the + * table type DB. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int cfa_tcam_mgr_free(struct tf *tfp, struct cfa_tcam_mgr_free_parms *parms); + +/** + * Configures the requested element by sending a firmware request which + * then installs it into the device internal structures. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int cfa_tcam_mgr_set(struct tf *tfp, struct cfa_tcam_mgr_set_parms *parms); + +/** + * Retrieves the requested element by sending a firmware request to get + * the element. + * + * @tfp: Pointer to Truflow handle + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int cfa_tcam_mgr_get(struct tf *tfp, struct cfa_tcam_mgr_get_parms *parms); + +void cfa_tcam_mgr_rows_dump(struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type); +void cfa_tcam_mgr_tables_dump(struct tf *tfp, enum tf_dir dir, + enum cfa_tcam_mgr_tbl_type type); +void cfa_tcam_mgr_entries_dump(struct tf *tfp); + +#endif /* _CFA_TCAM_MGR_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_device.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_device.h new file mode 100644 index 000000000000..f910bcd399e0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_device.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef CFA_TCAM_MGR_DEVICE_H +#define CFA_TCAM_MGR_DEVICE_H + +#include "cfa_tcam_mgr.h" + +struct cfa_tcam_mgr_data; + +/* HW OP definitions */ +typedef int (*cfa_tcam_mgr_hwop_set_func_t)(struct cfa_tcam_mgr_data + *tcam_mgr_data, + struct cfa_tcam_mgr_set_parms + *parms, int row, int slice, + int max_slices); +typedef int (*cfa_tcam_mgr_hwop_get_func_t)(struct cfa_tcam_mgr_data + *tcam_mgr_data, + struct cfa_tcam_mgr_get_parms + *parms, int row, int slice, + int max_slices); +typedef int (*cfa_tcam_mgr_hwop_free_func_t)(struct cfa_tcam_mgr_data + *tcam_mgr_data, + struct cfa_tcam_mgr_free_parms + *parms, int row, int slice, + int max_slices); + +struct cfa_tcam_mgr_hwops_funcs { + cfa_tcam_mgr_hwop_set_func_t set; + cfa_tcam_mgr_hwop_get_func_t get; + cfa_tcam_mgr_hwop_free_func_t free; +}; + +/* End: HW OP definitions */ + +/* This identifier is to be used for one-off variable sizes. + * Do not use it for sizing keys in an array. + */ +#define CFA_TCAM_MGR_MAX_KEY_SIZE 96 + +/* Note that this macro's arguments are not macro expanded due to + * concatenation. + */ +#define TF_TCAM_TABLE_ROWS_DEF(_slices) \ + struct cfa_tcam_mgr_table_rows_ ## _slices { \ + u16 priority; \ + u8 entry_size; /* Slices per entry */ \ + u8 entry_inuse; /* bit[entry] set if in use */ \ + u16 entries[_slices]; \ + } + +/* Have to explicitly declare this struct since some compilers don't + * accept the GNU C extension of zero length arrays. 
+ */ +struct cfa_tcam_mgr_table_rows_0 { + u16 priority; + u8 entry_size; /* Slices per entry */ + u8 entry_inuse; /* bit[entry] set if in use */ + u16 entries[]; +}; + +TF_TCAM_TABLE_ROWS_DEF(1); +TF_TCAM_TABLE_ROWS_DEF(2); +TF_TCAM_TABLE_ROWS_DEF(4); +TF_TCAM_TABLE_ROWS_DEF(8); + +#define TF_TCAM_MAX_ENTRIES (L2_CTXT_TCAM_RX_MAX_ENTRIES + \ + L2_CTXT_TCAM_TX_MAX_ENTRIES + \ + PROF_TCAM_RX_MAX_ENTRIES + \ + PROF_TCAM_TX_MAX_ENTRIES + \ + WC_TCAM_RX_MAX_ENTRIES + \ + WC_TCAM_TX_MAX_ENTRIES + \ + SP_TCAM_RX_MAX_ENTRIES + \ + SP_TCAM_TX_MAX_ENTRIES + \ + CT_RULE_TCAM_RX_MAX_ENTRIES + \ + CT_RULE_TCAM_TX_MAX_ENTRIES + \ + VEB_TCAM_RX_MAX_ENTRIES + \ + VEB_TCAM_TX_MAX_ENTRIES) + +#define TCAM_SET_END_ROW(n) ((n) ? (n) - 1 : 0) + +#define L2_CTXT_TCAM_RX_APP_LO_START (L2_CTXT_TCAM_RX_NUM_ROWS / 2) +#define L2_CTXT_TCAM_RX_APP_HI_END (L2_CTXT_TCAM_RX_APP_LO_START - 1) +#define L2_CTXT_TCAM_TX_APP_LO_START (L2_CTXT_TCAM_TX_NUM_ROWS / 2) +#define L2_CTXT_TCAM_TX_APP_HI_END (L2_CTXT_TCAM_TX_APP_LO_START - 1) + +struct cfa_tcam_mgr_entry_data { + u16 row; + u8 slice; + u8 ref_cnt; +}; + +struct cfa_tcam_mgr_table_data { + struct cfa_tcam_mgr_table_rows_0 *tcam_rows; + u16 hcapi_type; + u16 num_rows; /* Rows in physical TCAM */ + u16 start_row; /* Where the logical TCAM starts */ + u16 end_row; /* Where the logical TCAM ends */ + u16 max_entries; + u16 used_entries; + u8 row_width; /* bytes */ + u8 result_size; /* bytes */ + u8 max_slices; +}; + +struct cfa_tcam_mgr_data { + int cfa_tcam_mgr_max_entries; + struct cfa_tcam_mgr_table_data + cfa_tcam_mgr_tables[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX]; + void *table_rows; + struct cfa_tcam_mgr_entry_data *entry_data; + unsigned long *session_bmp; + unsigned long session_bmp_size; + void *row_tables[TF_DIR_MAX][TF_TCAM_TBL_TYPE_MAX]; + void *rx_row_data; + void *tx_row_data; + struct cfa_tcam_mgr_hwops_funcs hwop_funcs; +}; + +#endif /* CFA_TCAM_MGR_DEVICE_H */ diff --git 
a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.c new file mode 100644 index 000000000000..18ab237bee44 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2021-2022 Broadcom + * All rights reserved. + */ + +/* This file will "do the right thing" for each of the primitives set, get and + * free. + * + * If TCAM manager is running stand-alone, the tables will be shadowed here. + * + * If TCAM manager is running in the core, the tables will also be shadowed. + * Set and free messages will also be sent to the firmware. Instead of sending + * get messages, the entry will be read from the shadow copy thus saving a + * firmware message. + * + * Support for running in firmware has not yet been added. + */ + +#include "tf_tcam.h" +#include "hcapi_cfa_defs.h" +#include "cfa_tcam_mgr.h" +#include "cfa_tcam_mgr_device.h" +#include "cfa_tcam_mgr_hwop_msg.h" +#include "cfa_tcam_mgr_p58.h" +#include "cfa_tcam_mgr_p4.h" +#include "tf_session.h" +#include "tf_msg.h" +#include "tf_util.h" + +int cfa_tcam_mgr_hwops_init(struct cfa_tcam_mgr_data *tcam_mgr_data, + enum cfa_tcam_mgr_device_type type) +{ + struct cfa_tcam_mgr_hwops_funcs *hwop_funcs = + &tcam_mgr_data->hwop_funcs; + + switch (type) { + case CFA_TCAM_MGR_DEVICE_TYPE_WH: + case CFA_TCAM_MGR_DEVICE_TYPE_SR: + return cfa_tcam_mgr_hwops_get_funcs_p4(hwop_funcs); + case CFA_TCAM_MGR_DEVICE_TYPE_THOR: + return cfa_tcam_mgr_hwops_get_funcs_p58(hwop_funcs); + default: + return -ENODEV; + } +} + +/* This is the glue between the TCAM manager and the firmware HW operations. + * It is intended to abstract out the location of the TCAM manager so that + * the TCAM manager code will be the same whether or not it is actually using + * the firmware. 
+ * + * There are three possibilities: + * - TCAM manager is running in the core. + * These APIs will cause HW RM messages to be sent. + * CFA_TCAM_MGR_CORE + * - TCAM manager is running in firmware. + * These APIs will call the HW ops functions. + * + * - TCAM manager is running standalone. + * These APIs will access all data in memory. + * CFA_TCAM_MGR_STANDALONE + */ +int cfa_tcam_mgr_entry_set_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_set_parms *parms, + int row, int slice, int max_slices) +{ + enum tf_tcam_tbl_type type = + cfa_tcam_mgr_get_phys_table_type(parms->type); + cfa_tcam_mgr_hwop_set_func_t set_func; + struct tf_tcam_set_parms sparms; + struct tf_dev_info *dev; + struct tf_session *tfs; + u8 fw_session_id; + int rc; + + set_func = tcam_mgr_data->hwop_funcs.set; + if (!set_func) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + memset(&sparms, 0, sizeof(sparms)); + sparms.dir = parms->dir; + sparms.type = type; + sparms.hcapi_type = parms->hcapi_type; + sparms.idx = (row * max_slices) + slice; + sparms.key = parms->key; + sparms.mask = parms->mask; + sparms.key_size = parms->key_size; + sparms.result = parms->result; + sparms.result_size = parms->result_size; + + netdev_dbg(tfp->bp->dev, + "%s: %s row:%d slice:%d set tcam physical idx 0x%x\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + row, slice, sparms.idx); + + rc = tf_msg_tcam_entry_set(tfp, &sparms, fw_session_id); + if (rc) { + netdev_err(tfp->bp->dev, + "%s: %s entry:%d set tcam failed, rc:%d\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + parms->id, -rc); + return rc; + } + + return set_func(tcam_mgr_data, parms, row, slice, 
max_slices); +} + +int cfa_tcam_mgr_entry_get_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_get_parms *parms, + int row, int slice, int max_slices) +{ + cfa_tcam_mgr_hwop_get_func_t get_func; + + get_func = tcam_mgr_data->hwop_funcs.get; + if (!get_func) + return -EINVAL; + + return get_func(tcam_mgr_data, parms, row, slice, max_slices); +} + +int cfa_tcam_mgr_entry_free_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_free_parms *parms, + int row, int slice, int key_size, + int result_size, int max_slices) +{ + enum tf_tcam_tbl_type type = + cfa_tcam_mgr_get_phys_table_type(parms->type); + u8 mask[CFA_TCAM_MGR_MAX_KEY_SIZE] = { 0 }; + u8 key[CFA_TCAM_MGR_MAX_KEY_SIZE] = { 0 }; + cfa_tcam_mgr_hwop_free_func_t free_func; + struct tf_tcam_set_parms sparms; + struct tf_dev_info *dev; + struct tf_session *tfs; + u8 fw_session_id; + int rc; + + free_func = tcam_mgr_data->hwop_funcs.free; + if (!free_func) + return -EINVAL; + + /* The free hwop will free more than a single slice (an entire row), + * so cannot be used. 
Use set message to clear an individual entry + */ + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + if (key_size > CFA_TCAM_MGR_MAX_KEY_SIZE) { + netdev_dbg(tfp->bp->dev, + "%s: %s entry:%d key size:%d > %d\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + parms->id, key_size, CFA_TCAM_MGR_MAX_KEY_SIZE); + return -EINVAL; + } + + if (result_size > CFA_TCAM_MGR_MAX_KEY_SIZE) { + netdev_dbg(tfp->bp->dev, + "%s: %s entry:%d result size:%d > %d\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + parms->id, result_size, CFA_TCAM_MGR_MAX_KEY_SIZE); + return -EINVAL; + } + + memset(&sparms, 0, sizeof(sparms)); + memset(&key, 0, sizeof(key)); + memset(&mask, 0xff, sizeof(mask)); + + sparms.dir = parms->dir; + sparms.type = type; + sparms.hcapi_type = parms->hcapi_type; + sparms.key = key; + sparms.mask = mask; + sparms.result = key; + sparms.idx = (row * max_slices) + slice; + sparms.key_size = key_size; + sparms.result_size = result_size; + + netdev_dbg(tfp->bp->dev, + "%s: %s row:%d slice:%d free idx:%d key_sz:%d res_sz:%d\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + row, slice, sparms.idx, key_size, result_size); + + rc = tf_msg_tcam_entry_set(tfp, &sparms, fw_session_id); + if (rc) { + netdev_err(tfp->bp->dev, + "%s: %s row:%d slice:%d set tcam failed, rc:%d\n", + tf_dir_2_str(parms->dir), + cfa_tcam_mgr_tbl_2_str(parms->type), + row, slice, rc); + return rc; + } + + return free_func(tcam_mgr_data, parms, row, slice, max_slices); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.h new file mode 100644 index 000000000000..176dfa6bff67 
--- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_hwop_msg.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. + */ + +#ifndef CFA_TCAM_MGR_HWOP_MSG_H +#define CFA_TCAM_MGR_HWOP_MSG_H + +int cfa_tcam_mgr_hwops_init(struct cfa_tcam_mgr_data *tcam_mgr_data, + enum cfa_tcam_mgr_device_type type); +int cfa_tcam_mgr_entry_set_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_set_parms *parms, + int row, int slice, int max_slices); +int cfa_tcam_mgr_entry_get_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_get_parms *parms, + int row, int slice, int max_slices); +int cfa_tcam_mgr_entry_free_msg(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct tf *tfp, + struct cfa_tcam_mgr_free_parms *parms, + int row, int slice, int key_size, + int result_size, int max_slices); + +#endif /* CFA_TCAM_MGR_HWOP_MSG_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.c new file mode 100644 index 000000000000..4d5980b92e54 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.c @@ -0,0 +1,857 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2021-2022 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include "hcapi_cfa_defs.h" +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "cfa_tcam_mgr.h" +#include "cfa_tcam_mgr_p4.h" +#include "cfa_tcam_mgr_device.h" +#include "cfa_resource_types.h" +#include "tf_util.h" +#include "tf_session.h" + +/* Sizings of the TCAMs on Whitney+/Stingray */ +#define MAX_ROW_WIDTH 48 +#define MAX_RESULT_SIZE 8 + +/* TCAM definitions + * + * These define the TCAMs in HW. + * + * Note: Set xxx_TCAM_[R|T]X_NUM_ROWS to zero if a TCAM is either not supported + * by HW or not supported by TCAM Manager. 
+ */ + +/* L2 Context TCAM */ +#define L2_CTXT_TCAM_RX_MAX_SLICES 1 +#define L2_CTXT_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(167) +#define L2_CTXT_TCAM_RX_NUM_ROWS 1024 +#define L2_CTXT_TCAM_RX_MAX_ENTRIES (L2_CTXT_TCAM_RX_MAX_SLICES * \ + L2_CTXT_TCAM_RX_NUM_ROWS) +#define L2_CTXT_TCAM_RX_RESULT_SIZE 8 + +#define L2_CTXT_TCAM_TX_MAX_SLICES L2_CTXT_TCAM_RX_MAX_SLICES +#define L2_CTXT_TCAM_TX_ROW_WIDTH L2_CTXT_TCAM_RX_ROW_WIDTH +#define L2_CTXT_TCAM_TX_NUM_ROWS L2_CTXT_TCAM_RX_NUM_ROWS +#define L2_CTXT_TCAM_TX_MAX_ENTRIES L2_CTXT_TCAM_RX_MAX_ENTRIES +#define L2_CTXT_TCAM_TX_RESULT_SIZE L2_CTXT_TCAM_RX_RESULT_SIZE + +/* Profile TCAM */ +#define PROF_TCAM_RX_MAX_SLICES 1 +#define PROF_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(81) +#define PROF_TCAM_RX_NUM_ROWS 1024 +#define PROF_TCAM_RX_MAX_ENTRIES (PROF_TCAM_RX_MAX_SLICES * \ + PROF_TCAM_RX_NUM_ROWS) +#define PROF_TCAM_RX_RESULT_SIZE 8 + +#define PROF_TCAM_TX_MAX_SLICES PROF_TCAM_RX_MAX_SLICES +#define PROF_TCAM_TX_ROW_WIDTH PROF_TCAM_RX_ROW_WIDTH +#define PROF_TCAM_TX_NUM_ROWS PROF_TCAM_RX_NUM_ROWS +#define PROF_TCAM_TX_MAX_ENTRIES PROF_TCAM_RX_MAX_ENTRIES +#define PROF_TCAM_TX_RESULT_SIZE PROF_TCAM_RX_RESULT_SIZE + +/* Wildcard TCAM */ +#define WC_TCAM_RX_MAX_SLICES 4 +/* 82 bits per slice */ +#define WC_TCAM_RX_ROW_WIDTH (TF_BITS2BYTES_WORD_ALIGN(82) * \ + WC_TCAM_RX_MAX_SLICES) +#define WC_TCAM_RX_NUM_ROWS 256 +#define WC_TCAM_RX_MAX_ENTRIES (WC_TCAM_RX_MAX_SLICES * WC_TCAM_RX_NUM_ROWS) +#define WC_TCAM_RX_RESULT_SIZE 4 + +#define WC_TCAM_TX_MAX_SLICES WC_TCAM_RX_MAX_SLICES +#define WC_TCAM_TX_ROW_WIDTH WC_TCAM_RX_ROW_WIDTH +#define WC_TCAM_TX_NUM_ROWS WC_TCAM_RX_NUM_ROWS +#define WC_TCAM_TX_MAX_ENTRIES WC_TCAM_RX_MAX_ENTRIES +#define WC_TCAM_TX_RESULT_SIZE WC_TCAM_RX_RESULT_SIZE + +/* Source Properties TCAM */ +#define SP_TCAM_RX_MAX_SLICES 1 +#define SP_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(89) +#define SP_TCAM_RX_NUM_ROWS 512 +#define SP_TCAM_RX_MAX_ENTRIES (SP_TCAM_RX_MAX_SLICES * 
SP_TCAM_RX_NUM_ROWS) +#define SP_TCAM_RX_RESULT_SIZE 8 + +#define SP_TCAM_TX_MAX_SLICES SP_TCAM_RX_MAX_SLICES +#define SP_TCAM_TX_ROW_WIDTH SP_TCAM_RX_ROW_WIDTH +#define SP_TCAM_TX_NUM_ROWS SP_TCAM_RX_NUM_ROWS +#define SP_TCAM_TX_MAX_ENTRIES SP_TCAM_RX_MAX_ENTRIES +#define SP_TCAM_TX_RESULT_SIZE SP_TCAM_RX_RESULT_SIZE + +/* Connection Tracking Rule TCAM */ +#define CT_RULE_TCAM_RX_MAX_SLICES 1 +#define CT_RULE_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(16) +#define CT_RULE_TCAM_RX_NUM_ROWS 16 +#define CT_RULE_TCAM_RX_MAX_ENTRIES (CT_RULE_TCAM_RX_MAX_SLICES * \ + CT_RULE_TCAM_RX_NUM_ROWS) +#define CT_RULE_TCAM_RX_RESULT_SIZE 8 + +#define CT_RULE_TCAM_TX_MAX_SLICES CT_RULE_TCAM_RX_MAX_SLICES +#define CT_RULE_TCAM_TX_ROW_WIDTH CT_RULE_TCAM_RX_ROW_WIDTH +#define CT_RULE_TCAM_TX_NUM_ROWS CT_RULE_TCAM_RX_NUM_ROWS +#define CT_RULE_TCAM_TX_MAX_ENTRIES CT_RULE_TCAM_RX_MAX_ENTRIES +#define CT_RULE_TCAM_TX_RESULT_SIZE CT_RULE_TCAM_RX_RESULT_SIZE + +/* Virtual Edge Bridge TCAM */ +#define VEB_TCAM_RX_MAX_SLICES 1 +#define VEB_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(78) +/* Tx only */ +#define VEB_TCAM_RX_NUM_ROWS 1024 +#define VEB_TCAM_RX_MAX_ENTRIES (VEB_TCAM_RX_MAX_SLICES * VEB_TCAM_RX_NUM_ROWS) +#define VEB_TCAM_RX_RESULT_SIZE 8 + +#define VEB_TCAM_TX_MAX_SLICES VEB_TCAM_RX_MAX_SLICES +#define VEB_TCAM_TX_ROW_WIDTH VEB_TCAM_RX_ROW_WIDTH +#define VEB_TCAM_TX_NUM_ROWS 1024 +#define VEB_TCAM_TX_MAX_ENTRIES (VEB_TCAM_TX_MAX_SLICES * VEB_TCAM_TX_NUM_ROWS) +#define VEB_TCAM_TX_RESULT_SIZE VEB_TCAM_RX_RESULT_SIZE + +/* Declare the table rows for each table here. If new tables are added to the + * enum tf_tcam_tbl_type, then new declarations will be needed here. + * + * The numeric suffix of the structure type indicates how many slices a + * particular TCAM supports. 
+ */ + +struct cfa_tcam_mgr_table_rows_p4 { + struct cfa_tcam_mgr_table_rows_1 + table_rows_L2_CTXT_TCAM_RX[L2_CTXT_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_L2_CTXT_TCAM_TX[L2_CTXT_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_PROF_TCAM_RX[PROF_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_PROF_TCAM_TX[PROF_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_4 + table_rows_WC_TCAM_RX[WC_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_4 + table_rows_WC_TCAM_TX[WC_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_SP_TCAM_RX[SP_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_SP_TCAM_TX[SP_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_CT_RULE_TCAM_RX[CT_RULE_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_CT_RULE_TCAM_TX[CT_RULE_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_VEB_TCAM_RX[VEB_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_VEB_TCAM_TX[VEB_TCAM_TX_NUM_ROWS]; +}; + +struct cfa_tcam_mgr_table_data +cfa_tcam_mgr_tables_p4[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX] = { + { /* RX */ + { /* High AFM */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH, + }, + { /* High APPS */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = L2_CTXT_TCAM_RX_APP_HI_END, + .max_entries = (L2_CTXT_TCAM_RX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH, + }, + { /* Low AFM */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, 
+ .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW, + }, + { /* Low APPS */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, + .start_row = L2_CTXT_TCAM_RX_APP_LO_START, + .end_row = L2_CTXT_TCAM_RX_NUM_ROWS - 1, + .max_entries = (L2_CTXT_TCAM_RX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW, + }, + { /* AFM */ + .max_slices = PROF_TCAM_RX_MAX_SLICES, + .row_width = PROF_TCAM_RX_ROW_WIDTH, + .num_rows = PROF_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = PROF_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_PROF_TCAM, + }, + { /* APPS */ + .max_slices = PROF_TCAM_RX_MAX_SLICES, + .row_width = PROF_TCAM_RX_ROW_WIDTH, + .num_rows = PROF_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = PROF_TCAM_RX_NUM_ROWS - 1, + .max_entries = PROF_TCAM_RX_MAX_ENTRIES, + .result_size = PROF_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_PROF_TCAM, + }, + { /* AFM */ + .max_slices = WC_TCAM_RX_MAX_SLICES, + .row_width = WC_TCAM_RX_ROW_WIDTH, + .num_rows = WC_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = WC_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_WC_TCAM, + }, + { /* APPS */ + .max_slices = WC_TCAM_RX_MAX_SLICES, + .row_width = WC_TCAM_RX_ROW_WIDTH, + .num_rows = WC_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = WC_TCAM_RX_NUM_ROWS - 1, + .max_entries = WC_TCAM_RX_MAX_ENTRIES, + .result_size = WC_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_WC_TCAM, + }, + { /* AFM */ + .max_slices = SP_TCAM_RX_MAX_SLICES, + .row_width = SP_TCAM_RX_ROW_WIDTH, + .num_rows = SP_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = SP_TCAM_RX_RESULT_SIZE, + .hcapi_type = 
CFA_RESOURCE_TYPE_P4_SP_TCAM, + }, + { /* APPS */ + .max_slices = SP_TCAM_RX_MAX_SLICES, + .row_width = SP_TCAM_RX_ROW_WIDTH, + .num_rows = SP_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = SP_TCAM_RX_NUM_ROWS - 1, + .max_entries = SP_TCAM_RX_MAX_ENTRIES, + .result_size = SP_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_SP_TCAM, + }, + { /* AFM */ + .max_slices = CT_RULE_TCAM_RX_MAX_SLICES, + .row_width = CT_RULE_TCAM_RX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = CT_RULE_TCAM_RX_MAX_SLICES, + .row_width = CT_RULE_TCAM_RX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = + TCAM_SET_END_ROW(CT_RULE_TCAM_RX_NUM_ROWS), + .max_entries = CT_RULE_TCAM_RX_MAX_ENTRIES, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = VEB_TCAM_RX_MAX_SLICES, + .row_width = VEB_TCAM_RX_ROW_WIDTH, + .num_rows = VEB_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = VEB_TCAM_RX_MAX_SLICES, + .row_width = VEB_TCAM_RX_ROW_WIDTH, + .num_rows = VEB_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = + TCAM_SET_END_ROW(VEB_TCAM_RX_NUM_ROWS), + .max_entries = VEB_TCAM_RX_MAX_ENTRIES, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + }, + }, + { /* TX */ + { /* AFM */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH, + }, + { /* APPS */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = L2_CTXT_TCAM_TX_APP_HI_END, + .max_entries = (L2_CTXT_TCAM_TX_MAX_ENTRIES / 
2), + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH, + }, + { /* AFM */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW, + }, + { /* APPS */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = L2_CTXT_TCAM_TX_APP_LO_START, + .end_row = L2_CTXT_TCAM_TX_NUM_ROWS - 1, + .max_entries = (L2_CTXT_TCAM_TX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW, + }, + { /* AFM */ + .max_slices = PROF_TCAM_TX_MAX_SLICES, + .row_width = PROF_TCAM_TX_ROW_WIDTH, + .num_rows = PROF_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = PROF_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_PROF_TCAM, + }, + { /* APPS */ + .max_slices = PROF_TCAM_TX_MAX_SLICES, + .row_width = PROF_TCAM_TX_ROW_WIDTH, + .num_rows = PROF_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = PROF_TCAM_TX_NUM_ROWS - 1, + .max_entries = PROF_TCAM_TX_MAX_ENTRIES, + .result_size = PROF_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_PROF_TCAM, + }, + { /* AFM */ + .max_slices = WC_TCAM_TX_MAX_SLICES, + .row_width = WC_TCAM_TX_ROW_WIDTH, + .num_rows = WC_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = WC_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_WC_TCAM, + }, + { /* APPS */ + .max_slices = WC_TCAM_TX_MAX_SLICES, + .row_width = WC_TCAM_TX_ROW_WIDTH, + .num_rows = WC_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = WC_TCAM_TX_NUM_ROWS - 1, + .max_entries = WC_TCAM_TX_MAX_ENTRIES, + .result_size = WC_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_WC_TCAM, + }, + { 
/* AFM */ + .max_slices = SP_TCAM_TX_MAX_SLICES, + .row_width = SP_TCAM_TX_ROW_WIDTH, + .num_rows = SP_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = SP_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_SP_TCAM, + }, + { /* APPS */ + .max_slices = SP_TCAM_TX_MAX_SLICES, + .row_width = SP_TCAM_TX_ROW_WIDTH, + .num_rows = SP_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = SP_TCAM_TX_NUM_ROWS - 1, + .max_entries = SP_TCAM_TX_MAX_ENTRIES, + .result_size = SP_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P4_SP_TCAM, + }, + { /* AFM */ + .max_slices = CT_RULE_TCAM_TX_MAX_SLICES, + .row_width = CT_RULE_TCAM_TX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = CT_RULE_TCAM_TX_MAX_SLICES, + .row_width = CT_RULE_TCAM_TX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = + TCAM_SET_END_ROW(CT_RULE_TCAM_TX_NUM_ROWS), + .max_entries = CT_RULE_TCAM_TX_MAX_ENTRIES, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = VEB_TCAM_TX_MAX_SLICES, + .row_width = VEB_TCAM_TX_ROW_WIDTH, + .num_rows = VEB_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = VEB_TCAM_TX_MAX_SLICES, + .row_width = VEB_TCAM_TX_ROW_WIDTH, + .num_rows = VEB_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = VEB_TCAM_TX_NUM_ROWS - 1, + .max_entries = VEB_TCAM_TX_MAX_ENTRIES, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + }, + }, +}; + +static int cfa_tcam_mgr_row_data_alloc(struct cfa_tcam_mgr_data *tcam_mgr_data); +static void cfa_tcam_mgr_row_data_free(struct cfa_tcam_mgr_data *tcam_mgr_data); + +static void cfa_tcam_mgr_data_free(struct tf_session *tfs) +{ + struct cfa_tcam_mgr_data *tcam_mgr_data = tfs->tcam_mgr_handle; + + if (!tcam_mgr_data) + return; + + 
vfree(tcam_mgr_data->table_rows); + vfree(tcam_mgr_data->entry_data); + vfree(tcam_mgr_data->session_bmp); + cfa_tcam_mgr_row_data_free(tcam_mgr_data); + + vfree(tcam_mgr_data); + tfs->tcam_mgr_handle = NULL; +} + +int cfa_tcam_mgr_init_p4(struct tf *tfp) +{ + struct cfa_tcam_mgr_table_rows_p4 *table_rows; + struct cfa_tcam_mgr_entry_data *entry_data; + struct cfa_tcam_mgr_data *tcam_mgr_data; + int max_result_size = 0; + struct tf_session *tfs; + int max_row_width = 0; + int dir, type; + int rc; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tcam_mgr_data = vzalloc(sizeof(*tcam_mgr_data)); + if (!tcam_mgr_data) + return -ENOMEM; + tfs->tcam_mgr_handle = tcam_mgr_data; + + table_rows = vzalloc(sizeof(*table_rows)); + if (!table_rows) { + rc = -ENOMEM; + goto fail; + } + tcam_mgr_data->table_rows = table_rows; + + entry_data = vzalloc(sizeof(*entry_data) * TF_TCAM_MAX_ENTRIES); + if (!entry_data) { + rc = -ENOMEM; + goto fail; + } + tcam_mgr_data->entry_data = entry_data; + + rc = cfa_tcam_mgr_row_data_alloc(tcam_mgr_data); + if (rc) + goto fail; + + memcpy(&tcam_mgr_data->cfa_tcam_mgr_tables, + &cfa_tcam_mgr_tables_p4, + sizeof(tcam_mgr_data->cfa_tcam_mgr_tables)); + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_TX[0]; + + 
tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_L2_CTXT_TCAM_TX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_PROF_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_PROF_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_PROF_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_PROF_TCAM_TX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_WC_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_WC_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 
*) + &table_rows->table_rows_WC_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_WC_TCAM_TX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_SP_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_SP_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_SP_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_SP_TCAM_TX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_CT_RULE_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_CT_RULE_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_CT_RULE_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_CT_RULE_TCAM_TX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_VEB_TCAM_RX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX] + [CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS].tcam_rows = + (struct 
cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_VEB_TCAM_RX[0]; + + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_VEB_TCAM_TX[0]; + tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX] + [CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS].tcam_rows = + (struct cfa_tcam_mgr_table_rows_0 *) + &table_rows->table_rows_VEB_TCAM_TX[0]; + for (dir = 0; dir < TF_DIR_MAX; dir++) { + for (type = 0; type < CFA_TCAM_MGR_TBL_TYPE_MAX; type++) { + if (tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].row_width > + max_row_width) + max_row_width = + tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].row_width; + if (tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].result_size > + max_result_size) + max_result_size = + tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].result_size; + } + } + + if (max_row_width != MAX_ROW_WIDTH) { + netdev_dbg(tfp->bp->dev, + "MAX_ROW_WIDTH:%d does not match actual val:%d\n", + MAX_ROW_WIDTH, max_row_width); + rc = -EINVAL; + goto fail; + } + if (max_result_size != MAX_RESULT_SIZE) { + netdev_dbg(tfp->bp->dev, + "MAX_RESULT_SIZE:%d does not match actual val:%d\n", + MAX_RESULT_SIZE, max_result_size); + rc = -EINVAL; + goto fail; + } + + return 0; + +fail: + cfa_tcam_mgr_data_free(tfs); + return rc; +} + +void cfa_tcam_mgr_uninit_p4(struct tf *tfp) +{ + struct tf_session *tfs; + int rc; + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return; + + cfa_tcam_mgr_data_free(tfs); +} + +/* HW OP declarations begin here */ +struct cfa_tcam_mgr_TCAM_row_data { + int key_size; + int result_size; + u8 key[MAX_ROW_WIDTH]; + u8 mask[MAX_ROW_WIDTH]; + u8 result[MAX_RESULT_SIZE]; +}; + +/* These macros are only needed to avoid exceeding 80 columns */ +#define L2_CTXT_RX_MAX_ROWS \ + (L2_CTXT_TCAM_RX_MAX_SLICES * L2_CTXT_TCAM_RX_NUM_ROWS) +#define PROF_RX_MAX_ROWS (PROF_TCAM_RX_MAX_SLICES * PROF_TCAM_RX_NUM_ROWS) +#define WC_RX_MAX_ROWS (WC_TCAM_RX_MAX_SLICES * 
WC_TCAM_RX_NUM_ROWS) +#define SP_RX_MAX_ROWS (SP_TCAM_RX_MAX_SLICES * SP_TCAM_RX_NUM_ROWS) +#define CT_RULE_RX_MAX_ROWS \ + (CT_RULE_TCAM_RX_MAX_SLICES * CT_RULE_TCAM_RX_NUM_ROWS) +#define VEB_RX_MAX_ROWS (VEB_TCAM_RX_MAX_SLICES * VEB_TCAM_RX_NUM_ROWS) + +#define L2_CTXT_TX_MAX_ROWS \ + (L2_CTXT_TCAM_TX_MAX_SLICES * L2_CTXT_TCAM_TX_NUM_ROWS) +#define PROF_TX_MAX_ROWS (PROF_TCAM_TX_MAX_SLICES * PROF_TCAM_TX_NUM_ROWS) +#define WC_TX_MAX_ROWS (WC_TCAM_TX_MAX_SLICES * WC_TCAM_TX_NUM_ROWS) +#define SP_TX_MAX_ROWS (SP_TCAM_TX_MAX_SLICES * SP_TCAM_TX_NUM_ROWS) +#define CT_RULE_TX_MAX_ROWS \ + (CT_RULE_TCAM_TX_MAX_SLICES * CT_RULE_TCAM_TX_NUM_ROWS) +#define VEB_TX_MAX_ROWS (VEB_TCAM_TX_MAX_SLICES * VEB_TCAM_TX_NUM_ROWS) + +struct cfa_tcam_mgr_rx_row_data { + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[L2_CTXT_RX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_PROF_TCAM_RX_row_data[PROF_RX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_WC_TCAM_RX_row_data[WC_RX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_SP_TCAM_RX_row_data[SP_RX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_CT_RULE_TCAM_RX_row_data[CT_RULE_RX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_VEB_TCAM_RX_row_data[VEB_RX_MAX_ROWS]; +}; + +struct cfa_tcam_mgr_tx_row_data { + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[L2_CTXT_TX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_PROF_TCAM_TX_row_data[PROF_TX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_WC_TCAM_TX_row_data[WC_TX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_SP_TCAM_TX_row_data[SP_TX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_CT_RULE_TCAM_TX_row_data[CT_RULE_TX_MAX_ROWS]; + struct cfa_tcam_mgr_TCAM_row_data + cfa_tcam_mgr_VEB_TCAM_TX_row_data[VEB_TX_MAX_ROWS]; +}; + +#define TF_TCAM_L2_CTX_HI TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH +#define 
TF_TCAM_L2_CTX_LO TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW +#define TF_TCAM_PROF TF_TCAM_TBL_TYPE_PROF_TCAM +#define TF_TCAM_WC TF_TCAM_TBL_TYPE_WC_TCAM +#define TF_TCAM_SP TF_TCAM_TBL_TYPE_SP_TCAM +#define TF_TCAM_CT TF_TCAM_TBL_TYPE_CT_RULE_TCAM +#define TF_TCAM_VEB TF_TCAM_TBL_TYPE_VEB_TCAM + +static int cfa_tcam_mgr_row_data_alloc(struct cfa_tcam_mgr_data + *tcam_mgr_data) +{ + struct cfa_tcam_mgr_rx_row_data *rx_row_data; + struct cfa_tcam_mgr_tx_row_data *tx_row_data; + + rx_row_data = vzalloc(sizeof(*rx_row_data)); + if (!rx_row_data) + return -ENOMEM; + + tx_row_data = vzalloc(sizeof(*tx_row_data)); + if (!tx_row_data) { + vfree(rx_row_data); + return -ENOMEM; + } + + tcam_mgr_data->rx_row_data = rx_row_data; + tcam_mgr_data->tx_row_data = tx_row_data; + + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_L2_CTX_HI] = + &rx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_L2_CTX_LO] = + &rx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_PROF] = + &rx_row_data->cfa_tcam_mgr_PROF_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_WC] = + &rx_row_data->cfa_tcam_mgr_WC_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_SP] = + &rx_row_data->cfa_tcam_mgr_SP_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_CT] = + &rx_row_data->cfa_tcam_mgr_CT_RULE_TCAM_RX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_VEB] = + &rx_row_data->cfa_tcam_mgr_VEB_TCAM_RX_row_data[0]; + + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_L2_CTX_HI] = + &tx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_L2_CTX_LO] = + &tx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_PROF] = + &tx_row_data->cfa_tcam_mgr_PROF_TCAM_TX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_WC] = + &tx_row_data->cfa_tcam_mgr_WC_TCAM_TX_row_data[0]; + 
tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_SP] = + &tx_row_data->cfa_tcam_mgr_SP_TCAM_TX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_CT] = + &tx_row_data->cfa_tcam_mgr_CT_RULE_TCAM_TX_row_data[0]; + tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_VEB] = + &tx_row_data->cfa_tcam_mgr_VEB_TCAM_TX_row_data[0]; + + return 0; +} + +static void cfa_tcam_mgr_row_data_free(struct cfa_tcam_mgr_data + *tcam_mgr_data) +{ + vfree(tcam_mgr_data->rx_row_data); + vfree(tcam_mgr_data->tx_row_data); +} + +static int cfa_tcam_mgr_hwop_set(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct cfa_tcam_mgr_set_parms + *parms, int row, int slice, int max_slices) +{ + struct cfa_tcam_mgr_TCAM_row_data *this_table; + struct cfa_tcam_mgr_TCAM_row_data *this_row; + + this_table = tcam_mgr_data->row_tables[parms->dir] + [cfa_tcam_mgr_get_phys_table_type(parms->type)]; + this_row = &this_table[row * max_slices + slice]; + this_row->key_size = parms->key_size; + memcpy(&this_row->key, parms->key, parms->key_size); + memcpy(&this_row->mask, parms->mask, parms->key_size); + this_row->result_size = parms->result_size; + if (parms->result) + memcpy(&this_row->result, parms->result, parms->result_size); + return 0; +}; + +static int cfa_tcam_mgr_hwop_get(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct cfa_tcam_mgr_get_parms + *parms, int row, int slice, int max_slices) +{ + struct cfa_tcam_mgr_TCAM_row_data *this_table; + struct cfa_tcam_mgr_TCAM_row_data *this_row; + + this_table = tcam_mgr_data->row_tables[parms->dir] + [cfa_tcam_mgr_get_phys_table_type(parms->type)]; + this_row = &this_table[row * max_slices + slice]; + parms->key_size = this_row->key_size; + parms->result_size = this_row->result_size; + if (parms->key) + memcpy(parms->key, &this_row->key, parms->key_size); + if (parms->mask) + memcpy(parms->mask, &this_row->mask, parms->key_size); + if (parms->result) + memcpy(parms->result, &this_row->result, parms->result_size); + return 0; +}; + +static int 
cfa_tcam_mgr_hwop_free(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct cfa_tcam_mgr_free_parms + *parms, int row, int slice, int max_slices) +{ + struct cfa_tcam_mgr_TCAM_row_data *this_table; + struct cfa_tcam_mgr_TCAM_row_data *this_row; + + this_table = tcam_mgr_data->row_tables[parms->dir] + [cfa_tcam_mgr_get_phys_table_type(parms->type)]; + this_row = &this_table[row * max_slices + slice]; + memset(&this_row->key, 0, sizeof(this_row->key)); + memset(&this_row->mask, 0, sizeof(this_row->mask)); + memset(&this_row->result, 0, sizeof(this_row->result)); + this_row->key_size = 0; + this_row->result_size = 0; + return 0; +}; + +int cfa_tcam_mgr_hwops_get_funcs_p4(struct cfa_tcam_mgr_hwops_funcs + *hwop_funcs) +{ + hwop_funcs->set = cfa_tcam_mgr_hwop_set; + hwop_funcs->get = cfa_tcam_mgr_hwop_get; + hwop_funcs->free = cfa_tcam_mgr_hwop_free; + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.h new file mode 100644 index 000000000000..f820930ac44a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p4.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. + */ + +#ifndef CFA_TCAM_MGR_P4_H +#define CFA_TCAM_MGR_P4_H + +#include "cfa_tcam_mgr_device.h" + +int cfa_tcam_mgr_init_p4(struct tf *tfp); +void cfa_tcam_mgr_uninit_p4(struct tf *tfp); +int cfa_tcam_mgr_hwops_get_funcs_p4(struct cfa_tcam_mgr_hwops_funcs + *hwop_funcs); + +#endif /* CFA_TCAM_MGR_P4_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.c new file mode 100644 index 000000000000..4047ae4cc286 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2021-2022 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include "hcapi_cfa_defs.h" +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "cfa_tcam_mgr.h" +#include "cfa_tcam_mgr_p58.h" +#include "cfa_tcam_mgr.h" +#include "cfa_tcam_mgr_device.h" +#include "cfa_resource_types.h" +#include "tf_util.h" +#include "tf_session.h" + +/* Sizings of the TCAMs on Thor */ + +#define MAX_ROW_WIDTH 96 +#define MAX_RESULT_SIZE 8 + +/* TCAM definitions + * + * These define the TCAMs in HW. + * + * Note: Set xxx_TCAM_[R|T]X_NUM_ROWS to zero if a TCAM is either not supported + * by HW or not supported by TCAM Manager. + */ + +/* L2 Context TCAM */ +#define L2_CTXT_TCAM_RX_MAX_SLICES 1 +#define L2_CTXT_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_64B_WORD_ALIGN(214) +#define L2_CTXT_TCAM_RX_NUM_ROWS 1024 +#define L2_CTXT_TCAM_RX_MAX_ENTRIES (L2_CTXT_TCAM_RX_MAX_SLICES * \ + L2_CTXT_TCAM_RX_NUM_ROWS) +#define L2_CTXT_TCAM_RX_RESULT_SIZE 8 + +#define L2_CTXT_TCAM_TX_MAX_SLICES L2_CTXT_TCAM_RX_MAX_SLICES +#define L2_CTXT_TCAM_TX_ROW_WIDTH L2_CTXT_TCAM_RX_ROW_WIDTH +#define L2_CTXT_TCAM_TX_NUM_ROWS L2_CTXT_TCAM_RX_NUM_ROWS +#define L2_CTXT_TCAM_TX_MAX_ENTRIES L2_CTXT_TCAM_RX_MAX_ENTRIES +#define L2_CTXT_TCAM_TX_RESULT_SIZE L2_CTXT_TCAM_RX_RESULT_SIZE + +/* Profile TCAM */ +#define PROF_TCAM_RX_MAX_SLICES 1 +#define PROF_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_64B_WORD_ALIGN(94) +#define PROF_TCAM_RX_NUM_ROWS 256 +#define PROF_TCAM_RX_MAX_ENTRIES (PROF_TCAM_RX_MAX_SLICES * \ + PROF_TCAM_RX_NUM_ROWS) +#define PROF_TCAM_RX_RESULT_SIZE 8 + +#define PROF_TCAM_TX_MAX_SLICES PROF_TCAM_RX_MAX_SLICES +#define PROF_TCAM_TX_ROW_WIDTH PROF_TCAM_RX_ROW_WIDTH +#define PROF_TCAM_TX_NUM_ROWS PROF_TCAM_RX_NUM_ROWS +#define PROF_TCAM_TX_MAX_ENTRIES PROF_TCAM_RX_MAX_ENTRIES +#define PROF_TCAM_TX_RESULT_SIZE PROF_TCAM_RX_RESULT_SIZE + +/* Wildcard TCAM */ +#define WC_TCAM_RX_MAX_SLICES 4 +/* 162 bits per slice */ +#define WC_TCAM_RX_ROW_WIDTH (TF_BITS2BYTES_64B_WORD_ALIGN(162) * \ + WC_TCAM_RX_MAX_SLICES) 
+#define WC_TCAM_RX_NUM_ROWS 2048 +#define WC_TCAM_RX_MAX_ENTRIES (WC_TCAM_RX_MAX_SLICES * WC_TCAM_RX_NUM_ROWS) +#define WC_TCAM_RX_RESULT_SIZE 8 + +#define WC_TCAM_TX_MAX_SLICES WC_TCAM_RX_MAX_SLICES +#define WC_TCAM_TX_ROW_WIDTH WC_TCAM_RX_ROW_WIDTH +#define WC_TCAM_TX_NUM_ROWS WC_TCAM_RX_NUM_ROWS +#define WC_TCAM_TX_MAX_ENTRIES WC_TCAM_RX_MAX_ENTRIES +#define WC_TCAM_TX_RESULT_SIZE WC_TCAM_RX_RESULT_SIZE + +/* Source Properties TCAM */ +#define SP_TCAM_RX_MAX_SLICES 1 +#define SP_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_64B_WORD_ALIGN(89) +#define SP_TCAM_RX_NUM_ROWS 512 +#define SP_TCAM_RX_MAX_ENTRIES (SP_TCAM_RX_MAX_SLICES * SP_TCAM_RX_NUM_ROWS) +#define SP_TCAM_RX_RESULT_SIZE 8 + +#define SP_TCAM_TX_MAX_SLICES SP_TCAM_RX_MAX_SLICES +#define SP_TCAM_TX_ROW_WIDTH SP_TCAM_RX_ROW_WIDTH +#define SP_TCAM_TX_NUM_ROWS SP_TCAM_RX_NUM_ROWS +#define SP_TCAM_TX_MAX_ENTRIES SP_TCAM_RX_MAX_ENTRIES +#define SP_TCAM_TX_RESULT_SIZE SP_TCAM_RX_RESULT_SIZE + +/* Connection Tracking Rule TCAM */ +#define CT_RULE_TCAM_RX_MAX_SLICES 1 +#define CT_RULE_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_64B_WORD_ALIGN(16) +#define CT_RULE_TCAM_RX_NUM_ROWS 16 +#define CT_RULE_TCAM_RX_MAX_ENTRIES (CT_RULE_TCAM_RX_MAX_SLICES * \ + CT_RULE_TCAM_RX_NUM_ROWS) +#define CT_RULE_TCAM_RX_RESULT_SIZE 8 + +#define CT_RULE_TCAM_TX_MAX_SLICES CT_RULE_TCAM_RX_MAX_SLICES +#define CT_RULE_TCAM_TX_ROW_WIDTH CT_RULE_TCAM_RX_ROW_WIDTH +#define CT_RULE_TCAM_TX_NUM_ROWS CT_RULE_TCAM_RX_NUM_ROWS +#define CT_RULE_TCAM_TX_MAX_ENTRIES CT_RULE_TCAM_RX_MAX_ENTRIES +#define CT_RULE_TCAM_TX_RESULT_SIZE CT_RULE_TCAM_RX_RESULT_SIZE + +/* Virtual Edge Bridge TCAM */ +#define VEB_TCAM_RX_MAX_SLICES 1 +#define VEB_TCAM_RX_ROW_WIDTH TF_BITS2BYTES_WORD_ALIGN(79) +/* Tx only */ +#define VEB_TCAM_RX_NUM_ROWS 1024 +#define VEB_TCAM_RX_MAX_ENTRIES (VEB_TCAM_RX_MAX_SLICES * VEB_TCAM_RX_NUM_ROWS) +#define VEB_TCAM_RX_RESULT_SIZE 8 + +#define VEB_TCAM_TX_MAX_SLICES VEB_TCAM_RX_MAX_SLICES +#define VEB_TCAM_TX_ROW_WIDTH VEB_TCAM_RX_ROW_WIDTH +#define 
VEB_TCAM_TX_NUM_ROWS 1024 +#define VEB_TCAM_TX_MAX_ENTRIES (VEB_TCAM_TX_MAX_SLICES * VEB_TCAM_TX_NUM_ROWS) +#define VEB_TCAM_TX_RESULT_SIZE VEB_TCAM_RX_RESULT_SIZE + +/* Declare the table rows for each table here. If new tables are added to the + * enum tf_tcam_tbl_type, then new declarations will be needed here. + * + * The numeric suffix of the structure type indicates how many slices a + * particular TCAM supports. + */ + +struct cfa_tcam_mgr_table_rows_p58 { + struct cfa_tcam_mgr_table_rows_1 + table_rows_L2_CTXT_TCAM_RX[L2_CTXT_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_L2_CTXT_TCAM_TX[L2_CTXT_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_PROF_TCAM_RX[PROF_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_PROF_TCAM_TX[PROF_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_4 + table_rows_WC_TCAM_RX[WC_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_4 + table_rows_WC_TCAM_TX[WC_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_SP_TCAM_RX[SP_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_SP_TCAM_TX[SP_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_CT_RULE_TCAM_RX[CT_RULE_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_CT_RULE_TCAM_TX[CT_RULE_TCAM_TX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_VEB_TCAM_RX[VEB_TCAM_RX_NUM_ROWS]; + struct cfa_tcam_mgr_table_rows_1 + table_rows_VEB_TCAM_TX[VEB_TCAM_TX_NUM_ROWS]; +}; + +struct cfa_tcam_mgr_table_data +cfa_tcam_mgr_tables_p58[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX] = { + { /* RX */ + { /* High AFM */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = 0, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH, + }, + { /* High APPS */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, 
+ .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = L2_CTXT_TCAM_RX_APP_HI_END, + .max_entries = (L2_CTXT_TCAM_RX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH, + }, + { /* Low AFM */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = 0, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW, + }, + { /* Low APPS */ + .max_slices = L2_CTXT_TCAM_RX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_RX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_RX_NUM_ROWS, + .start_row = L2_CTXT_TCAM_RX_APP_LO_START, + .end_row = L2_CTXT_TCAM_RX_NUM_ROWS - 1, + .max_entries = (L2_CTXT_TCAM_RX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW, + }, + { /* AFM */ + .max_slices = PROF_TCAM_RX_MAX_SLICES, + .row_width = PROF_TCAM_RX_ROW_WIDTH, + .num_rows = 0, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = PROF_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_PROF_TCAM, + }, + { /* APPS */ + .max_slices = PROF_TCAM_RX_MAX_SLICES, + .row_width = PROF_TCAM_RX_ROW_WIDTH, + .num_rows = PROF_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = PROF_TCAM_RX_NUM_ROWS - 1, + .max_entries = PROF_TCAM_RX_MAX_ENTRIES, + .result_size = PROF_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_PROF_TCAM, + }, + { /* AFM */ + .max_slices = WC_TCAM_RX_MAX_SLICES, + .row_width = WC_TCAM_RX_ROW_WIDTH, + .num_rows = WC_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = WC_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_WC_TCAM, + }, + { /* APPS */ + .max_slices = WC_TCAM_RX_MAX_SLICES, + .row_width = WC_TCAM_RX_ROW_WIDTH, + .num_rows = WC_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = WC_TCAM_RX_NUM_ROWS - 1, + .max_entries = 
WC_TCAM_RX_MAX_ENTRIES, + .result_size = WC_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_WC_TCAM, + }, + { /* AFM */ + .max_slices = SP_TCAM_RX_MAX_SLICES, + .row_width = SP_TCAM_RX_ROW_WIDTH, + .num_rows = 0, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = SP_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = SP_TCAM_RX_MAX_SLICES, + .row_width = SP_TCAM_RX_ROW_WIDTH, + .num_rows = SP_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = SP_TCAM_RX_NUM_ROWS - 1, + .max_entries = SP_TCAM_RX_MAX_ENTRIES, + .result_size = SP_TCAM_RX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = CT_RULE_TCAM_RX_MAX_SLICES, + .row_width = CT_RULE_TCAM_RX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = CT_RULE_TCAM_RX_MAX_SLICES, + .row_width = CT_RULE_TCAM_RX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = + TCAM_SET_END_ROW(CT_RULE_TCAM_RX_NUM_ROWS), + .max_entries = CT_RULE_TCAM_RX_MAX_ENTRIES, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = VEB_TCAM_RX_MAX_SLICES, + .row_width = VEB_TCAM_RX_ROW_WIDTH, + .num_rows = VEB_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_VEB_TCAM, + }, + { /* APPS */ + .max_slices = VEB_TCAM_RX_MAX_SLICES, + .row_width = VEB_TCAM_RX_ROW_WIDTH, + .num_rows = VEB_TCAM_RX_NUM_ROWS, + .start_row = 0, + .end_row = TCAM_SET_END_ROW(VEB_TCAM_RX_NUM_ROWS), + .max_entries = VEB_TCAM_RX_MAX_ENTRIES, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_VEB_TCAM, + }, + }, + { /* TX */ + { /* AFM */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = 
L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH, + }, + { /* APPS */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = L2_CTXT_TCAM_TX_APP_HI_END, + .max_entries = (L2_CTXT_TCAM_TX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH, + }, + { /* AFM */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW, + }, + { /* APPS */ + .max_slices = L2_CTXT_TCAM_TX_MAX_SLICES, + .row_width = L2_CTXT_TCAM_TX_ROW_WIDTH, + .num_rows = L2_CTXT_TCAM_TX_NUM_ROWS, + .start_row = L2_CTXT_TCAM_TX_APP_LO_START, + .end_row = L2_CTXT_TCAM_TX_NUM_ROWS - 1, + .max_entries = (L2_CTXT_TCAM_TX_MAX_ENTRIES / 2), + .result_size = L2_CTXT_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW, + }, + { /* AFM */ + .max_slices = PROF_TCAM_TX_MAX_SLICES, + .row_width = PROF_TCAM_TX_ROW_WIDTH, + .num_rows = PROF_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = PROF_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_PROF_TCAM, + }, + { /* APPS */ + .max_slices = PROF_TCAM_TX_MAX_SLICES, + .row_width = PROF_TCAM_TX_ROW_WIDTH, + .num_rows = PROF_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = PROF_TCAM_TX_NUM_ROWS - 1, + .max_entries = PROF_TCAM_TX_MAX_ENTRIES, + .result_size = PROF_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_PROF_TCAM, + }, + { /* AFM */ + .max_slices = WC_TCAM_TX_MAX_SLICES, + .row_width = WC_TCAM_TX_ROW_WIDTH, + .num_rows = WC_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = WC_TCAM_TX_RESULT_SIZE, + .hcapi_type = 
CFA_RESOURCE_TYPE_P58_WC_TCAM, + }, + { /* APPS */ + .max_slices = WC_TCAM_TX_MAX_SLICES, + .row_width = WC_TCAM_TX_ROW_WIDTH, + .num_rows = WC_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = WC_TCAM_TX_NUM_ROWS - 1, + .max_entries = WC_TCAM_TX_MAX_ENTRIES, + .result_size = WC_TCAM_TX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_WC_TCAM, + }, + { /* AFM */ + .max_slices = SP_TCAM_TX_MAX_SLICES, + .row_width = SP_TCAM_TX_ROW_WIDTH, + .num_rows = SP_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = SP_TCAM_TX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = SP_TCAM_TX_MAX_SLICES, + .row_width = SP_TCAM_TX_ROW_WIDTH, + .num_rows = SP_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = SP_TCAM_TX_NUM_ROWS - 1, + .max_entries = SP_TCAM_TX_MAX_ENTRIES, + .result_size = SP_TCAM_TX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = CT_RULE_TCAM_TX_MAX_SLICES, + .row_width = CT_RULE_TCAM_TX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* APPS */ + .max_slices = CT_RULE_TCAM_TX_MAX_SLICES, + .row_width = CT_RULE_TCAM_TX_ROW_WIDTH, + .num_rows = CT_RULE_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = + TCAM_SET_END_ROW(CT_RULE_TCAM_TX_NUM_ROWS), + .max_entries = CT_RULE_TCAM_TX_MAX_ENTRIES, + .result_size = CT_RULE_TCAM_RX_RESULT_SIZE, + }, + { /* AFM */ + .max_slices = VEB_TCAM_TX_MAX_SLICES, + .row_width = VEB_TCAM_TX_ROW_WIDTH, + .num_rows = VEB_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = 0, + .max_entries = 0, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + .hcapi_type = CFA_RESOURCE_TYPE_P58_VEB_TCAM, + }, + { /* APPS */ + .max_slices = VEB_TCAM_TX_MAX_SLICES, + .row_width = VEB_TCAM_TX_ROW_WIDTH, + .num_rows = VEB_TCAM_TX_NUM_ROWS, + .start_row = 0, + .end_row = VEB_TCAM_TX_NUM_ROWS - 1, + .max_entries = VEB_TCAM_TX_MAX_ENTRIES, + .result_size = VEB_TCAM_RX_RESULT_SIZE, + .hcapi_type = 
CFA_RESOURCE_TYPE_P58_VEB_TCAM,
		},
	},
};

/* Forward declarations: row-storage allocation helpers defined below. */
static int cfa_tcam_mgr_row_data_alloc(struct cfa_tcam_mgr_data *tcam_mgr_data);
static void cfa_tcam_mgr_row_data_free(struct cfa_tcam_mgr_data *tcam_mgr_data);

/* cfa_tcam_mgr_data_free - release all TCAM manager state for a session.
 *
 * Frees every sub-allocation hung off the session's tcam_mgr_handle and
 * clears the handle.  Safe to call when the handle was never allocated
 * (no-op) or when init failed part-way: vfree(NULL) is a no-op, so the
 * unconditional frees below are safe for partially initialized state.
 */
static void cfa_tcam_mgr_data_free(struct tf_session *tfs)
{
	struct cfa_tcam_mgr_data *tcam_mgr_data = tfs->tcam_mgr_handle;

	if (!tcam_mgr_data)
		return;

	vfree(tcam_mgr_data->table_rows);
	vfree(tcam_mgr_data->entry_data);
	vfree(tcam_mgr_data->session_bmp);
	cfa_tcam_mgr_row_data_free(tcam_mgr_data);

	vfree(tcam_mgr_data);
	tfs->tcam_mgr_handle = NULL;
}

/* cfa_tcam_mgr_init_p58 - set up TCAM manager state for a P58 device.
 *
 * @tfp: TruFlow handle whose session will own the TCAM manager data.
 *
 * Allocates the manager context, the per-table row descriptors and the
 * per-entry data array, copies in the static P58 table template, then
 * wires each logical table (per direction / per type) to its backing row
 * storage.  AFM and APPS views of the same physical TCAM deliberately
 * share the same row array.
 *
 * Returns 0 on success or a negative errno; on any failure all partial
 * allocations are released via cfa_tcam_mgr_data_free().
 */
int cfa_tcam_mgr_init_p58(struct tf *tfp)
{
	struct cfa_tcam_mgr_table_rows_p58 *table_rows;
	struct cfa_tcam_mgr_entry_data *entry_data;
	struct cfa_tcam_mgr_data *tcam_mgr_data;
	struct tf_session *tfs;
	int max_result_size = 0;
	int max_row_width = 0;
	int dir, type;
	int rc;

	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return rc;

	tcam_mgr_data = vzalloc(sizeof(*tcam_mgr_data));
	if (!tcam_mgr_data)
		return -ENOMEM;
	tfs->tcam_mgr_handle = tcam_mgr_data;

	table_rows = vzalloc(sizeof(*table_rows));
	if (!table_rows) {
		rc = -ENOMEM;
		goto fail;
	}
	tcam_mgr_data->table_rows = table_rows;

	entry_data = vzalloc(sizeof(*entry_data) * TF_TCAM_MAX_ENTRIES);
	if (!entry_data) {
		rc = -ENOMEM;
		goto fail;
	}
	tcam_mgr_data->entry_data = entry_data;

	rc = cfa_tcam_mgr_row_data_alloc(tcam_mgr_data);
	if (rc)
		goto fail;

	/* Start from the read-only P58 template; the tcam_rows pointers
	 * are patched below to point into this instance's table_rows.
	 */
	memcpy(tcam_mgr_data->cfa_tcam_mgr_tables,
	       &cfa_tcam_mgr_tables_p58,
	       sizeof(tcam_mgr_data->cfa_tcam_mgr_tables));

	/* L2 context (high) */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_TX[0];

	/* L2 context (low) shares rows with the high table per direction */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_L2_CTXT_TCAM_TX[0];

	/* Profile TCAM */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_PROF_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_PROF_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_PROF_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_PROF_TCAM_TX[0];

	/* Wildcard TCAM */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_WC_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_WC_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_WC_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_WC_TCAM_TX[0];

	/* Source-property TCAM */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_SP_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_SP_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_SP_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_SP_TCAM_TX[0];

	/* Connection-tracking rule TCAM */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_CT_RULE_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_CT_RULE_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_CT_RULE_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_CT_RULE_TCAM_TX[0];

	/* VEB TCAM */
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_VEB_TCAM_RX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_RX]
		[CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_VEB_TCAM_RX[0];

	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_AFM].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_VEB_TCAM_TX[0];
	tcam_mgr_data->cfa_tcam_mgr_tables[TF_DIR_TX]
		[CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS].tcam_rows =
		(struct cfa_tcam_mgr_table_rows_0 *)
		&table_rows->table_rows_VEB_TCAM_TX[0];

	/* Sanity-check that the compile-time MAX_ROW_WIDTH/MAX_RESULT_SIZE
	 * constants match the widest entries in the table template.
	 */
	for (dir = 0; dir < TF_DIR_MAX; dir++) {
		for (type = 0; type < CFA_TCAM_MGR_TBL_TYPE_MAX; type++) {
			if (tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].row_width >
			    max_row_width)
				max_row_width =
					tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].row_width;
			if (tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].result_size >
			    max_result_size)
				max_result_size =
					tcam_mgr_data->cfa_tcam_mgr_tables[dir][type].result_size;
		}
	}

	if (max_row_width != MAX_ROW_WIDTH) {
		netdev_dbg(tfp->bp->dev,
			   "MAX_ROW_WIDTH:%d does not match actual val:%d\n",
			   MAX_ROW_WIDTH, max_row_width);
		rc = -EINVAL;
		goto fail;
	}
	if (max_result_size != MAX_RESULT_SIZE) {
		netdev_dbg(tfp->bp->dev,
			   "MAX_RESULT_SIZE:%d does not match actual val:%d\n",
			   MAX_RESULT_SIZE, max_result_size);
		rc = -EINVAL;
		goto fail;
	}

	return 0;

fail:
	cfa_tcam_mgr_data_free(tfs);
	return rc;
}

/* cfa_tcam_mgr_uninit_p58 - tear down TCAM manager state for a P58 device.
 *
 * @tfp: TruFlow handle.  If the session cannot be resolved there is
 * nothing to free and the call silently returns.
 */
void cfa_tcam_mgr_uninit_p58(struct tf *tfp)
{
	struct tf_session *tfs;
	int rc;

	rc = tf_session_get_session_internal(tfp, &tfs);
	if (rc)
		return;

	cfa_tcam_mgr_data_free(tfs);
}

/* HW OP declarations begin here */

struct
cfa_tcam_mgr_TCAM_row_data {
	int key_size;			/* bytes of key currently stored */
	int result_size;		/* bytes of result currently stored */
	u8 key[MAX_ROW_WIDTH];		/* shadow copy of the TCAM key */
	u8 mask[MAX_ROW_WIDTH];		/* shadow copy of the TCAM mask */
	u8 result[MAX_RESULT_SIZE];	/* shadow copy of the result record */
};

/* Row counts per physical TCAM: slices-per-row times number of rows. */
#define L2_CTXT_RX_MAX_ROWS \
	(L2_CTXT_TCAM_RX_MAX_SLICES * L2_CTXT_TCAM_RX_NUM_ROWS)
#define PROF_RX_MAX_ROWS (PROF_TCAM_RX_MAX_SLICES * PROF_TCAM_RX_NUM_ROWS)
#define WC_RX_MAX_ROWS (WC_TCAM_RX_MAX_SLICES * WC_TCAM_RX_NUM_ROWS)
#define SP_RX_MAX_ROWS (SP_TCAM_RX_MAX_SLICES * SP_TCAM_RX_NUM_ROWS)
#define CT_RULE_RX_MAX_ROWS \
	(CT_RULE_TCAM_RX_MAX_SLICES * CT_RULE_TCAM_RX_NUM_ROWS)
#define VEB_RX_MAX_ROWS (VEB_TCAM_RX_MAX_SLICES * VEB_TCAM_RX_NUM_ROWS)

#define L2_CTXT_TX_MAX_ROWS \
	(L2_CTXT_TCAM_TX_MAX_SLICES * L2_CTXT_TCAM_TX_NUM_ROWS)
#define PROF_TX_MAX_ROWS (PROF_TCAM_TX_MAX_SLICES * PROF_TCAM_TX_NUM_ROWS)
#define WC_TX_MAX_ROWS (WC_TCAM_TX_MAX_SLICES * WC_TCAM_TX_NUM_ROWS)
#define SP_TX_MAX_ROWS (SP_TCAM_TX_MAX_SLICES * SP_TCAM_TX_NUM_ROWS)
#define CT_RULE_TX_MAX_ROWS \
	(CT_RULE_TCAM_TX_MAX_SLICES * CT_RULE_TCAM_TX_NUM_ROWS)
#define VEB_TX_MAX_ROWS (VEB_TCAM_TX_MAX_SLICES * VEB_TCAM_TX_NUM_ROWS)

/* Shadow row storage for every RX-direction TCAM. */
struct cfa_tcam_mgr_rx_row_data {
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[L2_CTXT_RX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_PROF_TCAM_RX_row_data[PROF_RX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_WC_TCAM_RX_row_data[WC_RX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_SP_TCAM_RX_row_data[SP_RX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_CT_RULE_TCAM_RX_row_data[CT_RULE_RX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_VEB_TCAM_RX_row_data[VEB_RX_MAX_ROWS];
};

/* Shadow row storage for every TX-direction TCAM. */
struct cfa_tcam_mgr_tx_row_data {
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[L2_CTXT_TX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_PROF_TCAM_TX_row_data[PROF_TX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_WC_TCAM_TX_row_data[WC_TX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_SP_TCAM_TX_row_data[SP_TX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_CT_RULE_TCAM_TX_row_data[CT_RULE_TX_MAX_ROWS];
	struct cfa_tcam_mgr_TCAM_row_data
		cfa_tcam_mgr_VEB_TCAM_TX_row_data[VEB_TX_MAX_ROWS];
};

/* Short aliases for the physical table type enumerators used below. */
#define TF_TCAM_L2_CTX_HI	TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH
#define TF_TCAM_L2_CTX_LO	TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW
#define TF_TCAM_PROF		TF_TCAM_TBL_TYPE_PROF_TCAM
#define TF_TCAM_WC		TF_TCAM_TBL_TYPE_WC_TCAM
#define TF_TCAM_SP		TF_TCAM_TBL_TYPE_SP_TCAM
#define TF_TCAM_CT		TF_TCAM_TBL_TYPE_CT_RULE_TCAM
#define TF_TCAM_VEB		TF_TCAM_TBL_TYPE_VEB_TCAM

/* cfa_tcam_mgr_row_data_alloc - allocate the RX/TX shadow row storage and
 * point each row_tables[dir][type] slot at its backing array.
 *
 * The L2 context high and low tables share one row array per direction.
 * Returns 0 on success, -ENOMEM on allocation failure (nothing leaked).
 */
static int cfa_tcam_mgr_row_data_alloc(struct cfa_tcam_mgr_data
				       *tcam_mgr_data)
{
	struct cfa_tcam_mgr_rx_row_data *rx_row_data;
	struct cfa_tcam_mgr_tx_row_data *tx_row_data;

	rx_row_data = vzalloc(sizeof(*rx_row_data));
	if (!rx_row_data)
		return -ENOMEM;

	tx_row_data = vzalloc(sizeof(*tx_row_data));
	if (!tx_row_data) {
		vfree(rx_row_data);
		return -ENOMEM;
	}

	tcam_mgr_data->rx_row_data = rx_row_data;
	tcam_mgr_data->tx_row_data = tx_row_data;

	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_L2_CTX_HI] =
		&rx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_L2_CTX_LO] =
		&rx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_PROF] =
		&rx_row_data->cfa_tcam_mgr_PROF_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_WC] =
		&rx_row_data->cfa_tcam_mgr_WC_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_SP] =
		&rx_row_data->cfa_tcam_mgr_SP_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_CT] =
		&rx_row_data->cfa_tcam_mgr_CT_RULE_TCAM_RX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_RX][TF_TCAM_VEB] =
		&rx_row_data->cfa_tcam_mgr_VEB_TCAM_RX_row_data[0];

	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_L2_CTX_HI] =
		&tx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_L2_CTX_LO] =
		&tx_row_data->cfa_tcam_mgr_L2_CTXT_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_PROF] =
		&tx_row_data->cfa_tcam_mgr_PROF_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_WC] =
		&tx_row_data->cfa_tcam_mgr_WC_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_SP] =
		&tx_row_data->cfa_tcam_mgr_SP_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_CT] =
		&tx_row_data->cfa_tcam_mgr_CT_RULE_TCAM_TX_row_data[0];
	tcam_mgr_data->row_tables[TF_DIR_TX][TF_TCAM_VEB] =
		&tx_row_data->cfa_tcam_mgr_VEB_TCAM_TX_row_data[0];

	return 0;
}

/* cfa_tcam_mgr_row_data_free - release both shadow row arrays.
 * vfree(NULL) is a no-op, so partial allocation states are handled.
 */
static void cfa_tcam_mgr_row_data_free(struct cfa_tcam_mgr_data
				       *tcam_mgr_data)
{
	vfree(tcam_mgr_data->rx_row_data);
	vfree(tcam_mgr_data->tx_row_data);
}

/* cfa_tcam_mgr_hwop_set - store key/mask/result into the shadow row for
 * (dir, type, row, slice).
 *
 * NOTE(review): parms->key_size and parms->result_size are copied without
 * being validated against MAX_ROW_WIDTH / MAX_RESULT_SIZE here — presumably
 * the caller guarantees the bound via the table's row_width; confirm.
 */
static int cfa_tcam_mgr_hwop_set(struct cfa_tcam_mgr_data *tcam_mgr_data,
				 struct cfa_tcam_mgr_set_parms
				 *parms, int row, int slice, int max_slices)
{
	struct cfa_tcam_mgr_TCAM_row_data *this_table;
	struct cfa_tcam_mgr_TCAM_row_data *this_row;

	this_table = tcam_mgr_data->row_tables[parms->dir]
		[cfa_tcam_mgr_get_phys_table_type(parms->type)];
	/* Rows are stored slice-major: row * max_slices + slice. */
	this_row = &this_table[row * max_slices + slice];
	this_row->key_size = parms->key_size;
	memcpy(&this_row->key, parms->key, parms->key_size);
	memcpy(&this_row->mask, parms->mask, parms->key_size);
	this_row->result_size = parms->result_size;
	if (parms->result)
		memcpy(&this_row->result, parms->result, parms->result_size);
	return 0;
};

/* cfa_tcam_mgr_hwop_get - read key/mask/result back from the shadow row;
 * each output pointer is optional and skipped when NULL.
 */
static int cfa_tcam_mgr_hwop_get(struct cfa_tcam_mgr_data *tcam_mgr_data,
				 struct cfa_tcam_mgr_get_parms
				 *parms, int row, int slice, int max_slices)
{
	struct cfa_tcam_mgr_TCAM_row_data *this_table;
	struct cfa_tcam_mgr_TCAM_row_data *this_row;

	this_table = tcam_mgr_data->row_tables[parms->dir]
		[cfa_tcam_mgr_get_phys_table_type(parms->type)];
	this_row = &this_table[row *
max_slices + slice]; + parms->key_size = this_row->key_size; + parms->result_size = this_row->result_size; + if (parms->key) + memcpy(parms->key, &this_row->key, parms->key_size); + if (parms->mask) + memcpy(parms->mask, &this_row->mask, parms->key_size); + if (parms->result) + memcpy(parms->result, &this_row->result, parms->result_size); + return 0; +}; + +static int cfa_tcam_mgr_hwop_free(struct cfa_tcam_mgr_data *tcam_mgr_data, + struct cfa_tcam_mgr_free_parms + *parms, int row, int slice, int max_slices) +{ + struct cfa_tcam_mgr_TCAM_row_data *this_table; + struct cfa_tcam_mgr_TCAM_row_data *this_row; + + this_table = tcam_mgr_data->row_tables[parms->dir] + [cfa_tcam_mgr_get_phys_table_type(parms->type)]; + this_row = &this_table[row * max_slices + slice]; + memset(&this_row->key, 0, sizeof(this_row->key)); + memset(&this_row->mask, 0, sizeof(this_row->mask)); + memset(&this_row->result, 0, sizeof(this_row->result)); + this_row->key_size = 0; + this_row->result_size = 0; + return 0; +}; + +int cfa_tcam_mgr_hwops_get_funcs_p58(struct cfa_tcam_mgr_hwops_funcs + *hwop_funcs) +{ + hwop_funcs->set = cfa_tcam_mgr_hwop_set; + hwop_funcs->get = cfa_tcam_mgr_hwop_get; + hwop_funcs->free = cfa_tcam_mgr_hwop_free; + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.h new file mode 100644 index 000000000000..1523b241820e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/cfa_tcam_mgr_p58.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef CFA_TCAM_MGR_P58_H +#define CFA_TCAM_MGR_P58_H + +#include "cfa_tcam_mgr_device.h" + +int cfa_tcam_mgr_init_p58(struct tf *tfp); +void cfa_tcam_mgr_uninit_p58(struct tf *tfp); +int cfa_tcam_mgr_hwops_get_funcs_p58(struct cfa_tcam_mgr_hwops_funcs + *hwop_funcs); + +#endif /* CFA_TCAM_MGR_P58_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.c new file mode 100644 index 000000000000..2f74a558eed7 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ +#include +#include +#include +#include "dpool.h" + +/* Dynamic Pool Allocator + * + * The Dynamic Pool Allocator or "dpool" supports the + * allocation of variable size table entries intended for use + * with SRAM based EM entries. + * + * Dpool maintains a list of all blocks and the current status + * of each block. A block may be: + * + * . Free, size = 0 + * . Busy, First, size = n + * . 
Busy, size = n + * + * Here's an example of some dpool entries and the associated + * EM Record Pointer Table + * + * +----------------------+ +----------+ + * |First, Busy, Size = 3 | | FBlock | + * +----------------------+ +----------+ + * |Busy, Size = 3 | | NBlock | + * +----------------------+ +----------+ + * |Busy, Size = 3 | | NBlock | + * +----------------------+ +----------+ + * |Free, Size = 0 | | Free | + * +----------------------+ +----------+ + * |Free, Size = 0 | | Free | + * +----------------------+ +----------+ + * |Free, Size = 0 | | Free | + * +----------------------+ +----------+ + * |First, Busy, Size = 2 | | FBlock | + * +----------------------+ +----------+ + * |Busy, Size = 2 | | NBlock | + * +----------------------+ +----------+ + * |Free, Size = 0 | | Free | + * +----------------------+ +----------+ + * |Free, Size = 0 | | Free | + * +----------------------+ +----------+ + * . . . . + * . . . . + * . . . . + * + * This shows a three block entry followed by three free + * entries followed by a two block entry. + * + * Dpool supports the ability to defragment the currently + * allocated entries. For dpool to support defragmentation + * the firmware must support the "EM Move" HWRM. When an + * application attempts to insert an entry it will pass an + * argument indicating if dpool should, in the event of there + * being insufficient space for the new entry, defragment the + * existing entries to make space. + */ + +/* dpool_init + * + * Initialize the dpool + * + * *dpool - Pointer to the dpool structure. + * start_index - The lowest permited index. + * size - The number of entries + * max_alloc_size - Max size of an entry. + * *user_data - Pointer to memory that will be passed in + * callbacks. + * move_callback - If the EM Move HWRM is supported in FW then + * this function pointer will point to a function + * that will invoke the EM Move HWRM. 
+ */ +int dpool_init(struct dpool *dpool, u32 start_index, u32 size, + u8 max_alloc_size, void *user_data, + int (*move_callback)(void *, u64, u32)) +{ + size_t len; + u32 i; + + len = size * sizeof(struct dpool_entry); + dpool->entry = vzalloc(len); + if (!dpool->entry) + return -ENOMEM; + + dpool->start_index = start_index; + dpool->size = size; + dpool->max_alloc_size = max_alloc_size; + dpool->user_data = user_data; + dpool->move_callback = move_callback; + /* Init entries */ + for (i = 0; i < size; i++) { + dpool->entry[i].flags = 0; + dpool->entry[i].index = start_index; + dpool->entry[i].entry_data = 0UL; + start_index++; + } + + return 0; +} + +/* dpool_dump_free_list + * + * Debug function to dump the free list + */ +static void dpool_dump_free_list(struct dpool_free_list *free_list) +{ + u32 i; + + netdev_dbg(NULL, "FreeList:"); + + for (i = 0; i < free_list->size; i++) { + netdev_dbg(NULL, "[%02d-%d:%d]", i, free_list->entry[i].index, + free_list->entry[i].size); + } + + netdev_dbg(NULL, "\n"); +} + +/* dpool_dump_adj_list + * + * Debug function to dump the adjacencies list + */ +static void dpool_dump_adj_list(struct dpool_adj_list *adj_list) +{ + u32 i; + + netdev_dbg(NULL, "AdjList: "); + + for (i = 0; i < adj_list->size; i++) { + netdev_dbg(NULL, "[%02d-%d:%d:%d:%d]", i, + adj_list->entry[i].index, adj_list->entry[i].size, + adj_list->entry[i].left, adj_list->entry[i].right); + } + + netdev_dbg(NULL, "\n"); +} + +/* dpool_move + * + * Function to invoke the EM HWRM callback. Will only be used + * if defrag is selected and is required to insert an entry. This + * function will only be called if the dst_index has sufficient + * adjacent space for the src_index to be moved in to. + * + * dst_index - Table entry index to move to. + * src_index - Table entry index to move. 
+ */ +static int dpool_move(struct dpool *dpool, u32 dst_index, u32 src_index) +{ + struct dpool_entry *entry = dpool->entry; + u32 size; + u32 i; + + netdev_dbg(NULL, "Moving %d to %d\n", src_index, dst_index); + if (!DP_IS_FREE(entry[dst_index].flags)) + return -1; + + size = DP_FLAGS_SIZE(entry[src_index].flags); + + /* Mark destination as busy. */ + entry[dst_index].flags = entry[src_index].flags; + entry[dst_index].entry_data = entry[src_index].entry_data; + + /* Invoke EM move HWRM */ + if (dpool->move_callback) { + dpool->move_callback(dpool->user_data, + entry[src_index].entry_data, + dst_index + dpool->start_index); + } + + /* Mark source as free. */ + entry[src_index].flags = 0; + entry[src_index].entry_data = 0UL; + + /* For multi bock entries mark all dest blocks as busy + * and src blocks are free. + */ + for (i = 1; i < size; i++) { + entry[dst_index + i].flags = size; + entry[src_index + i].flags = 0; + } + + return 0; +} + +/* dpool_defrag_create_free_list + * + * Create a list of free entries. + * + * *lf_index - Returns the start index of the largest block + * of contiguious free entries. + * *lf_size - Returns the size of the largest block of + * contiguious free entries. 
+ */ +static void dpool_defrag_create_free_list(struct dpool *dpool, + struct dpool_free_list *free_list, + u32 *lf_index, u32 *lf_size) +{ + u32 count = 0; + u32 index = 0; + u32 i; + + for (i = 0; i < dpool->size; i++) { + if (DP_IS_FREE(dpool->entry[i].flags)) { + if (count == 0) + index = i; + count++; + } else if (count > 0) { + free_list->entry[free_list->size].index = index; + free_list->entry[free_list->size].size = count; + + if (count > *lf_size) { + *lf_index = free_list->size; + *lf_size = count; + } + + free_list->size++; + count = 0; + } + } + + if (free_list->size == 0) + *lf_size = count; + + netdev_dbg(NULL, "Largest Free Index:%d Size:%d\n", *lf_index, + *lf_size); + dpool_dump_free_list(free_list); +} + +/* dpool_defrag_create_adj_list + * + * Create a list of busy entries including the number of free + * entries before and after the busy block. + */ +static void dpool_defrag_create_adj_list(struct dpool *dpool, + struct dpool_adj_list *adj_list) +{ + u32 count = 0; + u32 used = 0; + u32 i; + + for (i = 0; i < dpool->size; ) { + if (DP_IS_USED(dpool->entry[i].flags)) { + used++; + + if (count > 0) { + adj_list->entry[adj_list->size].index = i; + adj_list->entry[adj_list->size].size = + DP_FLAGS_SIZE(dpool->entry[i].flags); + adj_list->entry[adj_list->size].left = count; + + if (adj_list->size > 0 && used == 1) + adj_list->entry[adj_list->size - 1].right = count; + + adj_list->size++; + } + + count = 0; + i += DP_FLAGS_SIZE(dpool->entry[i].flags); + } else { + used = 0; + count++; + i++; + } + } + + dpool_dump_adj_list(adj_list); +} + +/* dpool_defrag_find_adj_entry + * + * Using the adjacency and free lists find block with largest + * adjacent free space to the left and right. Such a block would + * be the prime target for moving so that the left and right adjacent + * free space can be combined. 
+ */ +static void dpool_defrag_find_adj_entry(struct dpool_adj_list *adj_list, + struct dpool_free_list *free_list, + u32 *lf_index, u32 *lf_size, + u32 *max, u32 *max_index) +{ + u32 max_size = 0; + u32 size; + u32 i; + + /* Using the size of the largest free space available select the + * adjacency list entry of that size with the largest left + right + + * size count. If there are no entries of that size then decrement + * the size and try again. + */ + for (size = *lf_size; size > 0; size--) { + for (i = 0; i < adj_list->size; i++) { + if (adj_list->entry[i].size == size && + ((size + adj_list->entry[i].left + + adj_list->entry[i].right) > *max)) { + *max = size + adj_list->entry[i].left + + adj_list->entry[i].right; + max_size = size; + *max_index = adj_list->entry[i].index; + } + } + + if (*max) + break; + } + + /* If the max entry is smaller than the largest_free_size + * find the first entry in the free list that it cn fit in to. + */ + if (max_size < *lf_size) { + for (i = 0; i < free_list->size; i++) { + if (free_list->entry[i].size >= max_size) { + *lf_index = i; + break; + } + } + } +} + +/* dpool_defrag + * + * Defragment the entries. This can either defragment until there's + * just sufficient space to fit the new entry or defragment until + * there's no more defragmentation possible. Will only be used if + * the EM Move callback is supported and the application selects + * a defrag option on insert. 
+ */ +int dpool_defrag(struct dpool *dpool, u32 entry_size, u8 defrag) +{ + struct dpool_free_list *free_list; + struct dpool_adj_list *adj_list; + u32 largest_free_index = 0; + u32 largest_free_size; + u32 max_index; + u32 max; + int rc; + + free_list = vzalloc(sizeof(*free_list)); + if (!free_list) + return largest_free_index; + + adj_list = vzalloc(sizeof(*adj_list)); + if (!adj_list) { + vfree(free_list); + return largest_free_index; + } + + while (1) { + /* Create list of free entries */ + free_list->size = 0; + largest_free_size = 0; + largest_free_index = 0; + dpool_defrag_create_free_list(dpool, free_list, + &largest_free_index, + &largest_free_size); + + /* If using defrag to fit and there's a large enough + * space then we are done. + */ + if (defrag == DP_DEFRAG_TO_FIT && + largest_free_size >= entry_size) + goto end; + + /* Create list of entries adjacent to free entries */ + adj_list->size = 0; + dpool_defrag_create_adj_list(dpool, adj_list); + + max = 0; + max_index = 0; + dpool_defrag_find_adj_entry(adj_list, free_list, + &largest_free_index, + &largest_free_size, + &max, &max_index); + if (!max) + break; + + /* If we have a contender then move it to the new spot. */ + rc = dpool_move(dpool, + free_list->entry[largest_free_index].index, + max_index); + if (rc) { + largest_free_size = rc; + goto end; + } + } + +end: + vfree(free_list); + vfree(adj_list); + return largest_free_size; +} + +/* dpool_find_free_entries + * + * Find size consecutive free entries and if successful then + * mark those entries as busy. 
+ */ +static u32 dpool_find_free_entries(struct dpool *dpool, u32 size) +{ + u32 first_entry_index; + u32 count = 0; + u32 i; + u32 j; + + for (i = 0; i < dpool->size; i++) { + if (!DP_IS_FREE(dpool->entry[i].flags)) { + /* Busy entry, reset count and keep trying */ + count = 0; + continue; + } + + /* Found a free entry */ + if (count == 0) + first_entry_index = i; + + count++; + if (count < size) + continue; + + /* Success, found enough entries, mark as busy. */ + for (j = 0; j < size; j++) { + dpool->entry[j + first_entry_index].flags = size; + } + /* mark first entry as start */ + dpool->entry[first_entry_index].flags |= DP_FLAGS_START; + + dpool->entry[i].entry_data = 0UL; + + /* Success */ + return (first_entry_index + dpool->start_index); + } + + /* Failure */ + return DP_INVALID_INDEX; +} + +/* dpool_alloc + * + * Request a FW index of size and if necessary de-fragment the dpool + * array. + * + * @dpool: The dpool + * @size: The size of the requested allocation. + * @defrag: Operation to apply when there is insufficient space: + * + * DP_DEFRAG_NONE (0x0) - Don't do anything. + * DP_DEFRAG_ALL (0x1) - Defrag until there is nothing left + * to defrag. + * DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough + * space to insert the requested + * allocation. + * + * Return + * - FW index on success + * - DP_INVALID_INDEX on failure + */ +u32 dpool_alloc(struct dpool *dpool, u32 size, u8 defrag) +{ + u32 index; + int rc; + + if (size > dpool->max_alloc_size || size == 0) + return DP_INVALID_INDEX; + + /* Defrag requires EM move support. */ + if (defrag != DP_DEFRAG_NONE && !dpool->move_callback) + return DP_INVALID_INDEX; + + while (1) { + /* This will find and allocate the required number + * of entries. If there's not enough space then + * it will return DP_INVALID_INDEX and we can go + * on and defrag if selected. 
+ */ + index = dpool_find_free_entries(dpool, size); + if (index != DP_INVALID_INDEX) + return index; + + /* If not defragging we are done */ + if (defrag == DP_DEFRAG_NONE) + break; + + /* If defragging then do it */ + rc = dpool_defrag(dpool, size, defrag); + if (rc < 0) + return DP_INVALID_INDEX; + + /* If the defrag created enough space then try the + * alloc again else quit. + */ + if ((u32)rc < size) + break; + } + + return DP_INVALID_INDEX; +} + +/* dpool_free + * + * Free allocated entry. The is responsible for the dpool and dpool + * entry array memory. + * + * @dpool: The pool + * @index: FW index to free up. + * + * Result + * - 0 on success + * - -1 on failure + */ +int dpool_free(struct dpool *dpool, + u32 index) +{ + int start = (index - dpool->start_index); + u32 size; + u32 i; + + if (start < 0) + return -1; + + if (DP_IS_START(dpool->entry[start].flags)) { + size = DP_FLAGS_SIZE(dpool->entry[start].flags); + if (size > dpool->max_alloc_size || size == 0) + return -1; + + for (i = start; i < (start + size); i++) + dpool->entry[i].flags = 0; + + return 0; + } + + return -1; +} + +/* dpool_free_all + * + * Free all entries. + * + * @dpool: The pool + * + * Result + * - 0 on success + * - -1 on failure + */ +void dpool_free_all(struct dpool *dpool) +{ + u32 i; + + for (i = 0; i < dpool->size; i++) + dpool_free(dpool, dpool->entry[i].index); +} + +/* dpool_set_entry_data + * + * Set the entry data field. This will be passed to callbacks. 
+ * + * @dpool: The dpool + * @index: FW index + * @entry_data: Entry data value + * + * Return + * - FW index on success + * - DP_INVALID_INDEX on failure + */ +int dpool_set_entry_data(struct dpool *dpool, u32 index, u64 entry_data) +{ + int start = (index - dpool->start_index); + + if (start < 0) + return -1; + + if (DP_IS_START(dpool->entry[start].flags)) { + dpool->entry[start].entry_data = entry_data; + return 0; + } + + return -1; +} + +void dpool_dump(struct dpool *dpool) +{ + u32 i; + + netdev_dbg(NULL, "Dpool size;%d start:0x%x\n", dpool->size, + dpool->start_index); + + for (i = 0; i < dpool->size; i++) { + netdev_dbg(NULL, "[0x%08x-0x%08x]\n", dpool->entry[i].flags, + dpool->entry[i].index); + } + + netdev_dbg(NULL, "\n"); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.h new file mode 100644 index 000000000000..c6acc5015f05 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/dpool.h @@ -0,0 +1,248 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _DPOOL_H_ +#define _DPOOL_H_ + +#include + +#define DP_MAX_FREE_SIZE 0x8000 /* 32K */ + +#define DP_INVALID_INDEX 0xffffffff + +#define DP_FLAGS_START 0x80000000 +#define DP_IS_START(flags) ((flags) & DP_FLAGS_START) + +#define DP_FLAGS_SIZE_SHIFT 0 +#define DP_FLAGS_SIZE_MASK 0x07 + +#define DP_FLAGS_SIZE(flags) (((flags) >> DP_FLAGS_SIZE_SHIFT) & DP_FLAGS_SIZE_MASK) + +#define DP_IS_FREE(flags) (DP_FLAGS_SIZE(flags) == 0) +#define DP_IS_USED(flags) (DP_FLAGS_SIZE(flags) != 0) + +#define DP_DEFRAG_NONE 0x0 +#define DP_DEFRAG_ALL 0x1 +#define DP_DEFRAG_TO_FIT 0x2 + +/** + * Free list entry + * + * @index: Index in to dpool entry array + * @size: The size of the entry in the dpool entry array + * + * Each entry includes an index in to the dpool entry array + * and the size of dpool array entry. 
 */
struct dpool_free_list_entry {
	u32 index;
	u32 size;
};

/**
 * Free list
 *
 * @size: Number of entries in the free list
 * @entry: List of unused entries in the dpool entry array
 *
 * Used internally to record free entries in the dpool entry array.
 * Each entry represents a single or multiple contiguous entries
 * in the dpool entry array.
 *
 * Used only during the defrag operation.
 */
struct dpool_free_list {
	u32 size;
	struct dpool_free_list_entry entry[DP_MAX_FREE_SIZE];
};

/**
 * Adjacent list entry
 *
 * @index: Index in to dpool entry array
 * @size: The size of the entry in the dpool entry array
 * @left: Number of free entries directly to the left of this entry
 * @right: Number of free entries directly to the right of this entry
 *
 * Each entry includes an index in to the dpool entry array,
 * the size of the entry and the counts of free entries to the
 * right and left of that entry.
 */
struct dpool_adj_list_entry {
	u32 index;
	u32 size;
	u32 left;
	u32 right;
};

/**
 * Adjacent list
 *
 * @size: Number of entries in the adj list
 * @entry: List of entries in the dpool entry array that have
 *         free entries directly to their left and right.
 *
 * A list of references to entries in the dpool entry array that
 * have free entries to the left and right. Since we pack to the
 * left, entries will always have a non-zero left count.
 *
 * Used only during the defrag operation.
 */
struct dpool_adj_list {
	u32 size;
	struct dpool_adj_list_entry entry[DP_MAX_FREE_SIZE];
};

/**
 * Dpool entry
 *
 * Each entry includes allocation flags (START marker plus block size),
 * the FW index and an opaque per-entry data value.
 */
struct dpool_entry {
	u32 flags;
	u32 index;
	u64 entry_data;
};

/**
 * Dpool
 *
 * Used to manage a resource pool. Includes the start FW index, the
 * size of the entry array and the entry array itself.
 */
struct dpool {
	u32 start_index;
	u32 size;
	u8 max_alloc_size;
	void *user_data;
	int (*move_callback)(void *user_data,
			     u64 entry_data,
			     u32 new_index);
	struct dpool_entry *entry;
};

/**
 * dpool_init
 *
 * Initialize the dpool
 *
 * @dpool: Pointer to a dpool structure that includes an entry field
 *         that points to the entry array. The user is responsible for
 *         allocating memory for the dpool struct and the entry array.
 * @start_index: The base index to use.
 * @size: The number of entries
 * @max_alloc_size: Maximum size (in entries) of a single allocation
 *                  request; dpool_alloc() rejects anything larger.
 * @user_data: Pointer to user data. Will be passed in callbacks.
 * @move_callback: Pointer to move EM entry callback.
 *
 * Return
 *      - 0 on success
 *      - negative errno on failure
 */
int dpool_init(struct dpool *dpool, u32 start_index, u32 size,
	       u8 max_alloc_size, void *user_data,
	       int (*move_callback)(void *, u64, u32));

/**
 * dpool_alloc
 *
 * Request a FW index of size and if necessary de-fragment the dpool
 * array.
 *
 * @dpool: The dpool
 * @size: The size of the requested allocation.
 * @defrag: Operation to apply when there is insufficient space:
 *
 *          DP_DEFRAG_NONE   (0x0) - Don't do anything.
 *          DP_DEFRAG_ALL    (0x1) - Defrag until there is nothing left
 *                                   to defrag.
 *          DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough
 *                                   space to insert the requested
 *                                   allocation.
 *
 * Return
 *      - FW index on success
 *      - DP_INVALID_INDEX on failure
 */
u32 dpool_alloc(struct dpool *dpool, u32 size, u8 defrag);

/**
 * dpool_set_entry_data
 *
 * Set the entry data field. This will be passed to callbacks.
 *
 * @dpool: The dpool
 * @index: FW index
 * @entry_data: Entry data value
 *
 * Return
 *      - 0 on success
 *      - -1 on failure (index is not the start of an allocation)
 */
int dpool_set_entry_data(struct dpool *dpool, u32 index, u64 entry_data);

/**
 * dpool_free
 *
 * Free allocated entry. The caller is responsible for the dpool and dpool
 * entry array memory.
+ * + * @dpool: The pool + * @index: FW index to free up. + * + * Result + * - 0 on success + * - -1 on failure + * + */ +int dpool_free(struct dpool *dpool, u32 index); + +/** + * dpool_free_all + * + * Free all entries. + * + * @dpool: The pool + * + * Result + * - 0 on success + * - -1 on failure + * + */ +void dpool_free_all(struct dpool *dpool); + +/** + * dpool_dump + * + * Debug/util function to dump the dpool array. + * + * @dpool: The pool + * + */ +void dpool_dump(struct dpool *dpool); + +/** + * dpool_defrag + * + * De-fragment the dpool array and apply the specified defrag strategy. + * + * @dpool: The dpool + * @entry_size: If using the DP_DEFRAG_TO_FIT stratagy defrag will stop when + * there's at least entry_size space available. + * @defrag: + * Defrag strategy: + * + * DP_DEFRAG_ALL (0x1) - Defrag until there is nothing left + * to defrag. + * DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough space + * to insert the requested allocation. + * + * Return + * < 0 - on failure + * > 0 - The size of the largest free space + */ +int dpool_defrag(struct dpool *dpool, u32 entry_size, u8 defrag); + +#endif /* _DPOOL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.c new file mode 100644 index 000000000000..efcab70c0da3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +/* Random Number Functions */ + +#include +#include "rand.h" + +#define TF_RAND_LFSR_INIT_VALUE 0xACE1u + +u16 lfsr = TF_RAND_LFSR_INIT_VALUE; +u32 bit; + +/** + * Generates a 16 bit pseudo random number + * + * Returns: + * u16 number + */ +static u16 rand16(void) +{ + bit = ((lfsr >> 0) ^ (lfsr >> 2) ^ (lfsr >> 3) ^ (lfsr >> 5)) & 1; + return lfsr = (lfsr >> 1) | (bit << 15); +} + +/** + * Generates a 32 bit pseudo random number + * + * Returns: + * u32 number + */ +u32 rand32(void) +{ + return (rand16() << 16) | rand16(); +} + +/* Resets the seed used by the pseudo random number generator */ +void rand_init(void) +{ + lfsr = TF_RAND_LFSR_INIT_VALUE; + bit = 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.h new file mode 100644 index 000000000000..956e84223c91 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/rand.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +/* Random Number Functions */ +#ifndef __RAND_H__ +#define __RAND_H__ + +/** + * Generates a 32 bit pseudo random number + * + * Returns: + * u32 number + */ +u32 rand32(void); + +/** + * Resets the seed used by the pseudo random number generator + * + * Returns: + */ +void rand_init(void); + +#endif /* __RAND_H__ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.c new file mode 100644 index 000000000000..c844e28c77d3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.c @@ -0,0 +1,1600 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ +#include +#include +#include "tf_core.h" +#include "tf_util.h" +#include "tf_session.h" +#include "tf_tbl.h" +#include "tf_em.h" +#include "tf_rm.h" +#include "tf_global_cfg.h" +#include "tf_msg.h" +#include "bitalloc.h" +#include "bnxt.h" +#include "tf_ext_flow_handle.h" + +int tf_open_session(struct tf *tfp, struct tf_open_session_parms *parms) +{ + struct tf_session_open_session_parms oparms; + unsigned int domain, bus, slot, device; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = parms->bp; + + /* Filter out any non-supported device types on the Core + * side. It is assumed that the Firmware will be supported if + * firmware open session succeeds. + */ + if (parms->device_type != TF_DEVICE_TYPE_P4 && + parms->device_type != TF_DEVICE_TYPE_P5) { + netdev_dbg(bp->dev, "Unsupported device type %d\n", + parms->device_type); + return -EOPNOTSUPP; + } + + /* Verify control channel and build the beginning of session_id */ + rc = sscanf(parms->ctrl_chan_name, "%x:%x:%x.%u", &domain, &bus, &slot, + &device); + if (rc != 4) { + /* PCI Domain not provided (optional in DPDK), thus we + * force domain to 0 and recheck. 
+ */ + domain = 0; + + /* Check parsing of bus/slot/device */ + rc = sscanf(parms->ctrl_chan_name, "%x:%x.%u", &bus, &slot, + &device); + if (rc != 3) { + netdev_dbg(bp->dev, + "Failed to scan device ctrl_chan_name\n"); + return -EINVAL; + } + } + + parms->session_id.internal.domain = domain; + parms->session_id.internal.bus = bus; + parms->session_id.internal.device = device; + oparms.open_cfg = parms; + + /* Session vs session client is decided in + * tf_session_open_session() + */ + rc = tf_session_open_session(tfp, &oparms); + /* Logging handled by tf_session_open_session */ + if (rc) + return rc; + + netdev_dbg(bp->dev, "%s: domain:%d, bus:%d, device:%u\n", __func__, + parms->session_id.internal.domain, + parms->session_id.internal.bus, + parms->session_id.internal.device); + + return 0; +} + +int tf_attach_session(struct tf *tfp, struct tf_attach_session_parms *parms) +{ + struct tf_session_attach_session_parms aparms; + unsigned int domain, bus, slot, device; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Verify control channel */ + rc = sscanf(parms->ctrl_chan_name, "%x:%x:%x.%u", &domain, &bus, &slot, + &device); + if (rc != 4) { + netdev_dbg(bp->dev, "Failed to scan device ctrl_chan_name\n"); + return -EINVAL; + } + + /* Verify 'attach' channel */ + rc = sscanf(parms->attach_chan_name, "%x:%x:%x.%u", &domain, &bus, + &slot, &device); + if (rc != 4) { + netdev_dbg(bp->dev, "Failed to scan device attach_chan_name\n"); + return -EINVAL; + } + + /* Prepare return value of session_id, using ctrl_chan_name + * device values as it becomes the session id. 
+ */ + parms->session_id.internal.domain = domain; + parms->session_id.internal.bus = bus; + parms->session_id.internal.device = device; + aparms.attach_cfg = parms; + rc = tf_session_attach_session(tfp, + &aparms); + /* Logging handled by dev_bind */ + if (rc) + return rc; + + netdev_dbg(bp->dev, + "%s: sid:%d domain:%d, bus:%d, device:%d, fw_sid:%d\n", + __func__, parms->session_id.id, + parms->session_id.internal.domain, + parms->session_id.internal.bus, + parms->session_id.internal.device, + parms->session_id.internal.fw_session_id); + + return rc; +} + +int tf_close_session(struct tf *tfp) +{ + struct tf_session_close_session_parms cparms = { 0 }; + union tf_session_id session_id = { 0 }; + u8 ref_count; + int rc; + + if (!tfp) + return -EINVAL; + + cparms.ref_count = &ref_count; + cparms.session_id = &session_id; + /* Session vs session client is decided in + * tf_session_close_session() + */ + rc = tf_session_close_session(tfp, + &cparms); + /* Logging handled by tf_session_close_session */ + if (rc) + return rc; + + netdev_dbg(tfp->bp->dev, "%s: domain:%d, bus:%d, device:%d\n", + __func__, cparms.session_id->internal.domain, + cparms.session_id->internal.bus, + cparms.session_id->internal.device); + + return rc; +} + +/* insert EM hash entry API + * + * returns: + * 0 - Success + * -EINVAL - Error + */ +int tf_insert_em_entry(struct tf *tfp, struct tf_insert_em_entry_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (parms->mem == 
TF_MEM_EXTERNAL && + dev->ops->tf_dev_insert_ext_em_entry) + rc = dev->ops->tf_dev_insert_ext_em_entry(tfp, parms); + else if (parms->mem == TF_MEM_INTERNAL && + dev->ops->tf_dev_insert_int_em_entry) + rc = dev->ops->tf_dev_insert_int_em_entry(tfp, parms); + else + return -EINVAL; + + if (rc) { + netdev_err(bp->dev, "%s: EM insert failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +/* Delete EM hash entry API + * + * returns: + * 0 - Success + * -EINVAL - Error + */ +int tf_delete_em_entry(struct tf *tfp, struct tf_delete_em_entry_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + unsigned int flag = 0; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + TF_GET_FLAG_FROM_FLOW_HANDLE(parms->flow_handle, flag); + if ((flag & TF_FLAGS_FLOW_HANDLE_INTERNAL)) + rc = dev->ops->tf_dev_delete_int_em_entry(tfp, parms); + else + rc = dev->ops->tf_dev_delete_ext_em_entry(tfp, parms); + + if (rc) { + netdev_dbg(bp->dev, "%s: EM delete failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return rc; +} + +/* Get global configuration API + * + * returns: + * 0 - Success + * -EINVAL - Error + */ +int tf_get_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup 
session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!parms->config || parms->config_sz_in_bytes == 0) { + netdev_dbg(bp->dev, "Invalid Argument(s)\n"); + return -EINVAL; + } + + if (!dev->ops->tf_dev_get_global_cfg) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_get_global_cfg(tfp, parms); + if (rc) { + netdev_dbg(bp->dev, "%s: Global Cfg get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return rc; +} + +/* Set global configuration API + * + * returns: + * 0 - Success + * -EINVAL - Error + */ +int tf_set_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!parms->config || parms->config_sz_in_bytes == 0) { + netdev_dbg(bp->dev, "Invalid Argument(s)\n"); + return -EINVAL; + } + + if (!dev->ops->tf_dev_set_global_cfg) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_set_global_cfg(tfp, parms); + if (rc) { + netdev_dbg(bp->dev, "%s: Global Cfg set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + 
return rc; + } + + return rc; +} + +int tf_alloc_identifier(struct tf *tfp, + struct tf_alloc_identifier_parms *parms) +{ + struct tf_ident_alloc_parms aparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + u16 id; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_alloc_ident) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + aparms.dir = parms->dir; + aparms.type = parms->ident_type; + aparms.id = &id; + rc = dev->ops->tf_dev_alloc_ident(tfp, &aparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: Identifier allocation failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + parms->id = id; + + return 0; +} + +int tf_free_identifier(struct tf *tfp, struct tf_free_identifier_parms *parms) +{ + struct tf_ident_free_parms fparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_free_ident) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: 
Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + fparms.dir = parms->dir; + fparms.type = parms->ident_type; + fparms.id = parms->id; + fparms.ref_cnt = &parms->ref_cnt; + rc = dev->ops->tf_dev_free_ident(tfp, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Identifier free failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +int tf_alloc_tcam_entry(struct tf *tfp, + struct tf_alloc_tcam_entry_parms *parms) +{ + struct tf_tcam_alloc_parms aparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_alloc_tcam) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + aparms.dir = parms->dir; + aparms.type = parms->tcam_tbl_type; + aparms.key_size = TF_BITS2BYTES_WORD_ALIGN(parms->key_sz_in_bits); + aparms.priority = parms->priority; + rc = dev->ops->tf_dev_alloc_tcam(tfp, &aparms); + if (rc) { + netdev_dbg(bp->dev, "%s: TCAM allocation failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + parms->idx = aparms.idx; + + return 0; +} + +int tf_set_tcam_entry(struct tf *tfp, struct tf_set_tcam_entry_parms *parms) +{ + struct tf_tcam_set_parms sparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = 
tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_set_tcam || !dev->ops->tf_dev_word_align) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + sparms.dir = parms->dir; + sparms.type = parms->tcam_tbl_type; + sparms.idx = parms->idx; + sparms.key = parms->key; + sparms.mask = parms->mask; + sparms.key_size = dev->ops->tf_dev_word_align(parms->key_sz_in_bits); + sparms.result = parms->result; + sparms.result_size = TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits); + + rc = dev->ops->tf_dev_set_tcam(tfp, &sparms); + if (rc) { + netdev_err(bp->dev, "%s: TCAM set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +int tf_get_tcam_entry(struct tf *tfp, struct tf_get_tcam_entry_parms *parms) +{ + struct tf_tcam_get_parms gparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_get_tcam) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + gparms.dir = parms->dir; + gparms.type = 
parms->tcam_tbl_type; + gparms.idx = parms->idx; + gparms.key = parms->key; + gparms.key_size = dev->ops->tf_dev_word_align(parms->key_sz_in_bits); + gparms.mask = parms->mask; + gparms.result = parms->result; + gparms.result_size = TF_BITS2BYTES_WORD_ALIGN(parms->result_sz_in_bits); + + rc = dev->ops->tf_dev_get_tcam(tfp, &gparms); + if (rc) { + netdev_dbg(bp->dev, "%s: TCAM get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + parms->key_sz_in_bits = gparms.key_size * 8; + parms->result_sz_in_bits = gparms.result_size * 8; + + return 0; +} + +int tf_free_tcam_entry(struct tf *tfp, struct tf_free_tcam_entry_parms *parms) +{ + struct tf_tcam_free_parms fparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_free_tcam) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + fparms.dir = parms->dir; + fparms.type = parms->tcam_tbl_type; + fparms.idx = parms->idx; + rc = dev->ops->tf_dev_free_tcam(tfp, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: TCAM free failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +int tf_alloc_tbl_entry(struct tf *tfp, struct tf_alloc_tbl_entry_parms *parms) +{ + struct tf_tbl_alloc_parms aparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + u32 idx; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* 
Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + aparms.dir = parms->dir; + aparms.type = parms->type; + aparms.idx = &idx; + aparms.tbl_scope_id = parms->tbl_scope_id; + + if (parms->type == TF_TBL_TYPE_EXT) { + if (!dev->ops->tf_dev_alloc_ext_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_alloc_ext_tbl(tfp, &aparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: External table allocation failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) { + rc = dev->ops->tf_dev_alloc_sram_tbl(tfp, &aparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: SRAM table allocation failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else { + rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: Table allocation failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } + + parms->idx = idx; + + return 0; +} + +int tf_free_tbl_entry(struct tf *tfp, struct tf_free_tbl_entry_parms *parms) +{ + struct tf_tbl_free_parms fparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if 
(rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + fparms.dir = parms->dir; + fparms.type = parms->type; + fparms.idx = parms->idx; + fparms.tbl_scope_id = parms->tbl_scope_id; + + if (parms->type == TF_TBL_TYPE_EXT) { + if (!dev->ops->tf_dev_free_ext_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), + rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_free_ext_tbl(tfp, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Table free failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) { + rc = dev->ops->tf_dev_free_sram_tbl(tfp, &fparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: SRAM table free failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else { + rc = dev->ops->tf_dev_free_tbl(tfp, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Table free failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } + return 0; +} + +int tf_set_tbl_entry(struct tf *tfp, struct tf_set_tbl_entry_parms *parms) +{ + struct tf_tbl_set_parms sparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + sparms.dir = parms->dir; + sparms.type = parms->type; + sparms.data = parms->data; + sparms.data_sz_in_bytes = parms->data_sz_in_bytes; + sparms.idx = parms->idx; + sparms.tbl_scope_id = parms->tbl_scope_id; + + if 
(parms->type == TF_TBL_TYPE_EXT) { + if (!dev->ops->tf_dev_set_ext_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_set_ext_tbl(tfp, &sparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Table set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) { + rc = dev->ops->tf_dev_set_sram_tbl(tfp, &sparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: SRAM table set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else { + if (!dev->ops->tf_dev_set_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_set_tbl(tfp, &sparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: Table set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } + + return rc; +} + +int tf_get_tbl_entry(struct tf *tfp, struct tf_get_tbl_entry_parms *parms) +{ + struct tf_tbl_get_parms gparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc = 0; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + gparms.dir = parms->dir; + gparms.type = parms->type; + gparms.data = parms->data; + gparms.data_sz_in_bytes = parms->data_sz_in_bytes; + gparms.idx = parms->idx; + + if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) { + rc = dev->ops->tf_dev_get_sram_tbl(tfp, &gparms); + if 
(rc) { + netdev_dbg(bp->dev, + "%s: SRAM table get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } else { + if (!dev->ops->tf_dev_get_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_get_tbl(tfp, &gparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Table get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + } + + return rc; +} + +int tf_bulk_get_tbl_entry(struct tf *tfp, + struct tf_bulk_get_tbl_entry_parms *parms) +{ + struct tf_tbl_get_bulk_parms bparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + bparms.dir = parms->dir; + bparms.type = parms->type; + bparms.starting_idx = parms->starting_idx; + bparms.num_entries = parms->num_entries; + bparms.entry_sz_in_bytes = parms->entry_sz_in_bytes; + bparms.physical_mem_addr = parms->physical_mem_addr; + + if (parms->type == TF_TBL_TYPE_EXT) { + /* Not supported, yet */ + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s, External table type not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + + return rc; + } else if (dev->ops->tf_dev_is_sram_managed(tfp, parms->type)) { + rc = dev->ops->tf_dev_get_bulk_sram_tbl(tfp, &bparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: SRAM table bulk get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + } + return rc; + } + + if (!dev->ops->tf_dev_get_bulk_tbl) { + rc = -EOPNOTSUPP; + 
netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return -EOPNOTSUPP; + } + + rc = dev->ops->tf_dev_get_bulk_tbl(tfp, &bparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Table get bulk failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + return rc; +} + +int tf_get_shared_tbl_increment(struct tf *tfp, + struct tf_get_shared_tbl_increment_parms + *parms) +{ + struct tf_session *tfs; + struct tf_dev_info *dev; + struct bnxt *bp; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Internal table type processing */ + + if (!dev->ops->tf_dev_get_shared_tbl_increment) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), -rc); + return rc; + } + + rc = dev->ops->tf_dev_get_shared_tbl_increment(tfp, parms); + if (rc) { + netdev_dbg(bp->dev, + "%s: Get table increment not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return rc; +} + +int +tf_alloc_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms) +{ + struct tf_dev_info *dev; + struct tf_session *tfs; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup session, rc:%d\n", + rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup device, rc:%d\n", + 
rc); + return rc; + } + + if (dev->ops->tf_dev_alloc_tbl_scope) { + rc = dev->ops->tf_dev_alloc_tbl_scope(tfp, parms); + } else { + netdev_dbg(bp->dev, + "Alloc table scope not supported by device\n"); + return -EINVAL; + } + + return rc; +} + +int +tf_map_tbl_scope(struct tf *tfp, + struct tf_map_tbl_scope_parms *parms) +{ + struct tf_dev_info *dev; + struct tf_session *tfs; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup session, rc:%d\n", + rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup device, rc:%d\n", + rc); + return rc; + } + + if (dev->ops->tf_dev_map_tbl_scope) { + rc = dev->ops->tf_dev_map_tbl_scope(tfp, parms); + } else { + netdev_dbg(bp->dev, + "Map table scope not supported by device\n"); + return -EINVAL; + } + + return rc; +} + +int +tf_free_tbl_scope(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms) +{ + struct tf_dev_info *dev; + struct tf_session *tfs; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup session, rc:%d\n", + rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, + "Failed to lookup device, rc:%d\n", + rc); + return rc; + } + + if (dev->ops->tf_dev_free_tbl_scope) { + rc = dev->ops->tf_dev_free_tbl_scope(tfp, parms); + } else { + netdev_dbg(bp->dev, + "Free table scope not supported by device\n"); + return -EINVAL; + } + + return rc; +} + +int tf_set_if_tbl_entry(struct tf *tfp, + struct tf_set_if_tbl_entry_parms *parms) +{ + struct tf_if_tbl_set_parms sparms = { 0 }; + struct bnxt 
*bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_set_if_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + sparms.dir = parms->dir; + sparms.type = parms->type; + sparms.idx = parms->idx; + sparms.data_sz_in_bytes = parms->data_sz_in_bytes; + sparms.data = parms->data; + + rc = dev->ops->tf_dev_set_if_tbl(tfp, &sparms); + if (rc) { + netdev_dbg(bp->dev, "%s: If_tbl set failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +int tf_get_if_tbl_entry(struct tf *tfp, + struct tf_get_if_tbl_entry_parms *parms) +{ + struct tf_if_tbl_get_parms gparms = { 0 }; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_get_if_tbl) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + gparms.dir = parms->dir; + gparms.type = parms->type; + 
gparms.idx = parms->idx; + gparms.data_sz_in_bytes = parms->data_sz_in_bytes; + gparms.data = parms->data; + + rc = dev->ops->tf_dev_get_if_tbl(tfp, &gparms); + if (rc) { + netdev_dbg(bp->dev, "%s: If_tbl get failed, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + return 0; +} + +int tf_get_session_info(struct tf *tfp, + struct tf_get_session_info_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup session, rc:%d\n", + __func__, rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to lookup device, rc:%d\n", + __func__, rc); + return rc; + } + + if (!dev->ops->tf_dev_get_ident_resc_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: get_ident_resc_info unsupported, rc:%d\n", + __func__, rc); + return rc; + } + + rc = dev->ops->tf_dev_get_ident_resc_info(tfp, + parms->session_info.ident); + if (rc) { + netdev_dbg(bp->dev, "%s: Ident get resc info failed, rc:%d\n", + __func__, rc); + } + + if (!dev->ops->tf_dev_get_tbl_resc_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: get_tbl_resc_info unsupported, rc:%d\n", + __func__, rc); + return rc; + } + + rc = dev->ops->tf_dev_get_tbl_resc_info(tfp, parms->session_info.tbl); + if (rc) { + netdev_dbg(bp->dev, "%s: Tbl get resc info failed, rc:%d\n", + __func__, rc); + } + + if (!dev->ops->tf_dev_get_tcam_resc_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: get_tcam_resc_info unsupported, rc:%d\n", + __func__, rc); + return rc; + } + + rc = dev->ops->tf_dev_get_tcam_resc_info(tfp, + parms->session_info.tcam); + if (rc) { + netdev_dbg(bp->dev, "%s: TCAM get resc info failed, rc:%d\n", + __func__, rc); + } + + if (!dev->ops->tf_dev_get_em_resc_info) 
{ + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, + "%s: get_em_resc_info unsupported, rc:%d\n", + __func__, rc); + return rc; + } + + rc = dev->ops->tf_dev_get_em_resc_info(tfp, parms->session_info.em); + if (rc) { + netdev_dbg(bp->dev, "%s: EM get resc info failed, rc:%d\n", + __func__, rc); + } + + return 0; +} + +int tf_get_version(struct tf *tfp, + struct tf_get_version_parms *parms) +{ + struct tf_dev_info dev; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* This function can be called before open session, filter + * out any non-supported device types on the Core side. + */ + if (parms->device_type != TF_DEVICE_TYPE_P4 && + parms->device_type != TF_DEVICE_TYPE_P5) { + netdev_dbg(bp->dev, + "Unsupported device type %d\n", + parms->device_type); + return -EOPNOTSUPP; + } + + tf_dev_bind_ops(parms->device_type, &dev); + + rc = tf_msg_get_version(parms->bp, &dev, parms); + if (rc) + return rc; + + return 0; +} + +int tf_query_sram_resources(struct tf *tfp, + struct tf_query_sram_resources_parms *parms) +{ + enum tf_rm_resc_resv_strategy resv_strategy; + struct tf_rm_resc_req_entry *query; + struct bnxt *bp; + struct tf_dev_info dev; + u16 max_types; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* This function can be called before open session, filter + * out any non-supported device types on the Core side. 
+ */ + if (parms->device_type != TF_DEVICE_TYPE_P5) { + netdev_dbg(bp->dev, "Unsupported device type %d\n", + parms->device_type); + return -EINVAL; + } + + tf_dev_bind_ops(parms->device_type, &dev); + + if (!dev.ops->tf_dev_get_max_types) { + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), EOPNOTSUPP); + return -EOPNOTSUPP; + } + + /* Need device max number of elements for the RM QCAPS */ + rc = dev.ops->tf_dev_get_max_types(tfp, &max_types); + if (rc) { + netdev_dbg(bp->dev, "Get SRAM resc info failed, rc:%d\n", rc); + return rc; + } + + /* Allocate memory for RM QCAPS request */ + query = vzalloc(max_types * sizeof(*query)); + tfp->bp = parms->bp; + + /* Get Firmware Capabilities */ + rc = tf_msg_session_resc_qcaps(tfp, parms->dir, max_types, query, + &resv_strategy, &parms->sram_profile); + if (rc) + goto end; + + if (!dev.ops->tf_dev_get_sram_resources) { + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), EOPNOTSUPP); + rc = -EOPNOTSUPP; + goto end; + } + + rc = dev.ops->tf_dev_get_sram_resources((void *)query, + parms->bank_resc_count, + &parms->dynamic_sram_capable); + if (rc) + netdev_dbg(bp->dev, "Get SRAM resc info failed, rc:%d\n", rc); + + end: + vfree(query); + return rc; +} + +int tf_set_sram_policy(struct tf *tfp, struct tf_set_sram_policy_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info dev; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* This function can be called before open session, filter + * out any non-supported device types on the Core side. 
+ */ + if (parms->device_type != TF_DEVICE_TYPE_P5) { + netdev_dbg(bp->dev, "%s: Unsupported device type %d\n", + __func__, parms->device_type); + return -EINVAL; + } + + tf_dev_bind_ops(parms->device_type, &dev); + + if (!dev.ops->tf_dev_set_sram_policy) { + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), EOPNOTSUPP); + return -EOPNOTSUPP; + } + + rc = dev.ops->tf_dev_set_sram_policy(parms->dir, parms->bank_id); + if (rc) { + netdev_dbg(bp->dev, "%s: SRAM policy set failed, rc:%d\n", + tf_dir_2_str(parms->dir), -rc); + return rc; + } + + return rc; +} + +int tf_get_sram_policy(struct tf *tfp, struct tf_get_sram_policy_parms *parms) +{ + struct bnxt *bp; + struct tf_dev_info dev; + int rc = 0; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* This function can be called before open session, filter + * out any non-supported device types on the Core side. + */ + if (parms->device_type != TF_DEVICE_TYPE_P5) { + netdev_dbg(bp->dev, "%s: Unsupported device type %d\n", + __func__, parms->device_type); + return -EINVAL; + } + + tf_dev_bind_ops(parms->device_type, &dev); + + if (!dev.ops->tf_dev_get_sram_policy) { + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), EOPNOTSUPP); + return -EOPNOTSUPP; + } + + rc = dev.ops->tf_dev_get_sram_policy(parms->dir, parms->bank_id); + if (rc) { + netdev_dbg(bp->dev, "%s: SRAM policy get failed, rc:%d\n", + tf_dir_2_str(parms->dir), -rc); + return rc; + } + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.h new file mode 100644 index 000000000000..e2fa5d23c49d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_core.h @@ -0,0 +1,1598 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_CORE_H_ +#define _TF_CORE_H_ + +#include <linux/types.h> +#include "hcapi_cfa_defs.h" + +/* Truflow Core API Header File */ + +/********** BEGIN Truflow Core DEFINITIONS **********/ + +#define TF_KILOBYTE 1024 +#define TF_MEGABYTE (1024 * 1024) + +/* direction */ +enum tf_dir { + TF_DIR_RX, /* Receive */ + TF_DIR_TX, /* Transmit */ + TF_DIR_MAX +}; + +/* Memory choice */ +enum tf_mem { + TF_MEM_INTERNAL, /* Internal */ + TF_MEM_EXTERNAL, /* External */ + TF_MEM_MAX +}; + +/* External memory control channel type */ +enum tf_ext_mem_chan_type { + TF_EXT_MEM_CHAN_TYPE_DIRECT = 0, /* Direct memory write(Wh+/SR) */ + TF_EXT_MEM_CHAN_TYPE_RING_IF, /* Ring interface MPC */ + TF_EXT_MEM_CHAN_TYPE_FW, /* Use HWRM message to firmware */ + TF_EXT_MEM_CHAN_TYPE_RING_IF_FW, /* Use ring_if message to firmware */ + TF_EXT_MEM_CHAN_TYPE_MAX +}; + +/* WC TCAM number of slice per row that devices supported */ +enum tf_wc_num_slice { + TF_WC_TCAM_1_SLICE_PER_ROW = 1, + TF_WC_TCAM_2_SLICE_PER_ROW = 2, + TF_WC_TCAM_4_SLICE_PER_ROW = 4, + TF_WC_TCAM_8_SLICE_PER_ROW = 8, +}; + +/* Bank identifier */ +enum tf_sram_bank_id { + TF_SRAM_BANK_ID_0, /* SRAM Bank 0 id */ + TF_SRAM_BANK_ID_1, /* SRAM Bank 1 id */ + TF_SRAM_BANK_ID_2, /* SRAM Bank 2 id */ + TF_SRAM_BANK_ID_3, /* SRAM Bank 3 id */ + TF_SRAM_BANK_ID_MAX /* SRAM Bank index limit */ +}; + +/* EEM record AR helper + * + * Helper to handle the Action Record Pointer in the EEM Record Entry. + * + * Convert absolute offset to action record pointer in EEM record entry + * Convert action record pointer in EEM record entry to absolute offset + */ +#define TF_ACT_REC_OFFSET_2_PTR(offset) ((offset) >> 4) +#define TF_ACT_REC_PTR_2_OFFSET(offset) ((offset) << 4) + +/********** BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/ + +/** + * Session Version + * + * The version controls the format of the tf_session and + * tf_session_info structure. This is to ensure upgrade between + * versions can be supported.
+ */ +#define TF_SESSION_VER_MAJOR 1 /* Major Version */ +#define TF_SESSION_VER_MINOR 0 /* Minor Version */ +#define TF_SESSION_VER_UPDATE 0 /* Update Version */ + +/** + * Session Name + * + * Name of the TruFlow control channel interface. + */ +#define TF_SESSION_NAME_MAX 64 + +#define TF_FW_SESSION_ID_INVALID 0xFF /* Invalid FW Session ID */ + +/** + * Session Identifier + * + * Unique session identifier which includes PCIe bus info to + * distinguish the PF and session info to identify the associated + * TruFlow session. Session ID is constructed from the passed in + * ctrl_chan_name in tf_open_session() together with an allocated + * fw_session_id. Done by TruFlow on tf_open_session(). + */ +union tf_session_id { + u32 id; + struct { + u8 domain; + u8 bus; + u8 device; + u8 fw_session_id; + } internal; +}; + +/** + * Session Client Identifier + * + * Unique identifier for a client within a session. Session Client ID + * is constructed from the passed in session and a firmware allocated + * fw_session_client_id. Done by TruFlow on tf_open_session(). + */ +union tf_session_client_id { + u16 id; + struct { + u8 fw_session_id; + u8 fw_session_client_id; + } internal; +}; + +/** + * Session Version + * + * The version controls the format of the tf_session and + * tf_session_info structure. This is to ensure upgrade between + * versions can be supported. + * + * Please see the TF_VER_MAJOR/MINOR and UPDATE defines. 
+ */ +struct tf_session_version { + u8 major; + u8 minor; + u8 update; +}; + +/** + * Session supported device types + */ +enum tf_device_type { + TF_DEVICE_TYPE_P4 = 0, + TF_DEVICE_TYPE_P5, + TF_DEVICE_TYPE_MAX /* Maximum */ +}; + +/** + * Module types + */ +enum tf_module_type { + TF_MODULE_TYPE_IDENTIFIER, /* Identifier module */ + TF_MODULE_TYPE_TABLE, /* Table type module */ + TF_MODULE_TYPE_TCAM, /* TCAM module */ + TF_MODULE_TYPE_EM, /* EM module */ + TF_MODULE_TYPE_MAX +}; + +/** + * Identifier resource types + */ +enum tf_identifier_type { + TF_IDENT_TYPE_L2_CTXT_HIGH, /* WH/SR/TH + * The L2 Context is returned from the + * L2 Ctxt TCAM lookup and can be used + * in WC TCAM or EM keys to virtualize + * further lookups. + */ + TF_IDENT_TYPE_L2_CTXT_LOW, /* WH/SR/TH + * The L2 Context is returned from the + * L2 Ctxt TCAM lookup and can be used + * in WC TCAM or EM keys to virtualize + * further lookups. + */ + TF_IDENT_TYPE_PROF_FUNC, /* WH/SR/TH + * The WC profile func is returned + * from the L2 Ctxt TCAM lookup to + * enable virtualization of the + * profile TCAM. + */ + TF_IDENT_TYPE_WC_PROF, /* WH/SR/TH + * The WC profile ID is included in + * the WC lookup key to enable + * virtualization of the WC TCAM + * hardware. + */ + TF_IDENT_TYPE_EM_PROF, /* WH/SR/TH + * The EM profile ID is included in + * the EM lookup key to enable + * virtualization of the EM hardware. + */ + TF_IDENT_TYPE_L2_FUNC, /* TH + * The L2 func is included in the ILT + * result and from recycling to + * enable virtualization of further + * lookups. + */ + TF_IDENT_TYPE_MAX +}; + +/** + * Enumeration of TruFlow table types. A table type is used to identify a + * resource object. + * + * NOTE: The table type TF_TBL_TYPE_EXT is unique in that it is + * the only table type that is connected with a table scope. 
+ */ +enum tf_tbl_type { + /* Internal */ + + TF_TBL_TYPE_FULL_ACT_RECORD, /* Wh+/SR/TH Action Record */ + TF_TBL_TYPE_COMPACT_ACT_RECORD, /* TH Compact Action Record */ + TF_TBL_TYPE_MCAST_GROUPS, /* (Future) Multicast Groups */ + TF_TBL_TYPE_ACT_ENCAP_8B, /* Wh+/SR/TH Action Encap 8 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_16B, /* Wh+/SR/TH Action Encap 16 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_32B, /* WH+/SR/TH Action Encap 32 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_64B, /* Wh+/SR/TH Action Encap 64 Bytes */ + TF_TBL_TYPE_ACT_ENCAP_128B, /* TH Action Encap 128 Bytes */ + TF_TBL_TYPE_ACT_SP_SMAC, /* WH+/SR/TH Action Src Props SMAC */ + TF_TBL_TYPE_ACT_SP_SMAC_IPV4, /* Wh+/SR/TH Action Src Props SMAC + * IPv4 + */ + TF_TBL_TYPE_ACT_SP_SMAC_IPV6, /* Wh+/SR/TH Action Src Props SMAC + * IPv6 + */ + TF_TBL_TYPE_ACT_STATS_64, /* Wh+/SR/TH Action Stats 64 Bits */ + TF_TBL_TYPE_ACT_MODIFY_IPV4, /* Wh+/SR Action Modify IPv4 Source */ + TF_TBL_TYPE_ACT_MODIFY_8B, /* TH 8B Modify Record */ + TF_TBL_TYPE_ACT_MODIFY_16B, /* TH 16B Modify Record */ + TF_TBL_TYPE_ACT_MODIFY_32B, /* TH 32B Modify Record */ + TF_TBL_TYPE_ACT_MODIFY_64B, /* TH 64B Modify Record */ + TF_TBL_TYPE_METER_PROF, /* (Future) Meter Profiles */ + TF_TBL_TYPE_METER_INST, /* (Future) Meter Instance */ + TF_TBL_TYPE_MIRROR_CONFIG, /* Wh+/SR/Th Mirror Config */ + TF_TBL_TYPE_UPAR, /* (Future) UPAR */ + TF_TBL_TYPE_METADATA, /* (Future) TH Metadata */ + TF_TBL_TYPE_CT_STATE, /* (Future) TH CT State */ + TF_TBL_TYPE_RANGE_PROF, /* (Future) TH Range Profile */ + TF_TBL_TYPE_EM_FKB, /* TH EM Flexible Key builder */ + TF_TBL_TYPE_WC_FKB, /* TH WC Flexible Key builder */ + TF_TBL_TYPE_METER_DROP_CNT, /* Meter Drop Counter */ + + /* External */ + + /** + * External table type - initially 1 poolsize entries. + * All External table types are associated with a table + * scope. Internal types are not. Currently this is + * a pool of 64B entries. 
+ */ + TF_TBL_TYPE_EXT, + TF_TBL_TYPE_MAX +}; + +/** + * TCAM table type + */ +enum tf_tcam_tbl_type { + TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, /* L2 Context TCAM */ + TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, /* L2 Context TCAM */ + TF_TCAM_TBL_TYPE_PROF_TCAM, /* Profile TCAM */ + TF_TCAM_TBL_TYPE_WC_TCAM, /* Wildcard TCAM */ + TF_TCAM_TBL_TYPE_SP_TCAM, /* Source Properties TCAM */ + TF_TCAM_TBL_TYPE_CT_RULE_TCAM, /* Connection Tracking Rule TCAM */ + TF_TCAM_TBL_TYPE_VEB_TCAM, /* Virtual Edge Bridge TCAM */ + TF_TCAM_TBL_TYPE_WC_TCAM_HIGH, /* Wildcard TCAM HI Priority */ + TF_TCAM_TBL_TYPE_WC_TCAM_LOW, /* Wildcard TCAM Low Priority */ + TF_TCAM_TBL_TYPE_MAX +}; + +/** + * SEARCH STATUS + */ +enum tf_search_status { + MISS, /* entry not found; but an idx allocated if requested */ + HIT, /* entry found; result/idx are valid */ + REJECT /* entry not found; table is full */ +}; + +/** + * EM Resources + * These defines are provisioned during + * tf_open_session() + */ +enum tf_em_tbl_type { + TF_EM_TBL_TYPE_EM_RECORD, /* # internal EM records for session */ + TF_EM_TBL_TYPE_TBL_SCOPE, /* # table scopes requested */ + TF_EM_TBL_TYPE_MAX +}; + +/** + * TruFlow Session Information + * + * @ver: TrueFlow Version. Used to control the structure layout + * when sharing sessions. TruFlow initializes this variable + * on tf_open_session(). + * @session_id: Session ID is a unique identifier for the session. + * TruFlow initializes this variable during tf_open_session() + * processing. + * @core_data: The core_data holds the TruFlow tf_session data structure. + * This memory is allocated and owned by TruFlow on + * tf_open_session(). TruFlow uses this memory for session + * management control until the session is closed by ULP. + * The ULP is expected to synchronize access to this before + * it invokes Core APIs. Please see tf_open_session_parms for + * specification details on this variable. + * @core_data_sz: The field specifies the size of core_data in bytes. 
+ * The size is set by TruFlow on tf_open_session(). + * Please see tf_open_session_parms for specification details + * on this variable. + * + * Structure defining a TruFlow Session, also known as a Management + * session. This structure is initialized at time of + * tf_open_session(). It is passed to all of the TruFlow APIs as way + * to prescribe and isolate resources between different TruFlow ULP + * Applications. + */ +struct tf_session_info { + struct tf_session_version ver; + union tf_session_id session_id; + void *core_data; + u32 core_data_sz_bytes; +}; + +/** + * TruFlow handle + * + * Contains a pointer to the session info. Allocated by ULP and passed + * to TruFlow using tf_open_session(). TruFlow will populate the + * session info at that time. A TruFlow Session can be used by more + * than one PF/VF by using the tf_open_session(). + */ +struct tf { + struct tf_session_info *session; /* session_info (shared) */ + struct bnxt *bp; /* back pointer to parent bp */ +}; + +/** + * Identifier resource definition + * @cnt: Array of TF Identifiers where each entry is expected to be + * set to the requested resource number of that specific type. + * The index used is tf_identifier_type. + */ +struct tf_identifier_resources { + u16 cnt[TF_IDENT_TYPE_MAX]; +}; + +/** + * Table type resource definition + * @cnt: Array of TF Table types where each entry is expected to be + * set to the requested resource number of that specific + * type. The index used is tf_tbl_type. + */ +struct tf_tbl_resources { + u16 cnt[TF_TBL_TYPE_MAX]; +}; + +/** + * TCAM type resource definition + * @cnt: Array of TF TCAM types where each entry is expected to be + * set to the requested resource number of that specific + * type. The index used is tf_tcam_tbl_type. 
+ */ +struct tf_tcam_resources { + u16 cnt[TF_TCAM_TBL_TYPE_MAX]; +}; + +/** + * EM type resource definition + * @cnt: Array of TF EM table types where each entry is expected to + * be set to the requested resource number of that specific + * type. The index used is tf_em_tbl_type. + */ +struct tf_em_resources { + u16 cnt[TF_EM_TBL_TYPE_MAX]; +}; + +/** + * tf_session_resources parameter definition. + * @ident_cnt: Requested Identifier Resources Number of identifier + * resources requested for the session. + * @tbl_cnt: Requested Index Table resource counts. The number of + * index table resources requested for the session. + * @tcam_cnt: Requested TCAM Table resource counts. The number of + * TCAM table resources requested for the session. + * @em_cnt: Requested EM resource counts. The number of internal + * EM table resources requested for the session. + */ +struct tf_session_resources { + struct tf_identifier_resources ident_cnt[TF_DIR_MAX]; + struct tf_tbl_resources tbl_cnt[TF_DIR_MAX]; + struct tf_tcam_resources tcam_cnt[TF_DIR_MAX]; + struct tf_em_resources em_cnt[TF_DIR_MAX]; +}; + +/** + * tf_open_session parameters definition. + * @ctrl_chan_name: String containing name of control channel interface to + * be used for this session to communicate with firmware. + * ctrl_chan_name will be used as part of a name for any + * shared memory allocation. The ctrl_chan_name is usually + * in format 0000:02:00.0. The name for shared session is + * 0000:02:00.0-tf_shared. + * @shadow_copy: Boolean controlling the use and availability of shadow + * copy. Shadow copy will allow the TruFlow to keep track of + * resource content on the firmware side without having to + * query firmware. Additional private session core_data will + * be allocated if this boolean is set to 'true', default + * 'false'. + * + * Size of memory depends on the NVM Resource settings for + * the control channel. + * + * @session_id: Session_id is unique per session. 
Session_id is + * composed of domain, bus, device and fw_session_id. + * The construction is done by parsing the ctrl_chan_name + * together with allocation of a fw_session_id. + * The session_id allows a session to be shared between + * devices. + * @session_client_id: Session_client_id is unique per client. + * It is composed of session_id and the + * fw_session_client_id fw_session_id. The + * construction is done by parsing the ctrl_chan_name + * together with allocation of a fw_session_client_id + * during tf_open_session(). A reference count will be + * incremented in the session on which a client is + * created. A session can first be closed if there is + * one Session Client left. Session Clients should + * be closed using tf_close_session(). + * @device_type: Device type for the session. + * @resources: Resource allocation for the session. + * @bp: The pointer to the parent bp struct. This is only + * used for HWRM message passing within the portability + * layer. The type is struct bnxt. + * @wc_num_slices: The number of slices per row for WC TCAM entry. + * @shared_session_creator: Indicates whether the application created + * the session if set. Otherwise the shared session + * already existed. Just for information purposes. + */ +struct tf_open_session_parms { + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + bool shadow_copy; + union tf_session_id session_id; + union tf_session_client_id session_client_id; + enum tf_device_type device_type; + struct tf_session_resources resources; + void *bp; + enum tf_wc_num_slice wc_num_slices; + int shared_session_creator; +}; + +/** + * tf_open_session: Opens a new TruFlow Session or session client. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to open parameters + * + * What gets created depends on the passed in tfp content. If the tfp does not + * have prior session data a new session with associated session client. If tfp + * has a session already a session client will be created. 
In both cases the + * session client is created using the provided ctrl_chan_name. + * + * In case of session creation TruFlow will allocate session specific memory to + * hold its session data. This data is private to TruFlow. + * + * No other TruFlow APIs will succeed unless this API is first called + * and succeeds. + * + * tf_open_session() returns a session id and session client id. These are + * also stored within the tfp structure passed in to all other APIs. + * + * A Session or session client can be closed using tf_close_session(). + * + * There are 2 types of sessions - shared and not. For non-shared all + * the allocated resources are owned and managed by a single session instance. + * No other applications have access to the resources owned by the non-shared + * session. For a shared session, resources are shared between 2 applications. + * + * When the caller of tf_open_session() sets the ctrl_chan_name[] to a name + * like "0000:02:00.0-tf_shared", it is a request to create a new "shared" + * session in the firmware or access the existing shared session. There is + * only 1 shared session that can be created. If the shared session has + * already been created in the firmware, this API will return this indication + * by clearing the shared_session_creator flag. Only the first shared session + * create will have the shared_session_creator flag set. + * + * The shared session should always be the first session to be created by + * application and the last session closed due to RM management preference. + * + * Sessions remain open in the firmware until the last client of the session + * closes the session (tf_close_session()). + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_open_session(struct tf *tfp, struct tf_open_session_parms *parms); + +/** + * General internal resource info + * + * TODO: remove tf_rm_new_entry structure and use this structure + * internally. 
+ */ +struct tf_resource_info { + u16 start; + u16 stride; +}; + +/** + * Identifier resource definition + * @info: Array of TF Identifiers. The index used is tf_identifier_type. + */ +struct tf_identifier_resource_info { + struct tf_resource_info info[TF_IDENT_TYPE_MAX]; +}; + +/** + * Table type resource info definition + * @info: Array of TF Table types. The index used is tf_tbl_type. + */ +struct tf_tbl_resource_info { + struct tf_resource_info info[TF_TBL_TYPE_MAX]; +}; + +/** + * TCAM type resource definition + * @info: Array of TF TCAM types. The index used is tf_tcam_tbl_type. + */ +struct tf_tcam_resource_info { + struct tf_resource_info info[TF_TCAM_TBL_TYPE_MAX]; +}; + +/** + * EM type resource definition + * @info: Array of TF EM table types. The index used is tf_em_tbl_type. + */ +struct tf_em_resource_info { + struct tf_resource_info info[TF_EM_TBL_TYPE_MAX]; +}; + +/** + * tf_session_resources parameter definition. + * @ident: Requested Identifier Resources. Number of identifier + * resources requested for the session. + * @tbl: Requested Index Table resource counts. The number of + * index table resources requested for the session. + * @tcam: Requested TCAM Table resource counts. The number of + * TCAM table resources requested for the session. + * @em: Requested EM resource counts. The number of internal + * EM table resources requested for the session. + */ +struct tf_session_resource_info { + struct tf_identifier_resource_info ident[TF_DIR_MAX]; + struct tf_tbl_resource_info tbl[TF_DIR_MAX]; + struct tf_tcam_resource_info tcam[TF_DIR_MAX]; + struct tf_em_resource_info em[TF_DIR_MAX]; +}; + +/** + * tf_get_session_resources parameter definition. + * @session_info: the structure is used to return the information + * of allocated resources. 
+ */ +struct tf_get_session_info_parms { + struct tf_session_resource_info session_info; +}; + +/** (experimental) + * Gets info about a TruFlow Session + * + * @tfp: Pointer to TF handle + * @parms: Pointer to get parameters + * + * Get info about the session which has been created. Whether it exists and + * what resource start and stride offsets are in use. This API is primarily + * intended to be used by an application which has created a shared session + * This application needs to obtain the resources which have already been + * allocated for the shared session. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_get_session_info(struct tf *tfp, + struct tf_get_session_info_parms *parms); +/** + * Experimental + * + * tf_attach_session parameters definition. + * @ctrl_chan_name: String containing name of control channel interface + * to be used for this session to communicate with + * firmware. The ctrl_chan_name will be used as part of + * a name for any shared memory allocation. + * @attach_chan_name: String containing name of attach channel interface + * to be used for this session. The attach_chan_name + * must be given to a 2nd process after the primary + * process has been created. This is the ctrl_chan_name + * of the primary process and is used to find the shared + * memory for the session that the attach is going + * to use. + * @session_id: Session_id is unique per session. For Attach the + * session_id should be the session_id that was returned + * on the first open. Session_id is composed of domain, + * bus, device and fw_session_id. The construction is + * done by parsing the ctrl_chan_name together with + * allocation of a fw_session_id during tf_open_session(). + * A reference count will be incremented on attach. + * A session is first fully closed when reference count + * is zero by calling tf_close_session(). 
+ */ +struct tf_attach_session_parms { + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + char attach_chan_name[TF_SESSION_NAME_MAX]; + union tf_session_id session_id; +}; + +/** + * Experimental + * + * Allows a 2nd application instance to attach to an existing + * session. Used when a session is to be shared between two processes. + * + * Attach will increment a ref count as to manage the shared session data. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to attach parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_attach_session(struct tf *tfp, + struct tf_attach_session_parms *parms); + +/** + * Closes an existing session client or the session it self. The + * session client is default closed and if the session reference count + * is 0 then the session is closed as well. + * + * On session close all hardware and firmware state associated with + * the TruFlow application is cleaned up. + * + * The session client is extracted from the tfp. Thus tf_close_session() + * cannot close a session client on behalf of another function. + * + * Returns success or failure code. + */ +int tf_close_session(struct tf *tfp); + +/** + * tf_alloc_identifier parameter definition + * + * @dir: receive or transmit direction + * @ident_type: Identifier type + * @id: Allocated identifier [out] + */ +struct tf_alloc_identifier_parms { + enum tf_dir dir; + enum tf_identifier_type ident_type; + u32 id; +}; + +/** + * tf_free_identifier parameter definition + * + * @dir: receive or transmit direction + * @ident_type: Identifier type + * @id: ID to free + * @refcnt: Current refcnt after free + */ +struct tf_free_identifier_parms { + enum tf_dir dir; + enum tf_identifier_type ident_type; + u32 id; + u32 ref_cnt; +}; + +/** + * allocate identifier resource + * + * TruFlow core will allocate a free id from the per identifier resource type + * pool reserved for the session during tf_open(). No firmware is involved. 
+ * + * If shadow copy is enabled, the internal ref_cnt is set to 1 in the + * shadow table for a newly allocated resource. + * + * Returns success or failure code. + */ +int tf_alloc_identifier(struct tf *tfp, + struct tf_alloc_identifier_parms *parms); + +/** + * free identifier resource + * + * TruFlow core will return an id back to the per identifier resource type pool + * reserved for the session. No firmware is involved. During tf_close, the + * complete pool is returned to the firmware. + * + * additional operation (experimental) + * Decrement reference count. Only release resource once refcnt goes to 0 if + * shadow copy is enabled. + * + * Returns success or failure code. + */ +int tf_free_identifier(struct tf *tfp, + struct tf_free_identifier_parms *parms); + +/* DRAM Table Scope Interface + * + * If we allocate the EEM memory from the core, we need to store it in + * the shared session data structure to make sure it can be freed later. + * (for example if the PF goes away) + * + * Current thought is that memory is allocated within core. + */ + +/** + * tf_alloc_tbl_scope_parms definition + * + * @rx_max_key_sz_in_bits: All Maximum key size required + * @rx_max_action_entry_sz_in_bits: Maximum Action size required (includes + * inlined items) + * @rx_mem_size_in_mb: Memory size in Megabytes Total memory + * size allocated by user to be divided + * up for actions, hash, counters. Only + * inline external actions. Use this + * variable or the number of flows, do + * not set both. + * @rx_num_flows_in_k: Number of flows * 1000. If set, + * rx_mem_size_in_mb must equal 0. + * @tx_max_key_sz_in_bits: All Maximum key size required. + * @tx_max_action_entry_sz_in_bits: Maximum Action size required (includes + * inlined items) + * @tx_mem_size_in_mb: Memory size in Megabytes Total memory + * size allocated by user to be divided + * up for actions, hash, counters. Only + * inline external actions. 
+ * @tx_num_flows_in_k: Number of flows * 1000 + * @hw_flow_cache_flush_timer: Flush pending HW cached flows every + * 1/10th of value set in seconds, both + * idle and active flows are flushed + * from the HW cache. If set to 0, this + * feature will be disabled. + * @tbl_scope_id: table scope identifier + * + */ +struct tf_alloc_tbl_scope_parms { + u16 rx_max_key_sz_in_bits; + u16 rx_max_action_entry_sz_in_bits; + u32 rx_mem_size_in_mb; + u32 rx_num_flows_in_k; + u16 tx_max_key_sz_in_bits; + u16 tx_max_action_entry_sz_in_bits; + u32 tx_mem_size_in_mb; + u32 tx_num_flows_in_k; + u8 hw_flow_cache_flush_timer; + u32 tbl_scope_id; +}; + +/** + * tf_free_tbl_scope_parms definition + * + * @tbl_scope_id: table scope identifier + */ +struct tf_free_tbl_scope_parms { + u32 tbl_scope_id; +}; + +/** + * tf_map_tbl_scope_parms definition + * + * @tbl_scope_id: table scope identifier + * @parif_bitmask: Which parifs are associated with this table scope. + * Bit 0 indicates parif 0. + */ +struct tf_map_tbl_scope_parms { + u32 tbl_scope_id; + u16 parif_bitmask; +}; + +/** + * allocate a table scope + * + * The scope is a software construct to identify an EEM table. This function + * will divide the hash memory/buckets and records according to the device + * constraints based upon calculations using either the number of flows + * requested or the size of memory indicated. Other parameters passed in + * determine the configuration (maximum key size, maximum external action + * record size). + * + * A single API is used to allocate a common table scope identifier in both + * receive and transmit CFA. The scope identifier is common due to nature of + * connection tracking sending notifications between RX and TX direction. + * + * The receive and transmit table access identifiers specify which rings will + * be used to initialize table DRAM. The application must ensure mutual + * exclusivity of ring usage for table scope allocation and any table update + * operations. 
+ * + * The hash table buckets, EM keys, and EM lookup results are stored in the + * memory allocated based on the rx_em_hash_mb/tx_em_hash_mb parameters. The + * hash table buckets are stored at the beginning of that memory. + * + * NOTE: No EM internal setup is done here. On chip EM records are managed + * internally by TruFlow core. + * + * Returns success or failure code. + */ +int tf_alloc_tbl_scope(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms); + +/** + * map a table scope (legacy device only Wh+/SR) + * + * Map a table scope to one or more partition interfaces (parifs). + * The parif can be remapped in the L2 context lookup for legacy devices. This + * API allows a number of parifs to be mapped to the same table scope. On + * legacy devices a table scope identifies one of 16 sets of EEM table base + * addresses and is associated with a PF communication channel. The associated + * PF must be configured for the table scope to operate. + * + * An L2 context TCAM lookup returns a remapped parif value used to + * index into the set of 16 parif_to_pf registers which are used to map to one + * of the 16 table scopes. This API allows the user to map the parifs in the + * mask to the previously allocated table scope (EEM table). + * + * Returns success or failure code. + */ +int tf_map_tbl_scope(struct tf *tfp, struct tf_map_tbl_scope_parms *parms); + +/** + * free a table scope + * + * Firmware checks that the table scope ID is owned by the TruFlow + * session, verifies that no references to this table scope remain + * or Profile TCAM entries for either CFA (RX/TX) direction, + * then frees the table scope ID. + * + * Returns success or failure code. 
+ */ +int tf_free_tbl_scope(struct tf *tfp, struct tf_free_tbl_scope_parms *parms); + +/** + * tf_alloc_tcam_entry parameter definition + * + * @dir: receive or transmit direction + * @tcam_tbl_type: TCAM table type + * @search_enable: Enable search for matching entry + * @key: Key data to match on (if search) + * @key_sz_in_bits: key size in bits (if search) + * @mask: Mask data to match on (if search) + * @priority: Priority of entry requested (definition TBD) + * @hit: If search, set if matching entry found + * @ref_cnt: Current refcnt after allocation [out] + * @idx: Idx allocated + */ +struct tf_alloc_tcam_entry_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type tcam_tbl_type; + u8 search_enable; + u8 *key; + u16 key_sz_in_bits; + u8 *mask; + u32 priority; + u8 hit; + u16 ref_cnt; + u16 idx; +}; + +/** + * allocate TCAM entry + * + * Allocate a TCAM entry - one of these types: + * + * L2 Context + * Profile TCAM + * WC TCAM + * VEB TCAM + * + * This function allocates a TCAM table record. This function + * will attempt to allocate a TCAM table entry from the session + * owned TCAM entries or search a shadow copy of the TCAM table for a + * matching entry if search is enabled. Key, mask and result must match for + * hit to be set. Only TruFlow core data is accessed. + * A hash table to entry mapping is maintained for search purposes. If + * search is not enabled, the first available free entry is returned based + * on priority and alloc_cnt is set to 1. If search is enabled and a matching + * entry to entry_data is found, hit is set to TRUE and alloc_cnt is set to 1. + * RefCnt is also returned. + * + * Also returns success or failure code. 
+ */ +int tf_alloc_tcam_entry(struct tf *tfp, + struct tf_alloc_tcam_entry_parms *parms); + +/** + * tf_set_tcam_entry parameter definition + * + * @dir: receive or transmit direction + * @tcam_tbl_type: TCAM table type + * @idx: base index of the entry to program + * @key: struct containing key + * @mask: struct containing mask fields + * @key_sz_in_bits: key size in bits (if search) + * @result: struct containing result + * @result_sz_in_bits: struct containing result size in bits + */ +struct tf_set_tcam_entry_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type tcam_tbl_type; + u16 idx; + u8 *key; + u8 *mask; + u16 key_sz_in_bits; + u8 *result; + u16 result_sz_in_bits; +}; + +/** + * set TCAM entry + * + * Program a TCAM table entry for a TruFlow session. + * + * If the entry has not been allocated, an error will be returned. + * + * Returns success or failure code. + */ +int tf_set_tcam_entry(struct tf *tfp, struct tf_set_tcam_entry_parms *parms); + +/** + * tf_get_tcam_entry parameter definition + * @dir: receive or transmit direction + * @tcam_tbl_type: TCAM table type + * @idx: index of the entry to get + * @key: struct containing key [out] + * @mask: struct containing mask fields [out] + * @key_sz_in_bits: key size in bits + * @result: struct containing result + * @result_sz_in_bits: struct containing result size in bits + */ +struct tf_get_tcam_entry_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type tcam_tbl_type; + u16 idx; + u8 *key; + u8 *mask; + u16 key_sz_in_bits; + u8 *result; + u16 result_sz_in_bits; +}; + +/** + * get TCAM entry + * + * Get a TCAM table entry for a TruFlow session. + * + * If the entry has not been allocated, an error will be returned. + * + * Returns success or failure code. 
+ */ +int tf_get_tcam_entry(struct tf *tfp, struct tf_get_tcam_entry_parms *parms); + +/** + * tf_free_tcam_entry parameter definition + * + * @dir: receive or transmit direction + * @tcam_tbl_type: TCAM table type + * @idx: Index to free + * @ref_cnt: reference count after free + */ +struct tf_free_tcam_entry_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type tcam_tbl_type; + u16 idx; + u16 ref_cnt; +}; + +/** + * Free TCAM entry. + * + * Firmware checks to ensure the TCAM entries are owned by the TruFlow + * session. TCAM entry will be invalidated. All-ones mask. + * writes to hw. + * + * WCTCAM profile id of 0 must be used to invalidate an entry. + * + * Returns success or failure code. + */ +int tf_free_tcam_entry(struct tf *tfp, + struct tf_free_tcam_entry_parms *parms); + +/** + * tf_alloc_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @tbl_scope_id: Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + * @idx: Idx of allocated entry + */ +struct tf_alloc_tbl_entry_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 tbl_scope_id; + u32 idx; +}; + +/** + * allocate index table entries + * + * Internal types: + * + * Allocate an on chip index table entry or search for a matching + * entry of the indicated type for this TruFlow session. + * + * Allocates an index table record. This function will attempt to + * allocate an index table entry. + * + * External types: + * + * These are used to allocate inlined action record memory. + * + * Allocates an external index table action record. + * + * NOTE: + * Implementation of the internals of the external function will be a stack with + * push and pop. + * + * Returns success or failure code. 
+ */ +int tf_alloc_tbl_entry(struct tf *tfp, + struct tf_alloc_tbl_entry_parms *parms); + +/** + * tf_free_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @tbl_scope_id: Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + * @idx: Index to free + */ +struct tf_free_tbl_entry_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 tbl_scope_id; + u32 idx; +}; + +/** + * free index table entry + * + * Used to free a previously allocated table entry. + * + * Internal types: + * + * The element is freed and given back to the session pool. + * + * External types: + * + * Frees an external index table action record. + * + * NOTE: + * Implementation of the internals of the external table will be a stack with + * push and pop. + * + * Returns success or failure code. + */ +int tf_free_tbl_entry(struct tf *tfp, struct tf_free_tbl_entry_parms *parms); + +/** + * tf_set_tbl_entry parameter definition + * + * @tbl_scope_id: Table scope identifier + * @dir: Receive or transmit direction + * @type: Type of object to set + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @chan_type: External memory channel type to use + * @idx: Entry index to write to + */ +struct tf_set_tbl_entry_parms { + u32 tbl_scope_id; + enum tf_dir dir; + enum tf_tbl_type type; + u8 *data; + u16 data_sz_in_bytes; + enum tf_ext_mem_chan_type chan_type; + u32 idx; +}; + +/** + * set index table entry + * + * Used to set an application programmed index table entry into a + * previous allocated table location. + * + * Returns success or failure code. 
+ */ +int tf_set_tbl_entry(struct tf *tfp, struct tf_set_tbl_entry_parms *parms); + +/** + * tf_get_shared_tbl_increment parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @increment_cnt: Value to increment by for resource type + */ +struct tf_get_shared_tbl_increment_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 increment_cnt; +}; + +/** + * tf_get_shared_tbl_increment + * + * This API is currently only required for use in the shared + * session for Thor (p58) actions. An increment count is returned per + * type to indicate how much to increment the start by for each + * entry (see tf_resource_info) + * + * Returns success or failure code. + */ +int tf_get_shared_tbl_increment(struct tf *tfp, + struct tf_get_shared_tbl_increment_parms + *parms); + +/** + * tf_get_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @chan_type: External memory channel type to use + * @idx: Entry index to read + */ +struct tf_get_tbl_entry_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u8 *data; + u16 data_sz_in_bytes; + enum tf_ext_mem_chan_type chan_type; + u32 idx; +}; + +/** + * get index table entry + * + * Used to retrieve a previous set index table entry. + * + * Reads and compares with the shadow table copy (if enabled) (only + * for internal objects). + * + * Returns success or failure code. Failure will be returned if the + * provided data buffer is too small for the data type requested. 
+ */ +int tf_get_tbl_entry(struct tf *tfp, + struct tf_get_tbl_entry_parms *parms); + +/** + * tf_bulk_get_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @starting_idx: Starting index to read from + * @num_entries: Number of sequential entries + * @entry_sz_in_bytes: Size of the single entry + * @physical_mem_addr: Host physical address, where the data will be copied + * to by the firmware. + * @chan_type: External memory channel type to use + */ +struct tf_bulk_get_tbl_entry_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 starting_idx; + u16 num_entries; + u16 entry_sz_in_bytes; + u64 physical_mem_addr; + enum tf_ext_mem_chan_type chan_type; +}; + +/** + * Bulk get index table entry + * + * Used to retrieve a set of index table entries. + * + * Entries within the range may not have been allocated using + * tf_alloc_tbl_entry() at the time of access. But the range must + * be within the bounds determined from tf_open_session() for the + * given table type. Currently, this is only used for collecting statistics. + * + * Returns success or failure code. Failure will be returned if the + * provided data buffer is too small for the data type requested. + */ +int tf_bulk_get_tbl_entry(struct tf *tfp, + struct tf_bulk_get_tbl_entry_parms *parms); + +/** + * Exact Match Table + */ + +/** + * tf_insert_em_entry parameter definition + * + * @dir: Receive or transmit direction + * @mem: internal or external + * @tbl_scope_id: ID of table scope to use (external only) + * @key: ptr to structure containing key fields + * @key_sz_in_bits: key bit length + * @em_record: ptr to structure containing result field + * @em_record_sz_in_bits: result size in bits + * @dup_check: duplicate check flag + * @chan_type: External memory channel type to use + * @flow_handle: Flow handle value for the inserted entry. 
This is + * encoded as the entries[4]:bucket[2]:hashId[1]:hash[14] + * @flow_id: Flow id is returned as null (internal). Flow id is + * the GFID value for the inserted entry (external). + * This is the value written to the BD and useful + * information for mark. + */ +struct tf_insert_em_entry_parms { + enum tf_dir dir; + enum tf_mem mem; + u32 tbl_scope_id; + u8 *key; + u16 key_sz_in_bits; + u8 *em_record; + u16 em_record_sz_in_bits; + u8 dup_check; + enum tf_ext_mem_chan_type chan_type; + u64 flow_handle; + u64 flow_id; +}; + +/** + * tf_delete_em_entry parameter definition + * + * @dir: Receive or transmit direction + * @mem: internal or external + * @tbl_scope_id: ID of table scope to use (external only) + * @index: The index of the entry + * @chan_type: External memory channel type to use + * @flow_handle: structure containing flow delete handle information + * + */ +struct tf_delete_em_entry_parms { + enum tf_dir dir; + enum tf_mem mem; + u32 tbl_scope_id; + u16 index; + enum tf_ext_mem_chan_type chan_type; + u64 flow_handle; +}; + +/** + * tf_move_em_entry parameter definition + * + * @dir: Receive or transmit direction + * @mem: internal or external + * @tbl_scope_id: ID of table scope to use (external only) + * @tbl_if_id: ID of table interface to use (SR2 only) + * @epochs: epoch group IDs of entry to delete 2 element array + * with 2 ids. (SR2 only) + * @index: The index of the entry + * @chan_type: External memory channel type to use + * @new_index: The index of the new EM record + * @flow_handle: structure containing flow delete handle information + */ +struct tf_move_em_entry_parms { + enum tf_dir dir; + enum tf_mem mem; + u32 tbl_scope_id; + u32 tbl_if_id; + u16 *epochs; + u16 index; + enum tf_ext_mem_chan_type chan_type; + u32 new_index; + u64 flow_handle; +}; + +/** + * insert em hash entry in internal table memory + * + * Internal: + * + * This API inserts an exact match entry into internal EM table memory + * of the specified direction. 
+ * + * Note: The EM record is managed within the TruFlow core and not the + * application. + * + * Shadow copy of internal record table an association with hash and 1,2, or 4 + * associated buckets + * + * External: + * This API inserts an exact match entry into DRAM EM table memory of the + * specified direction and table scope. + * + * The insertion of duplicate entries in an EM table is not permitted. If a + * TruFlow application can guarantee that it will never insert duplicates, it + * can disable duplicate checking by passing a zero value in the dup_check + * parameter to this API. This will optimize performance. Otherwise, the + * TruFlow library will enforce protection against inserting duplicate entries. + * + * Flow handle is defined in this document: + * + * https://docs.google.com + * /document/d/1NESu7RpTN3jwxbokaPfYORQyChYRmJgs40wMIRe8_-Q/edit + * + * Returns success or busy code. + * + */ +int tf_insert_em_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms); + +/** + * delete em hash entry table memory + * + * Internal: + * + * This API deletes an exact match entry from internal EM table memory of the + * specified direction. If a valid flow ptr is passed in then that takes + * precedence over the pointer to the complete key passed in. + * + * + * External: + * + * This API deletes an exact match entry from EM table memory of the specified + * direction and table scope. If a valid flow handle is passed in then that + * takes precedence over the pointer to the complete key passed in. + * + * The TruFlow library may release a dynamic bucket when an entry is deleted. 
+ * + * + * Returns success or not found code + * + * + */ +int tf_delete_em_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms); + +/** + * Tunnel Encapsulation Offsets + */ +enum tf_tunnel_encap_offsets { + TF_TUNNEL_ENCAP_L2, + TF_TUNNEL_ENCAP_NAT, + TF_TUNNEL_ENCAP_MPLS, + TF_TUNNEL_ENCAP_VXLAN, + TF_TUNNEL_ENCAP_GENEVE, + TF_TUNNEL_ENCAP_NVGRE, + TF_TUNNEL_ENCAP_GRE, + TF_TUNNEL_ENCAP_FULL_GENERIC +}; + +/** + * Global Configuration Table Types + */ +enum tf_global_config_type { + TF_TUNNEL_ENCAP, /* Tunnel Encap Config(TECT) */ + TF_ACTION_BLOCK, /* Action Block Config(ABCR) */ + TF_COUNTER_CFG, /* Counter Configuration (CNTRS_CTRL) */ + TF_METER_CFG, /* Meter Config(ACTP4_FMTCR) */ + TF_METER_INTERVAL_CFG, /* METER Interval Config(FMTCR_INTERVAL) */ + TF_DSCP_RMP_CFG, /* Remap IPv6 DSCP */ + TF_GLOBAL_CFG_TYPE_MAX +}; + +/** + * tf_global_cfg parameter definition + * @dir: receive or transmit direction + * @type: Global config type + * @offset: Offset @ the type + * @config: Value of the configuration. + * set - Read, Modify and Write + * get - Read the full configuration + * @config_mask: Configuration mask + * set - Read, Modify with mask and Write + * get - unused + * @config_sz_in_bytes: struct containing size + */ +struct tf_global_cfg_parms { + enum tf_dir dir; + enum tf_global_config_type type; + u32 offset; + u8 *config; + u8 *config_mask; + u16 config_sz_in_bytes; +}; + +/** + * Get global configuration + * + * Retrieve the configuration + * + * Returns success or failure code. + */ +int tf_get_global_cfg(struct tf *tfp, + struct tf_global_cfg_parms *parms); + +/** + * Update the global configuration table + * + * Read, modify write the value. + * + * Returns success or failure code. + */ +int tf_set_global_cfg(struct tf *tfp, + struct tf_global_cfg_parms *parms); + +/** + * Enumeration of TruFlow interface table types. 
+ */ +enum tf_if_tbl_type { + TF_IF_TBL_TYPE_PROF_SPIF_DFLT_L2_CTXT, /* Default Profile L2 + * Context Entry + */ + TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, /* Default Profile TCAM/ + * Lookup Action Record + * Pointer Table + */ + TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, /* Error Profile TCAM + * Miss Action Record + * Pointer Table + */ + TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR, /* Default Error Profile + * TCAM Miss Action + * Record Pointer Table + */ + TF_IF_TBL_TYPE_ILT, /* Ingress lookup table */ + TF_IF_TBL_TYPE_VSPT, /* VNIC/SVIF Props Tbl */ + TF_IF_TBL_TYPE_MAX +}; + +/** + * tf_set_if_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of object to set + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Interface to write + */ +struct tf_set_if_tbl_entry_parms { + enum tf_dir dir; + enum tf_if_tbl_type type; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * set interface table entry + * + * Used to set an interface table. This API is used for managing tables indexed + * by SVIF/SPIF/PARIF interfaces. In current implementation only the value is + * set. + * Returns success or failure code. + */ +int tf_set_if_tbl_entry(struct tf *tfp, + struct tf_set_if_tbl_entry_parms *parms); + +/** + * tf_get_if_tbl_entry parameter definition + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Entry index to read + */ +struct tf_get_if_tbl_entry_parms { + enum tf_dir dir; + enum tf_if_tbl_type type; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * get interface table entry + * + * Used to retrieve an interface table entry. + * + * Reads the interface table entry value + * + * Returns success or failure code. Failure will be returned if the + * provided data buffer is too small for the data type requested. 
+ */ +int tf_get_if_tbl_entry(struct tf *tfp, + struct tf_get_if_tbl_entry_parms *parms); + +/** + * tf_get_version parameters definition. + * + * @device_type: Device type for the session. + * @bp: The pointer to the parent bp struct. This is only used + * for HWRM message passing within the portability layer. + * The type is struct bnxt. + * @major: Version Major number. + * @minor: Version Minor number. + * @update: Version Update number. + * @dev_ident_caps: fw available identifier resource list + * @dev_tbl_caps: fw available table resource list + * @dev_tcam_caps: fw available tcam resource list + * @dev_em_caps: fw available em resource list + */ +struct tf_get_version_parms { + enum tf_device_type device_type; + void *bp; + u8 major; + u8 minor; + u8 update; + u32 dev_ident_caps; + u32 dev_tbl_caps; + u32 dev_tcam_caps; + u32 dev_em_caps; +}; + +/** + * Get tf fw version + * Used to retrieve Truflow fw version information. + * Returns success or failure code. + */ +int tf_get_version(struct tf *tfp, struct tf_get_version_parms *parms); + +/** + * tf_query_sram_resources parameter definition + * + * @device_type: Device type for the session. + * @bp: The pointer to the parent bp struct. This is only used + * for HWRM message passing within the portability layer. + * The type is struct bnxt. + * @dir: Receive or transmit direction + * @bank_resc_count: Bank resource count in 8 bytes entry + * @dynamic_sram_capable: Dynamic SRAM Enable + * @sram_profile: SRAM profile + */ +struct tf_query_sram_resources_parms { + enum tf_device_type device_type; + void *bp; + enum tf_dir dir; + u32 bank_resc_count[TF_SRAM_BANK_ID_MAX]; + bool dynamic_sram_capable; + u8 sram_profile; +}; + +/** + * Get SRAM resources information + * Used to retrieve sram bank partition information + * Returns success or failure code. 
+ */ +int tf_query_sram_resources(struct tf *tfp, + struct tf_query_sram_resources_parms *parms); + +/** + * tf_set_sram_policy parameter definition + * + * @device_type: Device type for the session. + * @dir: Receive or transmit direction + * @bank_id: Array of Bank id for each truflow tbl type + */ +struct tf_set_sram_policy_parms { + enum tf_device_type device_type; + enum tf_dir dir; + enum tf_sram_bank_id bank_id[TF_TBL_TYPE_ACT_MODIFY_64B + 1]; +}; + +/** + * Set SRAM policy + * Used to assign SRAM bank index to all truflow table type. + * Returns success or failure code. + */ +int tf_set_sram_policy(struct tf *tfp, struct tf_set_sram_policy_parms *parms); + +/** + * tf_get_sram_policy parameter definition + * + * @device_type: Device type for the session. + * @dir: Receive or transmit direction + * @bank_id: Array of Bank id for each truflow tbl type + */ +struct tf_get_sram_policy_parms { + enum tf_device_type device_type; + enum tf_dir dir; + enum tf_sram_bank_id bank_id[TF_TBL_TYPE_ACT_MODIFY_64B + 1]; +}; + +/** + * Get SRAM policy + * Used to get the assigned bank of table types. + * Returns success or failure code. + */ +int tf_get_sram_policy(struct tf *tfp, struct tf_get_sram_policy_parms *parms); + +#endif /* _TF_CORE_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.c new file mode 100644 index 000000000000..9528432e66bf --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#include "tf_device.h" +#include "tf_device_p4.h" +#include "tf_device_p58.h" +#include "tf_em.h" +#include "tf_rm.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "tf_tbl_sram.h" + +struct tf; + +/* Forward declarations */ +static int tf_dev_unbind_p4(struct tf *tfp); +static int tf_dev_unbind_p58(struct tf *tfp); + +/** + * Resource Reservation Check function + * + * @count: Number of module subtypes + * @cfg: Pointer to rm element config + * @reservations: Pointer to resource reservation array + * + * Returns + * - (n) number of tables in module that have non-zero reservation count. + */ +static int tf_dev_reservation_check(u16 count, struct tf_rm_element_cfg *cfg, + u16 *reservations) +{ + u16 *rm_num; + u16 cnt = 0; + int i, j; + + for (i = 0; i < TF_DIR_MAX; i++) { + rm_num = (u16 *)reservations + i * count; + for (j = 0; j < count; j++) { + if ((cfg[j].cfg_type == TF_RM_ELEM_CFG_HCAPI || + cfg[j].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA || + cfg[j].cfg_type == + TF_RM_ELEM_CFG_HCAPI_BA_PARENT || + cfg[j].cfg_type == + TF_RM_ELEM_CFG_HCAPI_BA_CHILD) && + rm_num[j] > 0) + cnt++; + } + } + + return cnt; +} + +/** + * Device specific bind function, WH+ + * + * @tfp: Pointer to TF handle + * @resources: Pointer to resource allocation information + * @dev_handle: Device handle [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on parameter or internal failure. 
+ */ +static int tf_dev_bind_p4(struct tf *tfp, + struct tf_session_resources *resources, + struct tf_dev_info *dev_handle, + enum tf_wc_num_slice wc_num_slices) +{ + struct tf_global_cfg_cfg_parms global_cfg; + struct tf_if_tbl_cfg_parms if_tbl_cfg; + struct tf_ident_cfg_parms ident_cfg; + struct tf_tcam_cfg_parms tcam_cfg; + struct tf_tbl_cfg_parms tbl_cfg; + struct tf_em_cfg_parms em_cfg; + bool no_rsv_flag = true; + struct tf_session *tfs; + int rsv_cnt; + int frc; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Initial function initialization */ + dev_handle->ops = &tf_dev_ops_p4_init; + + /* Initialize the modules */ + rsv_cnt = tf_dev_reservation_check(TF_IDENT_TYPE_MAX, + tf_ident_p4, + (u16 *)resources->ident_cnt); + if (rsv_cnt) { + ident_cfg.num_elements = TF_IDENT_TYPE_MAX; + ident_cfg.cfg = tf_ident_p4; + ident_cfg.resources = resources; + rc = tf_ident_bind(tfp, &ident_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Identifier initialization failure\n"); + goto fail; + } + + no_rsv_flag = false; + } + + rsv_cnt = tf_dev_reservation_check(TF_TBL_TYPE_MAX, + tf_tbl_p4[TF_DIR_RX], + (u16 *)resources->tbl_cnt); + if (rsv_cnt) { + tbl_cfg.num_elements = TF_TBL_TYPE_MAX; + tbl_cfg.cfg = tf_tbl_p4[TF_DIR_RX]; + tbl_cfg.resources = resources; + rc = tf_tbl_bind(tfp, &tbl_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Table initialization failure\n"); + goto fail; + } + + no_rsv_flag = false; + } + + rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX, + tf_tcam_p4, + (u16 *)resources->tcam_cnt); + if (rsv_cnt) { + tcam_cfg.num_elements = TF_TCAM_TBL_TYPE_MAX; + tcam_cfg.cfg = tf_tcam_p4; + tcam_cfg.resources = resources; + tcam_cfg.wc_num_slices = wc_num_slices; + rc = tf_tcam_bind(tfp, &tcam_cfg); + + if (rc) { + netdev_dbg(tfp->bp->dev, + "TCAM initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + } + + /* EM */ + rsv_cnt = 
tf_dev_reservation_check(TF_EM_TBL_TYPE_MAX, + tf_em_int_p4, + (u16 *)resources->em_cnt); + if (rsv_cnt) { + em_cfg.num_elements = TF_EM_TBL_TYPE_MAX; + em_cfg.cfg = tf_em_int_p4; + em_cfg.resources = resources; + em_cfg.mem_type = 0; /* Not used by EM */ + + rc = tf_em_int_bind(tfp, &em_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "EM initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + } + + /* There is no rm reserved for any tables */ + if (no_rsv_flag) { + netdev_dbg(tfp->bp->dev, "No rm reserved for any tables\n"); + return -ENOMEM; + } + + /* IF_TBL */ + if_tbl_cfg.num_elements = TF_IF_TBL_TYPE_MAX; + if_tbl_cfg.cfg = tf_if_tbl_p4; + rc = tf_if_tbl_bind(tfp, &if_tbl_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, "IF Table initialization failure\n"); + goto fail; + } + + if (!tf_session_is_shared_session(tfs)) { + /* GLOBAL_CFG */ + global_cfg.num_elements = TF_GLOBAL_CFG_TYPE_MAX; + global_cfg.cfg = tf_global_cfg_p4; + rc = tf_global_cfg_bind(tfp, &global_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Global Cfg initialization failure\n"); + goto fail; + } + } + + /* Final function initialization */ + dev_handle->ops = &tf_dev_ops_p4; + + return 0; + + fail: + /* Cleanup of already created modules */ + frc = tf_dev_unbind_p4(tfp); + if (frc) + return frc; + + return rc; +} + +/** + * Device specific unbind function, WH+ + * + * @tfp: Pointer to TF handle + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_unbind_p4(struct tf *tfp) +{ + struct tf_session *tfs; + bool fail = false; + int rc = 0; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Unbind all the support modules. As this is only done on + * close we only report errors as everything has to be cleaned + * up regardless. + * + * In case of residuals TCAMs are cleaned up first as to + * invalidate the pipeline in a clean manner. 
+ */ + rc = tf_tcam_unbind(tfp); + + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, TCAM\n"); + fail = true; + } + + rc = tf_ident_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, Identifier\n"); + fail = true; + } + + rc = tf_tbl_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, Table Type\n"); + fail = true; + } + + rc = tf_em_int_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, EM\n"); + fail = true; + } + + if (!tf_session_is_shared_session(tfs)) { + rc = tf_if_tbl_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Device unbind failed, IF Table Type\n"); + fail = true; + } + + rc = tf_global_cfg_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Device unbind failed, Global Cfg Type\n"); + fail = true; + } + } + + if (fail) + return -1; + + return rc; +} + +/** + * Device specific bind function, THOR + * + * @tfp: Pointer to TF handle + * @resources: Pointer to resource allocation information + * @dev_handle: Device handle [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on parameter or internal failure. 
+ */ +static int tf_dev_bind_p58(struct tf *tfp, + struct tf_session_resources *resources, + struct tf_dev_info *dev_handle, + enum tf_wc_num_slice wc_num_slices) +{ + struct tf_global_cfg_cfg_parms global_cfg; + struct tf_if_tbl_cfg_parms if_tbl_cfg; + struct tf_ident_cfg_parms ident_cfg; + struct tf_tcam_cfg_parms tcam_cfg; + struct tf_tbl_cfg_parms tbl_cfg; + struct tf_em_cfg_parms em_cfg; + bool no_rsv_flag = true; + struct tf_session *tfs; + int rsv_cnt; + int frc; + int rc; + + /* Initial function initialization */ + dev_handle->ops = &tf_dev_ops_p58_init; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + rsv_cnt = tf_dev_reservation_check(TF_IDENT_TYPE_MAX, + tf_ident_p58, + (u16 *)resources->ident_cnt); + if (rsv_cnt) { + ident_cfg.num_elements = TF_IDENT_TYPE_MAX; + ident_cfg.cfg = tf_ident_p58; + ident_cfg.resources = resources; + rc = tf_ident_bind(tfp, &ident_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Identifier initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + } + + rsv_cnt = tf_dev_reservation_check(TF_TBL_TYPE_MAX, + tf_tbl_p58[TF_DIR_RX], + (u16 *)resources->tbl_cnt); + rsv_cnt += tf_dev_reservation_check(TF_TBL_TYPE_MAX, + tf_tbl_p58[TF_DIR_TX], + (u16 *)resources->tbl_cnt); + if (rsv_cnt) { + tbl_cfg.num_elements = TF_TBL_TYPE_MAX; + tbl_cfg.cfg = tf_tbl_p58[TF_DIR_RX]; + tbl_cfg.resources = resources; + rc = tf_tbl_bind(tfp, &tbl_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Table initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + + rc = tf_tbl_sram_bind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, + "SRAM table initialization failure\n"); + goto fail; + } + } + + rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX, + tf_tcam_p58, + (u16 *)resources->tcam_cnt); + if (rsv_cnt) { + tcam_cfg.num_elements = TF_TCAM_TBL_TYPE_MAX; + tcam_cfg.cfg = tf_tcam_p58; + tcam_cfg.resources = resources; + tcam_cfg.wc_num_slices = 
wc_num_slices; + rc = tf_tcam_bind(tfp, &tcam_cfg); + + if (rc) { + netdev_dbg(tfp->bp->dev, + "TCAM initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + } + + /* EM */ + rsv_cnt = tf_dev_reservation_check(TF_EM_TBL_TYPE_MAX, + tf_em_int_p58, + (u16 *)resources->em_cnt); + if (rsv_cnt) { + em_cfg.num_elements = TF_EM_TBL_TYPE_MAX; + em_cfg.cfg = tf_em_int_p58; + em_cfg.resources = resources; + em_cfg.mem_type = 0; /* Not used by EM */ + + rc = tf_em_int_bind(tfp, &em_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "EM initialization failure\n"); + goto fail; + } + no_rsv_flag = false; + } + + /* There is no rm reserved for any tables */ + if (no_rsv_flag) { + netdev_dbg(tfp->bp->dev, "No rm reserved for any tables\n"); + return -ENOMEM; + } + + /* IF_TBL */ + if_tbl_cfg.num_elements = TF_IF_TBL_TYPE_MAX; + if_tbl_cfg.cfg = tf_if_tbl_p58; + rc = tf_if_tbl_bind(tfp, &if_tbl_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, "IF Table initialization failure\n"); + goto fail; + } + + if (!tf_session_is_shared_session(tfs)) { + /* GLOBAL_CFG */ + global_cfg.num_elements = TF_GLOBAL_CFG_TYPE_MAX; + global_cfg.cfg = tf_global_cfg_p58; + rc = tf_global_cfg_bind(tfp, &global_cfg); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Global Cfg initialization failure\n"); + goto fail; + } + } + + /* Final function initialization */ + dev_handle->ops = &tf_dev_ops_p58; + + return 0; + + fail: + /* Cleanup of already created modules */ + frc = tf_dev_unbind_p58(tfp); + if (frc) + return frc; + + return rc; +} + +/** + * Device specific unbind function, THOR + * + * @tfp: Pointer to TF handle + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_unbind_p58(struct tf *tfp) +{ + struct tf_session *tfs; + bool fail = false; + int rc = 0; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Unbind all the support modules. 
As this is only done on + * close we only report errors as everything has to be cleaned + * up regardless. + * + * In case of residuals TCAMs are cleaned up first as to + * invalidate the pipeline in a clean manner. + */ + rc = tf_tcam_unbind(tfp); + + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, TCAM\n"); + fail = true; + } + + rc = tf_ident_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, Identifier\n"); + fail = true; + } + + /* Unbind the SRAM table prior to table as the table manager + * owns and frees the table DB while the SRAM table manager owns + * and manages it's internal data structures. SRAM table manager + * relies on the table rm_db to exist. + */ + rc = tf_tbl_sram_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, SRAM table\n"); + fail = true; + } + + rc = tf_tbl_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, Table Type\n"); + fail = true; + } + + rc = tf_em_int_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device unbind failed, EM\n"); + fail = true; + } + + rc = tf_if_tbl_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Device unbind failed, IF Table Type\n"); + fail = true; + } + + if (!tf_session_is_shared_session(tfs)) { + rc = tf_global_cfg_unbind(tfp); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Device unbind failed, Global Cfg Type\n"); + fail = true; + } + } + + if (fail) + return -1; + + return rc; +} + +int tf_dev_bind(struct tf *tfp, enum tf_device_type type, + struct tf_session_resources *resources, + u16 wc_num_slices, struct tf_dev_info *dev_handle) +{ + switch (type) { + case TF_DEVICE_TYPE_P4: + dev_handle->type = type; + return tf_dev_bind_p4(tfp, resources, + dev_handle, wc_num_slices); + case TF_DEVICE_TYPE_P5: + dev_handle->type = type; + return tf_dev_bind_p58(tfp, resources, + dev_handle, wc_num_slices); + default: + netdev_dbg(tfp->bp->dev, "No such device\n"); + return -ENODEV; + } +} + +int tf_dev_bind_ops(enum 
tf_device_type type, struct tf_dev_info *dev_handle) +{ + switch (type) { + case TF_DEVICE_TYPE_P4: + dev_handle->ops = &tf_dev_ops_p4_init; + break; + case TF_DEVICE_TYPE_P5: + dev_handle->ops = &tf_dev_ops_p58_init; + break; + default: + netdev_dbg(NULL, "%s: No such device\n", __func__); + return -ENODEV; + } + + return 0; +} + +int tf_dev_unbind(struct tf *tfp, struct tf_dev_info *dev_handle) +{ + switch (dev_handle->type) { + case TF_DEVICE_TYPE_P4: + return tf_dev_unbind_p4(tfp); + case TF_DEVICE_TYPE_P5: + return tf_dev_unbind_p58(tfp); + default: + netdev_dbg(tfp->bp->dev, "No such device\n"); + return -ENODEV; + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.h new file mode 100644 index 000000000000..552bd99cf609 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device.h @@ -0,0 +1,881 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_DEVICE_H_ +#define _TF_DEVICE_H_ + +#include "cfa_resource_types.h" +#include "tf_core.h" +#include "tf_identifier.h" +#include "tf_tbl.h" +#include "tf_tcam.h" +#include "tf_if_tbl.h" +#include "tf_global_cfg.h" + +struct tf; +struct tf_session; + +/** + * The Device module provides a general device template. A supported + * device type should implement one or more of the listed function + * pointers according to its capabilities. + * + * If a device function pointer is NULL the device capability is not + * supported. + */ + +/* TF device information */ +struct tf_dev_info { + enum tf_device_type type; + const struct tf_dev_ops *ops; +}; + +/** + * This structure can be used to translate the CFA resource type to TF type. + * + * @module_type: Truflow module type associated with this resource type. + * @type_caps: Bitmap of TF sub-type for the element. 
+ */ +struct tf_hcapi_resource_map { + enum tf_module_type module_type; + u32 type_caps; +}; + +/** + * Device bind handles the initialization of the specified device + * type. + * + * @tfp: Pointer to TF handle + * @type: Device type + * @resources: Pointer to resource allocation information + * @wc_num_slices: Number of slices per row for WC + * @dev_handle: Device handle [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) parameter failure. + * - (-ENODEV) no such device supported. + */ +int tf_dev_bind(struct tf *tfp, + enum tf_device_type type, + struct tf_session_resources *resources, + u16 wc_num_slices, + struct tf_dev_info *dev_handle); + +/** + * Device release handles cleanup of the device specific information. + * + * @tfp: Pointer to TF handle + * @dev_handle: Device handle + * + * Returns + * - (0) if successful. + * - (-EINVAL) parameter failure. + * - (-ENODEV) no such device supported. + */ +int tf_dev_unbind(struct tf *tfp, struct tf_dev_info *dev_handle); + +/** + * Device_ops bind handles the initialization of the specified device + * type prior to a successful tf_open_session() call. This allows + * APIs to operate which do not require an open session to access the + * device specific functions they need. + * + * @tfp: Pointer to TF handle + * @type: Device type + * @dev_handle: Device handle [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) parameter failure. + * - (-ENODEV) no such device supported. + */ +int tf_dev_bind_ops(enum tf_device_type type, struct tf_dev_info *dev_handle); + +/** + * Truflow device specific function hooks structure + * + * The following device hooks can be defined; unless noted otherwise, + * they are optional and can be filled with a null pointer. The + * purpose of these hooks is to support Truflow device operations for + * different device variants. + */ +struct tf_dev_ops { + /** + * Retrieves the MAX number of resource types that the device + * supports. 
+ * + * @tfp: Pointer to TF handle + * @max_types: Pointer to MAX number of types the device supports[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_max_types)(struct tf *tfp, u16 *max_types); + + /** + * Retrieves the string description for the CFA resource type + * + * @tfp: Pointer to TF handle + * @resource_id: HCAPI cfa resource type id + * @resource_str: Pointer to a string + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_resource_str)(struct tf *tfp, u16 resource_id, + const char **resource_str); + + /** + * Set the WC TCAM slice information that the device + * supports. + * + * @tfp: Pointer to TF handle + * @num_slices_per_row: Number of slices per row the device supports + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_tcam_slice_info)(struct tf *tfp, + enum tf_wc_num_slice + num_slices_per_row); + + /** + * Retrieves the WC TCAM slice information that the device + * supports. + * + * @tfp: Pointer to TF handle + * @type: TCAM table type + * @key_sz: Key size + * @num_slices_per_row: Pointer to number of slices per row the + * device supports[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_tcam_slice_info)(struct tf *tfp, + enum tf_tcam_tbl_type type, + u16 key_sz, + u16 *num_slices_per_row); + /** + * Allocation of an identifier element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to identifier allocation parameters + * + * This API allocates the specified identifier element from a + * device specific identifier DB. The allocated element is + * returned. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_ident)(struct tf *tfp, + struct tf_ident_alloc_parms *parms); + + /** + * Free of an identifier element. 
+ * + * @tfp: Pointer to TF handle + * @parms: Pointer to identifier free parameters + * + * This API free's a previous allocated identifier element from a + * device specific identifier DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_free_ident)(struct tf *tfp, + struct tf_ident_free_parms *parms); + + /** + * Retrieves the identifier resource info. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to identifier info + * + * This API retrieves the identifier resource info from the rm db. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_ident_resc_info)(struct tf *tfp, + struct tf_identifier_resource_info + *parms); + + /** + * Indicates whether the index table type is SRAM managed + * + * @tfp: Pointer to TF handle + * @type: Truflow index table type, e.g. TF_TYPE_FULL_ACT_RECORD + * + * Returns + * - (0) if the table is not managed by the SRAM manager + * - (1) if the table is managed by the SRAM manager + */ + bool (*tf_dev_is_sram_managed)(struct tf *tfp, + enum tf_tbl_type tbl_type); + + /** + * Get SRAM table information. + * + * Converts an internal RM allocated element offset to + * a user address and vice versa. + * + * @tfp: Pointer to TF handle + * @type: Truflow index table type, e.g. TF_TYPE_FULL_ACT_RECORD + * @base: Pointer to the base address of the associated table + * type. + * @shift: Pointer to any shift required for the associated table + * type. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_tbl_info)(struct tf *tfp, void *tbl_db, + enum tf_tbl_type type, u16 *base, + u16 *shift); + + /** + * Allocation of an index table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table allocation parameters + * + * This API allocates the specified table type element from a + * device specific table type DB. The allocated element is + * returned. + * + * Returns + * - (0) if successful. 
+ * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_tbl)(struct tf *tfp, + struct tf_tbl_alloc_parms *parms); + + /** + * Allocation of an SRAM index table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table allocation parameters + * + * This API allocates the specified table type element from a + * device specific table type DB. The allocated element is + * returned. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_sram_tbl)(struct tf *tfp, + struct tf_tbl_alloc_parms *parms); + + /** + * Free of a table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table free parameters + * + * This API free's a previous allocated table type element from a + * device specific table type DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_free_tbl)(struct tf *tfp, + struct tf_tbl_free_parms *parms); + + /** + * Free of an SRAM table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table free parameters + * + * This API free's a previous allocated table type element from a + * device specific table type DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_free_sram_tbl)(struct tf *tfp, + struct tf_tbl_free_parms *parms); + + /** + * Sets the specified table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table set parameters + * + * This API sets the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_tbl)(struct tf *tfp, + struct tf_tbl_set_parms *parms); + + /** + * Retrieves the specified table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table get parameters + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ + int (*tf_dev_get_tbl)(struct tf *tfp, + struct tf_tbl_get_parms *parms); + + /** + * Retrieves the specified SRAM table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table get parameters + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_sram_tbl)(struct tf *tfp, + struct tf_tbl_get_parms *parms); + + /** + * Retrieves the specified table type element using 'bulk' mechanism. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table get bulk parameters + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_bulk_tbl)(struct tf *tfp, + struct tf_tbl_get_bulk_parms *parms); + + /** + * Retrieves the specified SRAM table type element using 'bulk' + * mechanism. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table get bulk parameters + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_bulk_sram_tbl)(struct tf *tfp, + struct tf_tbl_get_bulk_parms *parms); + + /** + * Gets the increment value to add to the shared session resource + * start offset by for each count in the "stride" + * + * @tfp: Pointer to TF handle + * @parms: Pointer to get shared tbl increment parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_shared_tbl_increment)(struct tf *tfp, + struct tf_get_shared_tbl_increment_parms *parms); + + /** + * Retrieves the table resource info. + * + * This API retrieves the table resource info from the rm db. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tbl info + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ + int (*tf_dev_get_tbl_resc_info)(struct tf *tfp, + struct tf_tbl_resource_info *parms); + + /** + * Allocation of a tcam element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam allocation parameters + * + * This API allocates the specified tcam element from a device + * specific tcam DB. The allocated element is returned. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_tcam)(struct tf *tfp, + struct tf_tcam_alloc_parms *parms); + + /** + * Free of a tcam element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam free parameters + * + * This API free's a previous allocated tcam element from a + * device specific tcam DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_free_tcam)(struct tf *tfp, + struct tf_tcam_free_parms *parms); + + /** + * Searches for the specified tcam element in a shadow DB. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam allocation and search parameters + * + * This API searches for the specified tcam element in a + * device specific shadow DB. If the element is found the + * reference count for the element is updated. If the element + * is not found a new element is allocated from the tcam DB + * and then inserted into the shadow DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_search_tcam) + (struct tf *tfp, + struct tf_tcam_alloc_search_parms *parms); + + /** + * Sets the specified tcam element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam set parameters + * + * This API sets the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_tcam)(struct tf *tfp, + struct tf_tcam_set_parms *parms); + + /** + * Retrieves the specified tcam element. 
+ * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam get parameters + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_tcam)(struct tf *tfp, + struct tf_tcam_get_parms *parms); + + /** + * Retrieves the tcam resource info. + * + * This API retrieves the tcam resource info from the rm db. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to tcam info + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_tcam_resc_info)(struct tf *tfp, + struct tf_tcam_resource_info *parms); + + /** + * Insert EM hash entry API + * + * @tfp: Pointer to TF handle + * @parms: Pointer to E/EM insert parameters + * + * Returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_insert_int_em_entry)(struct tf *tfp, + struct tf_insert_em_entry_parms + *parms); + + /** + * Delete EM hash entry API + * + * @tfp: Pointer to TF handle + * @parms: Pointer to E/EM delete parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_delete_int_em_entry)(struct tf *tfp, + struct tf_delete_em_entry_parms + *parms); + + /** + * Move EM hash entry API + * + * @tfp: Pointer to TF handle + * @parms: Pointer to E/EM move parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_move_int_em_entry)(struct tf *tfp, + struct tf_move_em_entry_parms *parms); + + /** + * Retrieves the em resource info. + * + * This API retrieves the em resource info from the rm db. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to em info + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_em_resc_info)(struct tf *tfp, + struct tf_em_resource_info *parms); + + /** + * Sets the specified interface table type element. + * + * This API sets the specified element data by invoking the + * firmware. 
+ * + * @tfp: Pointer to TF handle + * @parms: Pointer to interface table set parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_if_tbl)(struct tf *tfp, + struct tf_if_tbl_set_parms *parms); + + /** + * Retrieves the specified interface table type element. + * + * This API retrieves the specified element data by invoking the + * firmware. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table get parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_if_tbl)(struct tf *tfp, + struct tf_if_tbl_get_parms *parms); + + /** + * Update global cfg + * + * @tfp: Pointer to TF handle + * @parms: Pointer to global cfg parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_set_global_cfg)(struct tf *tfp, + struct tf_global_cfg_parms *parms); + + /** + * Get global cfg + * + * @tfp: Pointer to TF handle + * @parms: Pointer to global cfg parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_get_global_cfg)(struct tf *tfp, + struct tf_global_cfg_parms *parms); + + /** + * Convert length in bit to length in byte and align to word. + * The word length depends on device type. + * + * @size: Size in bit + * + * Returns + * Size in byte + */ + int (*tf_dev_word_align)(u16 size); + + /** + * Hash key using crc32 and lookup3 + * + * @key_data: Pointer to key + * @bitlen: Number of key bits + * + * Returns + * Hashes + */ + u64 (*tf_dev_cfa_key_hash)(u8 *key_data, u16 bitlen); + + /** + * Translate the CFA resource type to Truflow type + * + * @hcapi_types: CFA resource type bitmap + * @ident_types: Pointer to identifier type bitmap + * @tcam_types: Pointer to tcam type bitmap + * @tbl_types: Pointer to table type bitmap + * @em_types: Pointer to em type bitmap + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ + int (*tf_dev_map_hcapi_caps)(u64 hcapi_caps, u32 *ident_caps, + u32 *tcam_caps, u32 *tbl_caps, + u32 *em_caps); + + /** + * Device specific function that retrieves the sram resource + * + * @query: Point to resources query result + * @sram_bank_caps: Pointer to SRAM bank capabilities + * @dynamic_sram_capable: Pointer to dynamic sram capable + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_sram_resources)(void *query, u32 *sram_bank_caps, + bool *dynamic_sram_capable); + + /** + * Device specific function that sets the sram policy + * + * @dir: Receive or transmit direction + * @band_id: SRAM bank id + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_sram_policy)(enum tf_dir dir, + enum tf_sram_bank_id *bank_id); + + /** + * Device specific function that gets the sram policy + * + * @dir: Receive or transmit direction + * @band_id: pointer to SRAM bank id + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_get_sram_policy)(enum tf_dir dir, + enum tf_sram_bank_id *bank_id); + + /** + * Allocation of a external table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table allocation parameters + * + * This API allocates the specified table type element from a + * device specific table type DB. The allocated element is + * returned. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_alloc_ext_tbl)(struct tf *tfp, + struct tf_tbl_alloc_parms *parms); + + /** + * Free of a external table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table free parameters + * + * This API free's a previous allocated table type element from a + * device specific table type DB. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ + int (*tf_dev_free_ext_tbl)(struct tf *tfp, + struct tf_tbl_free_parms *parms); + + /** + * Sets the specified external table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table set parameters + * + * This API sets the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_ext_tbl)(struct tf *tfp, + struct tf_tbl_set_parms *parms); + + /** + * Sets the specified SRAM table type element. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table set parameters + * + * This API sets the specified element data by invoking the + * firmware. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ + int (*tf_dev_set_sram_tbl)(struct tf *tfp, + struct tf_tbl_set_parms *parms); + + /** + * Insert EEM hash entry API + * + * @tfp: Pointer to TF handle + * @parms: Pointer to E/EM insert parameters + * + * Returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_insert_ext_em_entry)(struct tf *tfp, + struct tf_insert_em_entry_parms + *parms); + + /** + * Delete EEM hash entry API + * + * @tfp: Pointer to TF handle + * @parms: Pointer to E/EM delete parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_delete_ext_em_entry)(struct tf *tfp, + struct tf_delete_em_entry_parms + *parms); + /** + * Allocate EEM table scope + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table scope alloc parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_alloc_tbl_scope)(struct tf *tfp, + struct tf_alloc_tbl_scope_parms *parms); + + /** + * Map EEM parif + * + * @tfp: Pointer to TF handle + * @pf: PF associated with the table scope + * @parif_bitmask: Bitmask of PARIFs to enable + * @data: pointer to the parif_2_pf data to be updated + * @mask: pointer to the parif_2_pf mask to be updated + * @sz_in_bytes: number of bytes to be written + * + * returns: + * 0 - Success + * 
-EINVAL - Error + */ + int (*tf_dev_map_parif)(struct tf *tfp, u16 parif_bitmask, u16 pf, + u8 *data, u8 *mask, u16 sz_in_bytes); + + /** + * Map EEM table scope + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table scope map parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_map_tbl_scope)(struct tf *tfp, + struct tf_map_tbl_scope_parms *parms); + + /** + * Free EEM table scope + * + * @tfp: Pointer to TF handle + * @parms: Pointer to table scope free parameters + * + * returns: + * 0 - Success + * -EINVAL - Error + */ + int (*tf_dev_free_tbl_scope)(struct tf *tfp, + struct tf_free_tbl_scope_parms *parms); + +}; + +/** + * Supported device operation structures + */ +extern const struct tf_dev_ops tf_dev_ops_p4_init; +extern const struct tf_dev_ops tf_dev_ops_p4; +extern const struct tf_dev_ops tf_dev_ops_p58_init; +extern const struct tf_dev_ops tf_dev_ops_p58; + +/** + * Supported device resource type mapping structures + */ +extern const struct tf_hcapi_resource_map tf_hcapi_res_map_p4[CFA_RESOURCE_TYPE_P4_LAST + 1]; +extern const struct tf_hcapi_resource_map tf_hcapi_res_map_p58[CFA_RESOURCE_TYPE_P58_LAST + 1]; + +#endif /* _TF_DEVICE_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.c new file mode 100644 index 000000000000..b3cc30223313 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ +#include +#include +#include "cfa_resource_types.h" +#include "tf_device.h" +#include "tf_identifier.h" +#include "tf_tbl.h" +#include "tf_tcam.h" +#include "tf_em.h" +#include "tf_if_tbl.h" +#include "tf_util.h" + +#define TF_DEV_P4_PARIF_MAX 16 +#define TF_DEV_P4_PF_MASK 0xfUL + +const char *tf_resource_str_p4[CFA_RESOURCE_TYPE_P4_LAST + 1] = { + [CFA_RESOURCE_TYPE_P4_MCG] = "mc_group", + [CFA_RESOURCE_TYPE_P4_ENCAP_8B] = "encap_8 ", + [CFA_RESOURCE_TYPE_P4_ENCAP_16B] = "encap_16", + [CFA_RESOURCE_TYPE_P4_ENCAP_64B] = "encap_64", + [CFA_RESOURCE_TYPE_P4_SP_MAC] = "sp_mac ", + [CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4] = "sp_macv4", + [CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6] = "sp_macv6", + [CFA_RESOURCE_TYPE_P4_COUNTER_64B] = "ctr_64b ", + [CFA_RESOURCE_TYPE_P4_NAT_PORT] = "nat_port", + [CFA_RESOURCE_TYPE_P4_NAT_IPV4] = "nat_ipv4", + [CFA_RESOURCE_TYPE_P4_METER] = "meter ", + [CFA_RESOURCE_TYPE_P4_FLOW_STATE] = "flow_st ", + [CFA_RESOURCE_TYPE_P4_FULL_ACTION] = "full_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_0_ACTION] = "fmt0_act", + [CFA_RESOURCE_TYPE_P4_EXT_FORMAT_0_ACTION] = "ext0_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_1_ACTION] = "fmt1_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_2_ACTION] = "fmt2_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_3_ACTION] = "fmt3_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_4_ACTION] = "fmt4_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_5_ACTION] = "fmt5_act", + [CFA_RESOURCE_TYPE_P4_FORMAT_6_ACTION] = "fmt6_act", + [CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH] = "l2ctx_hi", + [CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW] = "l2ctx_lo", + [CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH] = "l2ctr_hi", + [CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW] = "l2ctr_lo", + [CFA_RESOURCE_TYPE_P4_PROF_FUNC] = "prf_func", + [CFA_RESOURCE_TYPE_P4_PROF_TCAM] = "prf_tcam", + [CFA_RESOURCE_TYPE_P4_EM_PROF_ID] = "em_prof ", + [CFA_RESOURCE_TYPE_P4_EM_REC] = "em_rec ", + [CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID] = "wc_prof ", + [CFA_RESOURCE_TYPE_P4_WC_TCAM] = "wc_tcam ", + [CFA_RESOURCE_TYPE_P4_METER_PROF] = "mtr_prof", 
+ [CFA_RESOURCE_TYPE_P4_MIRROR] = "mirror ", + [CFA_RESOURCE_TYPE_P4_SP_TCAM] = "sp_tcam ", + [CFA_RESOURCE_TYPE_P4_TBL_SCOPE] = "tb_scope", +}; + +struct tf_rm_element_cfg tf_tbl_p4[TF_DIR_MAX][TF_TBL_TYPE_MAX] = { + [TF_DIR_RX][TF_TBL_TYPE_FULL_ACT_RECORD] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_MCAST_GROUPS] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_8B] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_16B] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_64B] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_STATS_64] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_MODIFY_IPV4] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METER_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METER_INST] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_MIRROR_CONFIG] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_FULL_ACT_RECORD] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_FULL_ACTION, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_MCAST_GROUPS] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MCG, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_8B] = { + 
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_8B, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_16B] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_16B, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_64B] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_ENCAP_64B, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_STATS_64] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_COUNTER_64B, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_MODIFY_IPV4] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_NAT_IPV4, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METER_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER_PROF, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METER_INST] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_METER, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_MIRROR_CONFIG] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_MIRROR, + 0, 0 + }, +}; + +/** + * Device specific function that retrieves the MAX number of HCAPI + * types the device supports. + * + * @tfp: Pointer to TF handle + * @max_types: Pointer to the MAX number of CFA resource types supported + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p4_get_max_types(struct tf *tfp, u16 *max_types) +{ + if (!max_types || !tfp) + return -EINVAL; + + *max_types = CFA_RESOURCE_TYPE_P4_LAST + 1; + + return 0; +} + +/** + * Device specific function that retrieves a human readable + * string to identify a CFA resource type. + * + * @tfp: Pointer to TF handle + * @resource_id: HCAPI CFA resource id + * @resource_str: Resource string[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p4_get_resource_str(struct tf *tfp, u16 resource_id, + const char **resource_str) +{ + if (!resource_str) + return -EINVAL; + + if (resource_id > CFA_RESOURCE_TYPE_P4_LAST) + return -EINVAL; + + *resource_str = tf_resource_str_p4[resource_id]; + + return 0; +} + +/** + * Device specific function that set the WC TCAM slices the + * device supports. + * + * @tfp: Pointer to TF handle + * @num_slices_per_row: The WC TCAM row slice configuration + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p4_set_tcam_slice_info(struct tf *tfp, + enum tf_wc_num_slice + num_slices_per_row) +{ + struct tf_session *tfs = NULL; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + switch (num_slices_per_row) { + case TF_WC_TCAM_1_SLICE_PER_ROW: + case TF_WC_TCAM_2_SLICE_PER_ROW: + case TF_WC_TCAM_4_SLICE_PER_ROW: + tfs->wc_num_slices_per_row = num_slices_per_row; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * Device specific function that retrieves the WC TCAM slices the + * device supports. + * + * @tfp: Pointer to TF handle + * @slice_size: Pointer to the WC TCAM slice size[out] + * @num_slices_per_row: Pointer to the WC TCAM row slice configuration[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p4_get_tcam_slice_info(struct tf *tfp, + enum tf_tcam_tbl_type type, + u16 key_sz, u16 *num_slices_per_row) +{ + struct tf_session *tfs; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + +/* Single slice support */ +#define CFA_P4_WC_TCAM_SLICE_SIZE 12 + + if (type == TF_TCAM_TBL_TYPE_WC_TCAM) { + if (key_sz <= 1 * CFA_P4_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_1_SLICE_PER_ROW; + else if (key_sz <= 2 * CFA_P4_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_2_SLICE_PER_ROW; + else if (key_sz <= 4 * CFA_P4_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_4_SLICE_PER_ROW; + else + return -EOPNOTSUPP; + } else { /* for other type of tcam */ + *num_slices_per_row = 1; + } + + return 0; +} + +/** + * Device specific function that retrieves the increment + * required for certain table types in a shared session + * + * @tfp: tf handle + * @parms: pointer to parms structure + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int +tf_dev_p4_get_shared_tbl_increment(struct tf *tfp, + struct tf_get_shared_tbl_increment_parms + *parms) +{ + parms->increment_cnt = 1; + return 0; +} + +static int tf_dev_p4_word_align(u16 size) +{ + return TF_BITS2BYTES_WORD_ALIGN(size); +} + +/** + * Indicates whether the index table type is SRAM managed + * + * @tfp: Pointer to TF handle + * @type: Truflow index table type, e.g. TF_TYPE_FULL_ACT_RECORD + * + * Returns + * - (0) if the table is not managed by the SRAM manager + * - (1) if the table is managed by the SRAM manager + */ +static bool tf_dev_p4_is_sram_managed(struct tf *tfp, + enum tf_tbl_type type) +{ + return false; +} + +/** + * Device specific function that maps the hcapi resource types + * to Truflow type. 
+ * + * @hcapi_caps: CFA resource type bitmap + * @ident_caps: Pointer to identifier type bitmap + * @tcam_caps: Pointer to tcam type bitmap + * @tbl_caps: Pointer to table type bitmap + * @em_caps: Pointer to em type bitmap + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p4_map_hcapi_caps(u64 hcapi_caps, u32 *ident_caps, + u32 *tcam_caps, u32 *tbl_caps, + u32 *em_caps) +{ + u32 i; + + *ident_caps = 0; + *tcam_caps = 0; + *tbl_caps = 0; + *em_caps = 0; + + for (i = 0; i <= CFA_RESOURCE_TYPE_P4_LAST; i++) { + if (hcapi_caps & 1ULL << i) { + switch (tf_hcapi_res_map_p4[i].module_type) { + case TF_MODULE_TYPE_IDENTIFIER: + *ident_caps |= tf_hcapi_res_map_p4[i].type_caps; + break; + case TF_MODULE_TYPE_TABLE: + *tbl_caps |= tf_hcapi_res_map_p4[i].type_caps; + break; + case TF_MODULE_TYPE_TCAM: + *tcam_caps |= tf_hcapi_res_map_p4[i].type_caps; + break; + case TF_MODULE_TYPE_EM: + *em_caps |= tf_hcapi_res_map_p4[i].type_caps; + break; + default: + return -EINVAL; + } + } + } + + return 0; +} + +/* Truflow P4 device specific functions */ +const struct tf_dev_ops tf_dev_ops_p4_init = { + .tf_dev_get_max_types = tf_dev_p4_get_max_types, + .tf_dev_get_resource_str = tf_dev_p4_get_resource_str, + .tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info, + .tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info, + .tf_dev_alloc_ident = NULL, + .tf_dev_free_ident = NULL, + .tf_dev_get_ident_resc_info = NULL, + .tf_dev_get_tbl_info = NULL, + .tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed, + .tf_dev_alloc_tbl = NULL, + .tf_dev_alloc_sram_tbl = NULL, + .tf_dev_free_tbl = NULL, + .tf_dev_free_sram_tbl = NULL, + .tf_dev_set_tbl = NULL, + .tf_dev_set_sram_tbl = NULL, + .tf_dev_get_tbl = NULL, + .tf_dev_get_sram_tbl = NULL, + .tf_dev_get_bulk_tbl = NULL, + .tf_dev_get_bulk_sram_tbl = NULL, + .tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment, + .tf_dev_get_tbl_resc_info = NULL, + .tf_dev_alloc_tcam = NULL, + 
.tf_dev_free_tcam = NULL, + .tf_dev_set_tcam = NULL, + .tf_dev_get_tcam = NULL, + .tf_dev_word_align = NULL, + .tf_dev_map_hcapi_caps = tf_dev_p4_map_hcapi_caps, + .tf_dev_get_sram_resources = NULL, + .tf_dev_set_sram_policy = NULL, + .tf_dev_get_sram_policy = NULL, +}; + +/* Truflow P4 device specific functions */ +const struct tf_dev_ops tf_dev_ops_p4 = { + .tf_dev_get_max_types = tf_dev_p4_get_max_types, + .tf_dev_get_resource_str = tf_dev_p4_get_resource_str, + .tf_dev_set_tcam_slice_info = tf_dev_p4_set_tcam_slice_info, + .tf_dev_get_tcam_slice_info = tf_dev_p4_get_tcam_slice_info, + .tf_dev_alloc_ident = tf_ident_alloc, + .tf_dev_free_ident = tf_ident_free, + .tf_dev_get_ident_resc_info = tf_ident_get_resc_info, + .tf_dev_is_sram_managed = tf_dev_p4_is_sram_managed, + .tf_dev_alloc_tbl = tf_tbl_alloc, + .tf_dev_alloc_sram_tbl = tf_tbl_alloc, + .tf_dev_free_tbl = tf_tbl_free, + .tf_dev_free_sram_tbl = tf_tbl_free, + .tf_dev_set_tbl = tf_tbl_set, + .tf_dev_set_sram_tbl = NULL, + .tf_dev_get_tbl = tf_tbl_get, + .tf_dev_get_sram_tbl = NULL, + .tf_dev_get_bulk_tbl = tf_tbl_bulk_get, + .tf_dev_get_bulk_sram_tbl = NULL, + .tf_dev_get_shared_tbl_increment = tf_dev_p4_get_shared_tbl_increment, + .tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info, + .tf_dev_get_tcam_resc_info = tf_tcam_get_resc_info, + .tf_dev_alloc_tcam = tf_tcam_alloc, + .tf_dev_free_tcam = tf_tcam_free, + .tf_dev_set_tcam = tf_tcam_set, + .tf_dev_get_tcam = tf_tcam_get, + .tf_dev_alloc_search_tcam = tf_tcam_alloc_search, + .tf_dev_insert_int_em_entry = tf_em_insert_int_entry, + .tf_dev_delete_int_em_entry = tf_em_delete_int_entry, + .tf_dev_get_em_resc_info = tf_em_get_resc_info, + .tf_dev_set_if_tbl = tf_if_tbl_set, + .tf_dev_get_if_tbl = tf_if_tbl_get, + .tf_dev_set_global_cfg = tf_global_cfg_set, + .tf_dev_get_global_cfg = tf_global_cfg_get, + .tf_dev_cfa_key_hash = hcapi_cfa_p4_key_hash, + .tf_dev_word_align = tf_dev_p4_word_align, + .tf_dev_map_hcapi_caps = tf_dev_p4_map_hcapi_caps, + 
.tf_dev_get_sram_resources = NULL, + .tf_dev_set_sram_policy = NULL, + .tf_dev_get_sram_policy = NULL, +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.h new file mode 100644 index 000000000000..ce349c5fd729 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p4.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_DEVICE_P4_H_ +#define _TF_DEVICE_P4_H_ + +#include "cfa_resource_types.h" +#include "tf_core.h" +#include "tf_rm.h" +#include "tf_if_tbl.h" +#include "tf_global_cfg.h" + +extern struct tf_rm_element_cfg tf_tbl_p4[TF_DIR_MAX][TF_TBL_TYPE_MAX]; + +struct tf_rm_element_cfg tf_ident_p4[TF_IDENT_TYPE_MAX] = { + [TF_IDENT_TYPE_L2_CTXT_HIGH] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH, + 0, 0 + }, + [TF_IDENT_TYPE_L2_CTXT_LOW] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW, + 0, 0 + }, + [TF_IDENT_TYPE_PROF_FUNC] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_FUNC, + 0, 0 + }, + [TF_IDENT_TYPE_WC_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID, + 0, 0 + }, + [TF_IDENT_TYPE_EM_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_EM_PROF_ID, + 0, 0 + }, +}; + +struct tf_rm_element_cfg tf_tcam_p4[TF_TCAM_TBL_TYPE_MAX] = { + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_PROF_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_PROF_TCAM, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_WC_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_WC_TCAM, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_SP_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_SP_TCAM, + 0, 0 + 
}, +}; + +struct tf_rm_element_cfg tf_em_ext_p4[TF_EM_TBL_TYPE_MAX] = { + [TF_EM_TBL_TYPE_TBL_SCOPE] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P4_TBL_SCOPE, + 0, 0 + }, +}; + +struct tf_rm_element_cfg tf_em_int_p4[TF_EM_TBL_TYPE_MAX] = { + [TF_EM_TBL_TYPE_EM_RECORD] = { + TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P4_EM_REC, + 0, 0 + }, +}; + +/* Note that hcapi_types from this table are from hcapi_cfa_p4.h + * These are not CFA resource types because they are not allocated + * CFA resources - they are identifiers for the interface tables + * shared between the firmware and the host. It may make sense to + * move these types to cfa_resource_types.h. + */ +struct tf_if_tbl_cfg tf_if_tbl_p4[TF_IF_TBL_TYPE_MAX] = { + [TF_IF_TBL_TYPE_PROF_SPIF_DFLT_L2_CTXT] = { + TF_IF_TBL_CFG, CFA_P4_TBL_PROF_SPIF_DFLT_L2CTXT + }, + [TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR] = { + TF_IF_TBL_CFG, CFA_P4_TBL_PROF_PARIF_DFLT_ACT_REC_PTR + }, + [TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR] = { + TF_IF_TBL_CFG, CFA_P4_TBL_PROF_PARIF_ERR_ACT_REC_PTR + }, + [TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR] = { + TF_IF_TBL_CFG, CFA_P4_TBL_LKUP_PARIF_DFLT_ACT_REC_PTR + }, +}; + +struct tf_global_cfg_cfg tf_global_cfg_p4[TF_GLOBAL_CFG_TYPE_MAX] = { + [TF_TUNNEL_ENCAP] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_TUNNEL_ENCAP + }, + [TF_ACTION_BLOCK] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_ACTION_BLOCK + }, +}; + +const struct tf_hcapi_resource_map tf_hcapi_res_map_p4[CFA_RESOURCE_TYPE_P4_LAST + 1] = { + [CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_HIGH] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_L2_CTXT_HIGH + }, + [CFA_RESOURCE_TYPE_P4_L2_CTXT_REMAP_LOW] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_L2_CTXT_LOW + }, + [CFA_RESOURCE_TYPE_P4_PROF_FUNC] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_PROF_FUNC + }, + [CFA_RESOURCE_TYPE_P4_WC_TCAM_PROF_ID] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_WC_PROF + }, + [CFA_RESOURCE_TYPE_P4_EM_PROF_ID] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << 
TF_IDENT_TYPE_EM_PROF + }, + [CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_HIGH] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH + }, + [CFA_RESOURCE_TYPE_P4_L2_CTXT_TCAM_LOW] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW + }, + [CFA_RESOURCE_TYPE_P4_PROF_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_PROF_TCAM + }, + [CFA_RESOURCE_TYPE_P4_WC_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_WC_TCAM + }, + [CFA_RESOURCE_TYPE_P4_SP_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_SP_TCAM + }, + [CFA_RESOURCE_TYPE_P4_NAT_IPV4] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_MODIFY_IPV4 + }, + [CFA_RESOURCE_TYPE_P4_METER_PROF] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METER_PROF + }, + [CFA_RESOURCE_TYPE_P4_METER] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METER_INST + }, + [CFA_RESOURCE_TYPE_P4_MIRROR] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_MIRROR_CONFIG + }, + [CFA_RESOURCE_TYPE_P4_FULL_ACTION] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_FULL_ACT_RECORD + }, + [CFA_RESOURCE_TYPE_P4_MCG] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_MCAST_GROUPS + }, + [CFA_RESOURCE_TYPE_P4_ENCAP_8B] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_ENCAP_8B + }, + [CFA_RESOURCE_TYPE_P4_ENCAP_16B] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_ENCAP_16B + }, + [CFA_RESOURCE_TYPE_P4_ENCAP_64B] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_ENCAP_64B + }, + [CFA_RESOURCE_TYPE_P4_SP_MAC] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_SP_SMAC + }, + [CFA_RESOURCE_TYPE_P4_SP_MAC_IPV4] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_SP_SMAC_IPV4 + }, + [CFA_RESOURCE_TYPE_P4_SP_MAC_IPV6] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_SP_SMAC_IPV6 + }, + [CFA_RESOURCE_TYPE_P4_COUNTER_64B] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_STATS_64 + }, + [CFA_RESOURCE_TYPE_P4_EM_REC] = { + TF_MODULE_TYPE_EM, 1 << TF_EM_TBL_TYPE_EM_RECORD + }, + [CFA_RESOURCE_TYPE_P4_TBL_SCOPE] = { + TF_MODULE_TYPE_EM, 1 << 
TF_EM_TBL_TYPE_TBL_SCOPE + }, +}; + +#endif /* _TF_DEVICE_P4_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.c new file mode 100644 index 000000000000..4361a528ae4b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#include +#include +#include "bnxt_hsi.h" +#include "cfa_resource_types.h" +#include "tf_device.h" +#include "tf_identifier.h" +#include "tf_tbl.h" +#include "tf_tcam.h" +#include "tf_em.h" +#include "tf_if_tbl.h" +#include "tf_tbl_sram.h" +#include "cfa_resource_types.h" +#include "tf_device.h" +#include "tf_identifier.h" +#include "tf_tbl.h" +#include "tf_tcam.h" +#include "tf_em.h" +#include "tf_if_tbl.h" +#include "tf_util.h" + +#define TF_DEV_P58_PARIF_MAX 16 +#define TF_DEV_P58_PF_MASK 0xfUL + +/* For print alignment, make all entries 8 chars in this table */ +const char *tf_resource_str_p58[CFA_RESOURCE_TYPE_P58_LAST + 1] = { + [CFA_RESOURCE_TYPE_P58_METER] = "meter ", + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = "sram_bk0", + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = "sram_bk1", + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = "sram_bk2", + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = "sram_bk3", + [CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH] = "l2ctx_hi", + [CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW] = "l2ctx_lo", + [CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH] = "l2ctr_hi", + [CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW] = "l2ctr_lo", + [CFA_RESOURCE_TYPE_P58_PROF_FUNC] = "prf_func", + [CFA_RESOURCE_TYPE_P58_PROF_TCAM] = "prf_tcam", + [CFA_RESOURCE_TYPE_P58_EM_PROF_ID] = "em_prof ", + [CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID] = "wc_prof ", + [CFA_RESOURCE_TYPE_P58_EM_REC] = "em_rec ", + [CFA_RESOURCE_TYPE_P58_WC_TCAM] = "wc_tcam ", + [CFA_RESOURCE_TYPE_P58_METER_PROF] = "mtr_prof", + [CFA_RESOURCE_TYPE_P58_MIRROR] = "mirror ", 
+ [CFA_RESOURCE_TYPE_P58_EM_FKB] = "em_fkb ", + [CFA_RESOURCE_TYPE_P58_WC_FKB] = "wc_fkb ", + [CFA_RESOURCE_TYPE_P58_VEB_TCAM] = "veb ", + [CFA_RESOURCE_TYPE_P58_METADATA] = "metadata", + [CFA_RESOURCE_TYPE_P58_METER_DROP_CNT] = "meter_dc", +}; + +struct tf_rm_element_cfg tf_tbl_p58[TF_DIR_MAX][TF_TBL_TYPE_MAX] = { + [TF_DIR_RX][TF_TBL_TYPE_EM_FKB] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_WC_FKB] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METER_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METER_INST] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METER_DROP_CNT] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_MIRROR_CONFIG] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR, + 0, 0 + }, + [TF_DIR_RX][TF_TBL_TYPE_METADATA] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA, + 0, 0 + }, + /* Policy - ARs in bank 1 */ + [TF_DIR_RX][TF_TBL_TYPE_FULL_ACT_RECORD] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1, + .slices = 4, + }, + [TF_DIR_RX][TF_TBL_TYPE_COMPACT_ACT_RECORD] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_FULL_ACT_RECORD, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1, + .slices = 8, + }, + /* Policy - Encaps in bank 2 */ + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_8B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 8, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_16B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 4, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_32B] = { + .cfg_type = 
TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 2, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_ENCAP_64B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 1, + }, + /* Policy - Modify in bank 2 with Encaps */ + [TF_DIR_RX][TF_TBL_TYPE_ACT_MODIFY_8B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 8, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_MODIFY_16B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 4, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_MODIFY_32B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 2, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_MODIFY_64B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 1, + }, + /* Policy - SP in bank 0 */ + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 8, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 4, + }, + [TF_DIR_RX][TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 2, + }, + /* Policy - Stats in bank 3 */ + [TF_DIR_RX][TF_TBL_TYPE_ACT_STATS_64] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = 
CFA_RESOURCE_TYPE_P58_SRAM_BANK_3, + .slices = 8, + }, + [TF_DIR_TX][TF_TBL_TYPE_EM_FKB] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_FKB, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_WC_FKB] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_FKB, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METER_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_PROF, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METER_INST] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METER_DROP_CNT] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_MIRROR_CONFIG] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR, + 0, 0 + }, + [TF_DIR_TX][TF_TBL_TYPE_METADATA] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA, + 0, 0 + }, + /* Policy - ARs in bank 1 */ + [TF_DIR_TX][TF_TBL_TYPE_FULL_ACT_RECORD] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1, + .slices = 4, + }, + [TF_DIR_TX][TF_TBL_TYPE_COMPACT_ACT_RECORD] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_FULL_ACT_RECORD, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1, + .slices = 8, + }, + /* Policy - Encaps in bank 2 */ + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_8B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 8, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_16B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 4, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_32B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 2, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_ENCAP_64B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + 
.hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 1, + }, + /* Policy - Modify in bank 2 with Encaps */ + [TF_DIR_TX][TF_TBL_TYPE_ACT_MODIFY_8B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 8, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_MODIFY_16B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 4, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_MODIFY_32B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 2, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_MODIFY_64B] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_ENCAP_8B, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + .slices = 1, + }, + /* Policy - SP in bank 0 */ + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 8, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 4, + }, + [TF_DIR_TX][TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD, + .parent_subtype = TF_TBL_TYPE_ACT_SP_SMAC, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + .slices = 2, + }, + /* Policy - Stats in bank 3 */ + [TF_DIR_TX][TF_TBL_TYPE_ACT_STATS_64] = { + .cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT, + .hcapi_type = CFA_RESOURCE_TYPE_P58_SRAM_BANK_3, + .slices = 8, + }, +}; + +/** + * Device specific function that retrieves the MAX number of HCAPI + * types the device supports. 
+ * + * @tfp: Pointer to TF handle + * @max_types: Pointer to the MAX number of HCAPI types supported[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p58_get_max_types(struct tf *tfp, u16 *max_types) +{ + if (!max_types || !tfp) + return -EINVAL; + + *max_types = CFA_RESOURCE_TYPE_P58_LAST + 1; + + return 0; +} + +/** + * Device specific function that retrieves a human readable + * string to identify a CFA resource type. + * + * @tfp: Pointer to TF handle + * @resource_id: HCAPI CFA resource id + * @resource_str: Resource string[out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p58_get_resource_str(struct tf *tfp, u16 resource_id, + const char **resource_str) +{ + if (!resource_str) + return -EINVAL; + + if (resource_id > CFA_RESOURCE_TYPE_P58_LAST) + return -EINVAL; + + *resource_str = tf_resource_str_p58[resource_id]; + + return 0; +} + +/** + * Device specific function that set the WC TCAM slices the + * device supports. + * + * @tfp: Pointer to TF handle + * @num_slices_per_row: The WC TCAM row slice configuration + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p58_set_tcam_slice_info(struct tf *tfp, + enum tf_wc_num_slice + num_slices_per_row) +{ + struct tf_session *tfs = NULL; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + switch (num_slices_per_row) { + case TF_WC_TCAM_1_SLICE_PER_ROW: + case TF_WC_TCAM_2_SLICE_PER_ROW: + case TF_WC_TCAM_4_SLICE_PER_ROW: + tfs->wc_num_slices_per_row = num_slices_per_row; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * Device specific function that retrieves the WC TCAM slices the + * device supports. 
+ * + * @tfp: Pointer to TF handle + * @slice_size: Pointer to the WC TCAM slice size + * @num_slices_per_row: Pointer to the WC TCAM row slice configuration + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p58_get_tcam_slice_info(struct tf *tfp, + enum tf_tcam_tbl_type type, + u16 key_sz, u16 *num_slices_per_row) +{ + struct tf_session *tfs = NULL; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + +#define CFA_P58_WC_TCAM_SLICE_SIZE 24 + + if (type == TF_TCAM_TBL_TYPE_WC_TCAM) { + if (key_sz <= 1 * CFA_P58_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_1_SLICE_PER_ROW; + else if (key_sz <= 2 * CFA_P58_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_2_SLICE_PER_ROW; + else if (key_sz <= 4 * CFA_P58_WC_TCAM_SLICE_SIZE) + *num_slices_per_row = TF_WC_TCAM_4_SLICE_PER_ROW; + else + return -EOPNOTSUPP; + } else { /* for other type of tcam */ + *num_slices_per_row = 1; + } + + return 0; +} + +static int tf_dev_p58_word_align(u16 size) +{ + return TF_BITS2BYTES_64B_WORD_ALIGN(size); +} + +/** + * Device specific function that retrieves the increment + * required for certain table types in a shared session + * + * @tfp: tf handle + * @parms: pointer to parms structure + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int +tf_dev_p58_get_shared_tbl_increment(struct tf *tfp, + struct tf_get_shared_tbl_increment_parms + *parms) +{ + switch (parms->type) { + case TF_TBL_TYPE_FULL_ACT_RECORD: + case TF_TBL_TYPE_COMPACT_ACT_RECORD: + case TF_TBL_TYPE_ACT_ENCAP_8B: + case TF_TBL_TYPE_ACT_ENCAP_16B: + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_ENCAP_64B: + case TF_TBL_TYPE_ACT_SP_SMAC: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV4: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV6: + case TF_TBL_TYPE_ACT_STATS_64: + case TF_TBL_TYPE_ACT_MODIFY_IPV4: + case TF_TBL_TYPE_ACT_MODIFY_8B: + case TF_TBL_TYPE_ACT_MODIFY_16B: + case TF_TBL_TYPE_ACT_MODIFY_32B: + case TF_TBL_TYPE_ACT_MODIFY_64B: + parms->increment_cnt = 8; + break; + default: + parms->increment_cnt = 1; + break; + } + return 0; +} + +/** + * Indicates whether the index table type is SRAM managed + * + * @tfp: Pointer to TF handle + * @type: Truflow index table type, e.g. TF_TYPE_FULL_ACT_RECORD + * + * Returns + * - (0) if the table is not managed by the SRAM manager + * - (1) if the table is managed by the SRAM manager + */ +static bool tf_dev_p58_is_sram_managed(struct tf *tfp, enum tf_tbl_type type) +{ + switch (type) { + case TF_TBL_TYPE_FULL_ACT_RECORD: + case TF_TBL_TYPE_COMPACT_ACT_RECORD: + case TF_TBL_TYPE_ACT_ENCAP_8B: + case TF_TBL_TYPE_ACT_ENCAP_16B: + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_ENCAP_64B: + case TF_TBL_TYPE_ACT_SP_SMAC: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV4: + case TF_TBL_TYPE_ACT_SP_SMAC_IPV6: + case TF_TBL_TYPE_ACT_STATS_64: + case TF_TBL_TYPE_ACT_MODIFY_IPV4: + case TF_TBL_TYPE_ACT_MODIFY_8B: + case TF_TBL_TYPE_ACT_MODIFY_16B: + case TF_TBL_TYPE_ACT_MODIFY_32B: + case TF_TBL_TYPE_ACT_MODIFY_64B: + return true; + default: + return false; + } +} + +#define TF_DEV_P58_BANK_SZ_64B 2048 +/** + * Get SRAM table information. + * + * Converts an internal RM allocated element offset to + * a user address and vice versa. + * + * @tfp: Pointer to TF handle + * @type: Truflow index table type, e.g. 
TF_TYPE_FULL_ACT_RECORD + * @base: Pointer to the Base address of the associated SRAM bank used + * for the type of record allocated[in/out] + * @shift: Pointer to the factor to be used as a multiplier to translate + * between the RM units to the user address. SRAM manages 64B + * entries. Addresses must be shifted to an 8B address[in/out]. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +static int tf_dev_p58_get_sram_tbl_info(struct tf *tfp, void *db, + enum tf_tbl_type type, u16 *base, + u16 *shift) +{ + struct tf_rm_get_hcapi_parms parms; + u16 hcapi_type; + int rc; + + parms.rm_db = db; + parms.subtype = type; + parms.hcapi_type = &hcapi_type; + + rc = tf_rm_get_hcapi_type(&parms); + if (rc) { + *base = 0; + *shift = 0; + return 0; + } + + switch (hcapi_type) { + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_0: + *base = 0; + *shift = 3; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_1: + *base = TF_DEV_P58_BANK_SZ_64B; + *shift = 3; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_2: + *base = TF_DEV_P58_BANK_SZ_64B * 2; + *shift = 3; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_3: + *base = TF_DEV_P58_BANK_SZ_64B * 3; + *shift = 3; + break; + default: + *base = 0; + *shift = 0; + break; + } + return 0; +} + +/** + * Device specific function that maps the hcapi resource types + * to Truflow type. + * + * @hcapi_caps: CFA resource type bitmap + * @ident_caps: Pointer to identifier type bitmap + * @tcam_caps: Pointer to tcam type bitmap + * @tbl_caps: Pointer to table type bitmap + * @em_caps: Pointer to em type bitmap + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p58_map_hcapi_caps(u64 hcapi_caps, u32 *ident_caps, + u32 *tcam_caps, u32 *tbl_caps, + u32 *em_caps) +{ + u32 i; + + *ident_caps = 0; + *tcam_caps = 0; + *tbl_caps = 0; + *em_caps = 0; + + for (i = 0; i <= CFA_RESOURCE_TYPE_P58_LAST; i++) { + if (hcapi_caps & 1ULL << i) { + switch (tf_hcapi_res_map_p58[i].module_type) { + case TF_MODULE_TYPE_IDENTIFIER: + *ident_caps |= tf_hcapi_res_map_p58[i].type_caps; + break; + case TF_MODULE_TYPE_TABLE: + *tbl_caps |= tf_hcapi_res_map_p58[i].type_caps; + break; + case TF_MODULE_TYPE_TCAM: + *tcam_caps |= tf_hcapi_res_map_p58[i].type_caps; + break; + case TF_MODULE_TYPE_EM: + *em_caps |= tf_hcapi_res_map_p58[i].type_caps; + break; + default: + return -EINVAL; + } + } + } + + return 0; +} + +/** + * Device specific function that retrieve the sram resource + * + * @query: Point to resources query result + * @sram_bank_caps: Pointer to SRAM bank capabilities + * @dynamic_sram_capable: Pointer to dynamic sram capable + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p58_get_sram_resources(void *q, u32 *sram_bank_caps, + bool *dynamic_sram_capable) +{ + struct tf_rm_resc_req_entry *query = q; + u32 i; + + for (i = 0; i < CFA_RESOURCE_TYPE_P58_LAST + 1; i++) { + switch (query[i].type) { + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_0: + sram_bank_caps[0] = query[i].max; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_1: + sram_bank_caps[1] = query[i].max; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_2: + sram_bank_caps[2] = query[i].max; + break; + case CFA_RESOURCE_TYPE_P58_SRAM_BANK_3: + sram_bank_caps[3] = query[i].max; + break; + default: + break; + } + } + + *dynamic_sram_capable = false; + return 0; +} + +static int sram_bank_hcapi_type[] = { + CFA_RESOURCE_TYPE_P58_SRAM_BANK_0, + CFA_RESOURCE_TYPE_P58_SRAM_BANK_1, + CFA_RESOURCE_TYPE_P58_SRAM_BANK_2, + CFA_RESOURCE_TYPE_P58_SRAM_BANK_3 +}; + +/** + * Device specific function that set the sram policy + * + * @dir: Receive or transmit direction + * @band_id: SRAM bank id + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p58_set_sram_policy(enum tf_dir dir, + enum tf_sram_bank_id *bank_id) +{ + u8 parent[TF_SRAM_BANK_ID_MAX] = { 0xFF, 0xFF, 0xFF, 0xFF }; + struct tf_rm_element_cfg *rm_cfg = tf_tbl_p58[dir]; + u8 type; + + for (type = TF_TBL_TYPE_FULL_ACT_RECORD; + type <= TF_TBL_TYPE_ACT_MODIFY_64B; type++) { + if (bank_id[type] >= TF_SRAM_BANK_ID_MAX) + return -EINVAL; + + rm_cfg[type].hcapi_type = sram_bank_hcapi_type[bank_id[type]]; + if (parent[bank_id[type]] == 0xFF) { + parent[bank_id[type]] = type; + rm_cfg[type].cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT; + rm_cfg[type].parent_subtype = 0; + if (rm_cfg[type].slices == 0) + rm_cfg[type].slices = 1; + } else { + rm_cfg[type].cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD; + rm_cfg[type].parent_subtype = parent[bank_id[type]]; + } + } + + return 0; +} + +/** + * Device specific function that get the sram policy + * + * @dir: Receive or transmit direction + * @band_id: pointer to SRAM bank id + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +static int tf_dev_p58_get_sram_policy(enum tf_dir dir, + enum tf_sram_bank_id *bank_id) +{ + struct tf_rm_element_cfg *rm_cfg = tf_tbl_p58[dir]; + u8 type; + + for (type = TF_TBL_TYPE_FULL_ACT_RECORD; + type < TF_TBL_TYPE_ACT_MODIFY_64B + 1; type++) + bank_id[type] = rm_cfg[type].hcapi_type - + CFA_RESOURCE_TYPE_P58_SRAM_BANK_0; + + return 0; +} + +/* Truflow P58 device specific functions */ +const struct tf_dev_ops tf_dev_ops_p58_init = { + .tf_dev_get_max_types = tf_dev_p58_get_max_types, + .tf_dev_get_resource_str = tf_dev_p58_get_resource_str, + .tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info, + .tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info, + .tf_dev_alloc_ident = NULL, + .tf_dev_free_ident = NULL, + .tf_dev_get_ident_resc_info = NULL, + .tf_dev_get_tbl_info = NULL, + .tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed, + .tf_dev_alloc_tbl = NULL, + .tf_dev_alloc_sram_tbl = NULL, + .tf_dev_free_tbl = NULL, + .tf_dev_free_sram_tbl = NULL, + .tf_dev_set_tbl = NULL, + .tf_dev_set_sram_tbl = NULL, + .tf_dev_get_tbl = NULL, + .tf_dev_get_sram_tbl = NULL, + .tf_dev_get_bulk_tbl = NULL, + .tf_dev_get_tbl_resc_info = NULL, + .tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment, + .tf_dev_alloc_tcam = NULL, + .tf_dev_free_tcam = NULL, + .tf_dev_set_tcam = NULL, + .tf_dev_get_tcam = NULL, + .tf_dev_get_tcam_resc_info = NULL, + .tf_dev_word_align = NULL, + .tf_dev_map_hcapi_caps = tf_dev_p58_map_hcapi_caps, + .tf_dev_get_sram_resources = tf_dev_p58_get_sram_resources, + .tf_dev_set_sram_policy = tf_dev_p58_set_sram_policy, + .tf_dev_get_sram_policy = tf_dev_p58_get_sram_policy, +}; + +/* Truflow P58 device specific functions */ +const struct tf_dev_ops tf_dev_ops_p58 = { + .tf_dev_get_max_types = tf_dev_p58_get_max_types, + .tf_dev_get_resource_str = tf_dev_p58_get_resource_str, + .tf_dev_set_tcam_slice_info = tf_dev_p58_set_tcam_slice_info, + .tf_dev_get_tcam_slice_info = tf_dev_p58_get_tcam_slice_info, + 
.tf_dev_alloc_ident = tf_ident_alloc, + .tf_dev_free_ident = tf_ident_free, + .tf_dev_get_ident_resc_info = tf_ident_get_resc_info, + .tf_dev_is_sram_managed = tf_dev_p58_is_sram_managed, + .tf_dev_alloc_tbl = tf_tbl_alloc, + .tf_dev_alloc_sram_tbl = tf_tbl_sram_alloc, + .tf_dev_free_tbl = tf_tbl_free, + .tf_dev_free_sram_tbl = tf_tbl_sram_free, + .tf_dev_set_tbl = tf_tbl_set, + .tf_dev_set_sram_tbl = tf_tbl_sram_set, + .tf_dev_get_tbl = tf_tbl_get, + .tf_dev_get_sram_tbl = tf_tbl_sram_get, + .tf_dev_get_bulk_tbl = tf_tbl_bulk_get, + .tf_dev_get_bulk_sram_tbl = tf_tbl_sram_bulk_get, + .tf_dev_get_tbl_info = tf_dev_p58_get_sram_tbl_info, + .tf_dev_get_tbl_resc_info = tf_tbl_get_resc_info, + .tf_dev_get_shared_tbl_increment = tf_dev_p58_get_shared_tbl_increment, + .tf_dev_alloc_tcam = tf_tcam_alloc, + .tf_dev_free_tcam = tf_tcam_free, + .tf_dev_set_tcam = tf_tcam_set, + .tf_dev_get_tcam = tf_tcam_get, + .tf_dev_alloc_search_tcam = tf_tcam_alloc_search, + .tf_dev_insert_int_em_entry = tf_em_hash_insert_int_entry, + .tf_dev_delete_int_em_entry = tf_em_hash_delete_int_entry, + .tf_dev_move_int_em_entry = tf_em_move_int_entry, + .tf_dev_get_tcam_resc_info = tf_tcam_get_resc_info, + .tf_dev_get_em_resc_info = tf_em_get_resc_info, + .tf_dev_set_if_tbl = tf_if_tbl_set, + .tf_dev_get_if_tbl = tf_if_tbl_get, + .tf_dev_set_global_cfg = tf_global_cfg_set, + .tf_dev_get_global_cfg = tf_global_cfg_get, + .tf_dev_cfa_key_hash = hcapi_cfa_p58_key_hash, + .tf_dev_word_align = tf_dev_p58_word_align, + .tf_dev_map_hcapi_caps = tf_dev_p58_map_hcapi_caps, + .tf_dev_get_sram_resources = tf_dev_p58_get_sram_resources, + .tf_dev_set_sram_policy = tf_dev_p58_set_sram_policy, + .tf_dev_get_sram_policy = tf_dev_p58_get_sram_policy, +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.h new file mode 100644 index 000000000000..5c04b58634b9 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_device_p58.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_DEVICE_P58_H_ +#define _TF_DEVICE_P58_H_ + +#include "cfa_resource_types.h" +#include "tf_core.h" +#include "tf_rm.h" +#include "tf_if_tbl.h" +#include "tf_global_cfg.h" + +extern struct tf_rm_element_cfg tf_tbl_p58[TF_DIR_MAX][TF_TBL_TYPE_MAX]; + +struct tf_rm_element_cfg tf_ident_p58[TF_IDENT_TYPE_MAX] = { + [TF_IDENT_TYPE_L2_CTXT_HIGH] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH, + 0, 0 + }, + [TF_IDENT_TYPE_L2_CTXT_LOW] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW, + 0, 0 + }, + [TF_IDENT_TYPE_PROF_FUNC] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_FUNC, + 0, 0 + }, + [TF_IDENT_TYPE_WC_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID, + 0, 0 + }, + [TF_IDENT_TYPE_EM_PROF] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_EM_PROF_ID, + 0, 0 + }, +}; + +struct tf_rm_element_cfg tf_tcam_p58[TF_TCAM_TBL_TYPE_MAX] = { + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = { + TF_RM_ELEM_CFG_HCAPI_BA, + CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_PROF_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_PROF_TCAM, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_WC_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_WC_TCAM, + 0, 0 + }, + [TF_TCAM_TBL_TYPE_VEB_TCAM] = { + TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_VEB_TCAM, + 0, 0 + }, +}; + +struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = { + [TF_EM_TBL_TYPE_EM_RECORD] = { + TF_RM_ELEM_CFG_HCAPI, CFA_RESOURCE_TYPE_P58_EM_REC, + 0, 0 + }, +}; + +struct tf_if_tbl_cfg tf_if_tbl_p58[TF_IF_TBL_TYPE_MAX] = { + [TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR] = { + 
TF_IF_TBL_CFG, CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR}, + [TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR] = { + TF_IF_TBL_CFG, CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR}, + [TF_IF_TBL_TYPE_ILT] = { + TF_IF_TBL_CFG, CFA_P58_TBL_ILT}, + [TF_IF_TBL_TYPE_VSPT] = { + TF_IF_TBL_CFG, CFA_P58_TBL_VSPT}, +}; + +struct tf_global_cfg_cfg tf_global_cfg_p58[TF_GLOBAL_CFG_TYPE_MAX] = { + [TF_TUNNEL_ENCAP] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_TUNNEL_ENCAP + }, + [TF_ACTION_BLOCK] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_ACTION_BLOCK + }, + [TF_COUNTER_CFG] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_COUNTER_CFG + }, + [TF_METER_CFG] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_METER_CFG + }, + [TF_METER_INTERVAL_CFG] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_METER_INTERVAL_CFG + }, + [TF_DSCP_RMP_CFG] = { + TF_GLOBAL_CFG_CFG_HCAPI, TF_DSCP_RMP_CFG + }, +}; + +const struct tf_hcapi_resource_map tf_hcapi_res_map_p58[CFA_RESOURCE_TYPE_P58_LAST + 1] = { + [CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_HIGH] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_L2_CTXT_HIGH + }, + [CFA_RESOURCE_TYPE_P58_L2_CTXT_REMAP_LOW] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_L2_CTXT_LOW + }, + [CFA_RESOURCE_TYPE_P58_PROF_FUNC] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_PROF_FUNC + }, + [CFA_RESOURCE_TYPE_P58_WC_TCAM_PROF_ID] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_WC_PROF + }, + [CFA_RESOURCE_TYPE_P58_EM_PROF_ID] = { + TF_MODULE_TYPE_IDENTIFIER, 1 << TF_IDENT_TYPE_EM_PROF + }, + [CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_HIGH] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH + }, + [CFA_RESOURCE_TYPE_P58_L2_CTXT_TCAM_LOW] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW + }, + [CFA_RESOURCE_TYPE_P58_PROF_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_PROF_TCAM + }, + [CFA_RESOURCE_TYPE_P58_WC_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_WC_TCAM + }, + [CFA_RESOURCE_TYPE_P58_VEB_TCAM] = { + TF_MODULE_TYPE_TCAM, 1 << TF_TCAM_TBL_TYPE_VEB_TCAM + }, + 
[CFA_RESOURCE_TYPE_P58_EM_FKB] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_EM_FKB + }, + [CFA_RESOURCE_TYPE_P58_WC_FKB] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_WC_FKB + }, + [CFA_RESOURCE_TYPE_P58_METER_PROF] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METER_PROF + }, + [CFA_RESOURCE_TYPE_P58_METER] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METER_INST + }, + [CFA_RESOURCE_TYPE_P58_METER_DROP_CNT] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METER_DROP_CNT + }, + [CFA_RESOURCE_TYPE_P58_MIRROR] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_MIRROR_CONFIG + }, + [CFA_RESOURCE_TYPE_P58_METADATA] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_METADATA + }, + /* Resources in bank 1 */ + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = { + TF_MODULE_TYPE_TABLE, + 1 << TF_TBL_TYPE_FULL_ACT_RECORD + | 1 << TF_TBL_TYPE_COMPACT_ACT_RECORD + }, + /* Resources in bank 2 */ + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = { + TF_MODULE_TYPE_TABLE, + 1 << TF_TBL_TYPE_ACT_ENCAP_8B | + 1 << TF_TBL_TYPE_ACT_ENCAP_16B | + 1 << TF_TBL_TYPE_ACT_ENCAP_32B | + 1 << TF_TBL_TYPE_ACT_ENCAP_64B | + 1 << TF_TBL_TYPE_ACT_MODIFY_8B | + 1 << TF_TBL_TYPE_ACT_MODIFY_16B | + 1 << TF_TBL_TYPE_ACT_MODIFY_32B | + 1 << TF_TBL_TYPE_ACT_MODIFY_64B + + }, + /* Resources in bank 0 */ + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = { + TF_MODULE_TYPE_TABLE, + 1 << TF_TBL_TYPE_ACT_SP_SMAC | + 1 << TF_TBL_TYPE_ACT_SP_SMAC_IPV4 | + 1 << TF_TBL_TYPE_ACT_SP_SMAC_IPV6 + }, + /* Resources in bank 3 */ + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = { + TF_MODULE_TYPE_TABLE, 1 << TF_TBL_TYPE_ACT_STATS_64 + }, + [CFA_RESOURCE_TYPE_P58_EM_REC] = { + TF_MODULE_TYPE_EM, 1 << TF_EM_TBL_TYPE_EM_RECORD + }, +}; + +#endif /* _TF_DEVICE_P58_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em.h new file mode 100644 index 000000000000..1b4dbef2cd9b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: 
BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_EM_H_ +#define _TF_EM_H_ + +#include "tf_core.h" +#include "tf_session.h" +#include "hcapi_cfa_defs.h" + +#define TF_EM_MIN_ENTRIES BIT(15) /* 32K */ +#define TF_EM_MAX_ENTRIES BIT(27) /* 128M */ + +#define TF_P4_HW_EM_KEY_MAX_SIZE 52 +#define TF_P4_EM_KEY_RECORD_SIZE 64 + +#define TF_P58_HW_EM_KEY_MAX_SIZE 80 + +#define TF_EM_MAX_MASK 0x7FFF +#define TF_EM_MAX_ENTRY (128 * 1024 * 1024) + +/** + * Hardware Page sizes supported for EEM: + * 4K, 8K, 64K, 256K, 1M, 2M, 4M, 1G. + * + * Round-down other page sizes to the lower hardware page + * size supported. + */ +#define TF_EM_PAGE_SIZE_4K 12 +#define TF_EM_PAGE_SIZE_8K 13 +#define TF_EM_PAGE_SIZE_64K 16 +#define TF_EM_PAGE_SIZE_256K 18 +#define TF_EM_PAGE_SIZE_1M 20 +#define TF_EM_PAGE_SIZE_2M 21 +#define TF_EM_PAGE_SIZE_4M 22 +#define TF_EM_PAGE_SIZE_1G 30 + +/* Set page size */ +#define BNXT_TF_PAGE_SIZE TF_EM_PAGE_SIZE_2M + +#if (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_4K) /** 4K */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_4K +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_8K) /** 8K */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_8K +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_64K) /** 64K */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_64K +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_256K) /** 256K */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_256K +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_1M) /** 1M */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_1M +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_2M) /** 2M */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_2M +#define 
TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_4M) /** 4M */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_4M +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M +#elif (BNXT_TF_PAGE_SIZE == TF_EM_PAGE_SIZE_1G) /** 1G */ +#define TF_EM_PAGE_SHIFT TF_EM_PAGE_SIZE_1G +#define TF_EM_PAGE_SIZE_ENUM HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G +#else +#error "Invalid Page Size specified. Please use a TF_EM_PAGE_SIZE_n define" +#endif + +/* System memory always uses 4K pages */ +#ifdef TF_USE_SYSTEM_MEM +#define TF_EM_PAGE_SIZE BIT(TF_EM_PAGE_SIZE_4K) +#define TF_EM_PAGE_ALIGNMENT BIT(TF_EM_PAGE_SIZE_4K) +#else +#define TF_EM_PAGE_SIZE BIT(TF_EM_PAGE_SHIFT) +#define TF_EM_PAGE_ALIGNMENT BIT(TF_EM_PAGE_SHIFT) +#endif + +/* Used to build GFID: + * + * 15 2 0 + * +--------------+--+ + * | Index |E | + * +--------------+--+ + * + * E = Entry (bucket index) + */ +#define TF_EM_INTERNAL_INDEX_SHIFT 2 +#define TF_EM_INTERNAL_INDEX_MASK 0xFFFC +#define TF_EM_INTERNAL_ENTRY_MASK 0x3 + +/** + * EM Entry + * Each EM entry is 512-bit (64-bytes) but ordered differently to EEM. 
+ * + * @hdr: Header is 8 bytes long + * @key: Key is 448 bits - 56 bytes + */ +struct tf_em_64b_entry { + struct cfa_p4_eem_entry_hdr hdr; + u8 key[TF_P4_EM_KEY_RECORD_SIZE - sizeof(struct cfa_p4_eem_entry_hdr)]; +}; + +/* EEM Memory Type */ +enum tf_mem_type { + TF_EEM_MEM_TYPE_INVALID, + TF_EEM_MEM_TYPE_HOST, + TF_EEM_MEM_TYPE_SYSTEM +}; + +/** + * tf_em_cfg_parms definition + * + * @num_elements: Num entries in resource config + * @cfg: Resource config + * @resources: Session resource allocations + * @mem_type: Memory type + */ +struct tf_em_cfg_parms { + u16 num_elements; + struct tf_rm_element_cfg *cfg; + struct tf_session_resources *resources; + enum tf_mem_type mem_type; +}; + +/* EM RM database */ +struct em_rm_db { + struct rm_db *em_db[TF_DIR_MAX]; +}; + +/** + * Insert record in to internal EM table + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_insert_int_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms); + +/** + * Delete record from internal EM table + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_delete_int_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms); + +/** + * Insert record in to internal EM table + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_hash_insert_int_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms); + +/** + * Delete record from internal EM table + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_hash_delete_int_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms); + +/** + * Move record from internal EM table + * + * @tfp: Pointer to TruFlow 
handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_move_int_entry(struct tf *tfp, + struct tf_move_em_entry_parms *parms); + +/** + * Bind internal EM device interface + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_int_bind(struct tf *tfp, struct tf_em_cfg_parms *parms); + +/** + * Unbind internal EM device interface + * + * @tfp: Pointer to TruFlow handle + * @parms: Pointer to input parameters + * + * Returns: + * 0 - Success + * -EINVAL - Parameter error + */ +int tf_em_int_unbind(struct tf *tfp); + +/** + * Retrieves the allocated resource info + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_em_get_resc_info(struct tf *tfp, struct tf_em_resource_info *em); + +#endif /* _TF_EM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_hash_internal.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_hash_internal.c new file mode 100644 index 000000000000..bb1345d367ee --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_hash_internal.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */
+
+#include
+#include
+/* NOTE(review): the two bare #include directives above lost their
+ * angle-bracket targets during extraction -- presumably <linux/...>
+ * headers; restore from the original source. TODO confirm.
+ */
+#include "tf_core.h"
+#include "tf_util.h"
+#include "tf_em.h"
+#include "tf_msg.h"
+#include "tf_ext_flow_handle.h"
+#include "tf_device.h"
+#include "bnxt.h"
+#include "dpool.h"
+
+/* Insert EM internal entry API
+ *
+ * returns:
+ * 0 - Success
+ */
+int tf_em_hash_insert_int_entry(struct tf *tfp,
+				struct tf_insert_em_entry_parms *parms)
+{
+	struct tf_dev_info *dev;
+	struct tf_session *tfs;
+	u8 num_of_entries = 0;
+	struct dpool *pool;
+	u16 rptr_index = 0;
+	u8 rptr_entry = 0;
+	u8 fw_session_id;
+	u32 key0_hash;
+	u32 key1_hash;
+	u64 big_hash;
+	u32 index;
+	u32 gfid;
+	int rc;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc)
+		return rc;
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc)
+		return rc;
+
+	/* Fix: verify the key-hash op exists before consuming an EM
+	 * index; the original checked after dpool_alloc() and leaked
+	 * the allocated index on the -EINVAL path.
+	 */
+	if (!dev->ops->tf_dev_cfa_key_hash)
+		return -EINVAL;
+
+	pool = (struct dpool *)tfs->em_pool[parms->dir];
+	index = dpool_alloc(pool,
+			    parms->em_record_sz_in_bits / 128,
+			    DP_DEFRAG_TO_FIT);
+
+	if (index == DP_INVALID_INDEX) {
+		netdev_dbg(tfp->bp->dev,
+			   "%s, EM entry index allocation failed\n",
+			   tf_dir_2_str(parms->dir));
+		return -ENOMEM;	/* no more space to add entries */
+	}
+
+	big_hash = dev->ops->tf_dev_cfa_key_hash((u8 *)parms->key,
+						 TF_P58_HW_EM_KEY_MAX_SIZE * 8);
+	key0_hash = (u32)(big_hash >> 32);
+	key1_hash = (u32)(big_hash & 0xFFFFFFFF);
+
+	netdev_dbg(tfp->bp->dev, "Key0 hash:0x%04x\n", key0_hash);
+	netdev_dbg(tfp->bp->dev, "Key1 hash:0x%04x\n", key1_hash);
+
+	rptr_index = index;
+	rc = tf_msg_hash_insert_em_internal_entry(tfp, parms, key0_hash,
+						  key1_hash, fw_session_id,
+						  &rptr_index, &rptr_entry,
+						  &num_of_entries);
+	if (rc) {
+		/* Free the allocated index before returning */
+		dpool_free(pool, index);
+		return rc;
+	}
+
+	netdev_dbg(tfp->bp->dev,
+		   "%s, Internal index:%d rptr_i:0x%x rptr_e:0x%x num:%d\n",
+		   tf_dir_2_str(parms->dir), index, rptr_index, rptr_entry,
+		   num_of_entries);
+
+	TF_SET_GFID(gfid,
+		    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) | rptr_entry),
+		    0); /* N/A for internal table */
+
+	TF_SET_FLOW_ID(parms->flow_id, gfid, TF_GFID_TABLE_INTERNAL,
+		       parms->dir);
+
+	TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle, (u32)num_of_entries,
+				     0, TF_FLAGS_FLOW_HANDLE_INTERNAL,
+				     rptr_index, rptr_entry, 0);
+	dpool_set_entry_data(pool, index, parms->flow_handle);
+	return 0;
+}
+
+/* Delete EM internal entry API
+ *
+ * returns:
+ * 0
+ * -EINVAL
+ */
+int tf_em_hash_delete_int_entry(struct tf *tfp,
+				struct tf_delete_em_entry_parms *parms)
+{
+	struct tf_session *tfs;
+	struct dpool *pool;
+	u8 fw_session_id;
+	int rc = 0;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		netdev_dbg(tfp->bp->dev,
+			   "%s: Failed to lookup session, rc:%d\n",
+			   tf_dir_2_str(parms->dir), rc);
+		return rc;
+	}
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc)
+		return rc;
+
+	rc = tf_msg_delete_em_entry(tfp, parms, fw_session_id);
+
+	/* Return resource to pool */
+	if (rc == 0) {
+		pool = (struct dpool *)tfs->em_pool[parms->dir];
+		dpool_free(pool, parms->index);
+	}
+
+	return rc;
+}
+
+/* Move EM internal entry API
+ *
+ * returns:
+ * 0
+ * -EINVAL
+ */
+int tf_em_move_int_entry(struct tf *tfp,
+			 struct tf_move_em_entry_parms *parms)
+{
+	struct tf_session *tfs;
+	struct dpool *pool;
+	u8 fw_session_id;
+	int rc = 0;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session(tfp, &tfs);
+	if (rc) {
+		netdev_dbg(tfp->bp->dev,
+			   "%s: Failed to lookup session, rc:%d\n",
+			   tf_dir_2_str(parms->dir), rc);
+		return rc;
+	}
+
+	rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+	if (rc)
+		return rc;
+
+	rc = tf_msg_move_em_entry(tfp, parms, fw_session_id);
+
+	/* Return resource to pool */
+	if (rc == 0) {
+		pool = (struct dpool *)tfs->em_pool[parms->dir];
+		dpool_free(pool, parms->index);
+	}
+
return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_internal.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_internal.c new file mode 100644 index 000000000000..12e9e5fdadbd --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_em_internal.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include "tf_core.h" +#include "tf_util.h" +#include "tf_em.h" +#include "tf_msg.h" +#include "tf_ext_flow_handle.h" +#include "bnxt.h" + +#define TF_EM_DB_EM_REC 0 +#include "dpool.h" + +/** + * Insert EM internal entry API + * + * returns: + * 0 - Success + */ +int tf_em_insert_int_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *parms) +{ + struct tf_session *tfs; + u8 num_of_entries = 0; + struct dpool *pool; + u16 rptr_index = 0; + u8 rptr_entry = 0; + u8 fw_session_id; + u32 index; + u32 gfid; + int rc; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(NULL, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + pool = (struct dpool *)tfs->em_pool[parms->dir]; + index = dpool_alloc(pool, TF_SESSION_EM_ENTRY_SIZE, 0); + if (index == DP_INVALID_INDEX) { + netdev_dbg(NULL, "%s, EM entry index allocation failed\n", + tf_dir_2_str(parms->dir)); + return -1; + } + + rptr_index = index; + rc = tf_msg_insert_em_internal_entry(tfp, parms, fw_session_id, + &rptr_index, &rptr_entry, + &num_of_entries); + if (rc) { + /* Free the allocated index before returning */ + dpool_free(pool, index); + return -1; + } + netdev_dbg(NULL, + "%s, Internal index:%d rptr_i:0x%x rptr_e:0x%x num:%d\n", + tf_dir_2_str(parms->dir), index, rptr_index, rptr_entry, + num_of_entries); + TF_SET_GFID(gfid, + ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) | 
rptr_entry), + 0); /* N/A for internal table */ + + TF_SET_FLOW_ID(parms->flow_id, gfid, TF_GFID_TABLE_INTERNAL, + parms->dir); + + TF_SET_FIELDS_IN_FLOW_HANDLE(parms->flow_handle, + (u32)num_of_entries, + 0, + TF_FLAGS_FLOW_HANDLE_INTERNAL, + rptr_index, + rptr_entry, + 0); + return 0; +} + +/** + * Delete EM internal entry API + * + * returns: + * 0 + * -EINVAL + */ +int tf_em_delete_int_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *parms) +{ + struct tf_session *tfs; + struct dpool *pool; + u8 fw_session_id; + int rc = 0; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(NULL, "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_msg_delete_em_entry(tfp, parms, fw_session_id); + + /* Return resource to pool */ + if (rc == 0) { + pool = (struct dpool *)tfs->em_pool[parms->dir]; + dpool_free(pool, parms->index); + } + + return rc; +} + +static int tf_em_move_callback(void *user_data, u64 entry_data, + u32 new_index) +{ + struct tf *tfp = (struct tf *)user_data; + struct tf_move_em_entry_parms parms = { 0 }; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + + parms.tbl_scope_id = 0; + parms.flow_handle = entry_data; + parms.new_index = new_index; + TF_GET_DIR_FROM_FLOW_ID(entry_data, parms.dir); + parms.mem = TF_MEM_INTERNAL; + + /* Retrieve the session information */ + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to lookup session, rc:%d\n", + tf_dir_2_str(parms.dir), rc); + return rc; + } + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to lookup device, rc:%d\n", + tf_dir_2_str(parms.dir), rc); + return rc; + } + + if (!dev->ops->tf_dev_move_int_em_entry) + rc = dev->ops->tf_dev_move_int_em_entry(tfp, &parms); + 
else + rc = -EOPNOTSUPP; + + return rc; +} + +int tf_em_int_bind(struct tf *tfp, struct tf_em_cfg_parms *parms) +{ + struct tf_rm_create_db_parms db_cfg = { 0 }; + struct tf_rm_get_alloc_info_parms iparms; + int db_rc[TF_DIR_MAX] = { 0 }; + struct tf_rm_alloc_info info; + struct tf_session *tfs; + struct em_rm_db *em_db; + struct dpool *mem_va; + int rc; + int i; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + em_db = vzalloc(sizeof(*em_db)); + if (!em_db) + return -ENOMEM; + + for (i = 0; i < TF_DIR_MAX; i++) + em_db->em_db[i] = NULL; + tf_session_set_db(tfp, TF_MODULE_TYPE_EM, em_db); + + db_cfg.module = TF_MODULE_TYPE_EM; + db_cfg.num_elements = parms->num_elements; + db_cfg.cfg = parms->cfg; + + for (i = 0; i < TF_DIR_MAX; i++) { + db_cfg.dir = i; + db_cfg.alloc_cnt = parms->resources->em_cnt[i].cnt; + + /* Check if we got any request to support EEM, if so + * we build an EM Int DB holding Table Scopes. 
+ */ + if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] == 0) + continue; + + if (db_cfg.alloc_cnt[TF_EM_TBL_TYPE_EM_RECORD] % + TF_SESSION_EM_ENTRY_SIZE != 0) { + rc = -ENOMEM; + netdev_dbg(tfp->bp->dev, + "%s, EM must be in blocks of %d, rc %d\n", + tf_dir_2_str(i), TF_SESSION_EM_ENTRY_SIZE, + rc); + + return rc; + } + + db_cfg.rm_db = (void *)&em_db->em_db[i]; + if (tf_session_is_shared_session(tfs) && + (!tf_session_is_shared_session_creator(tfs))) + db_rc[i] = tf_rm_create_db_no_reservation(tfp, + &db_cfg); + else + db_rc[i] = tf_rm_create_db(tfp, &db_cfg); + if (db_rc[i]) { + netdev_dbg(tfp->bp->dev, + "%s: EM Int DB creation failed\n", + tf_dir_2_str(i)); + } + } + + /* No db created */ + if (db_rc[TF_DIR_RX] && db_rc[TF_DIR_TX]) { + netdev_dbg(tfp->bp->dev, "EM Int DB creation failed\n"); + return db_rc[TF_DIR_RX]; + } + + if (tf_session_is_shared_session(tfs)) + return 0; + + for (i = 0; i < TF_DIR_MAX; i++) { + iparms.rm_db = em_db->em_db[i]; + iparms.subtype = TF_EM_DB_EM_REC; + iparms.info = &info; + + rc = tf_rm_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: EM DB get info failed\n", + tf_dir_2_str(i)); + return rc; + } + /* Allocate stack pool */ + mem_va = vzalloc(sizeof(*mem_va)); + if (!mem_va) { + rc = -ENOMEM; + return rc; + } + + tfs->em_pool[i] = mem_va; + + rc = dpool_init(tfs->em_pool[i], + iparms.info->entry.start, + iparms.info->entry.stride, + 7, + (void *)tfp, + tf_em_move_callback); + /* Logging handled in tf_create_em_pool */ + if (rc) + return rc; + } + + return 0; +} + +int tf_em_int_unbind(struct tf *tfp) +{ + struct tf_rm_free_db_parms fparms = { 0 }; + struct em_rm_db *em_db; + void *em_db_ptr = NULL; + struct tf_session *tfs; + int rc; + int i; + + if (!tfp) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + if (!tf_session_is_shared_session(tfs)) { + for (i = 0; i < TF_DIR_MAX; i++) { + if (!tfs->em_pool[i]) + continue; + 
dpool_free_all(tfs->em_pool[i]); + } + } + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "Em_db is not initialized\n"); + return 0; + } + em_db = (struct em_rm_db *)em_db_ptr; + + for (i = 0; i < TF_DIR_MAX; i++) { + if (!em_db->em_db[i]) + continue; + fparms.dir = i; + fparms.rm_db = em_db->em_db[i]; + rc = tf_rm_free_db(tfp, &fparms); + if (rc) + return rc; + + em_db->em_db[i] = NULL; + } + + /* Free EM database pointer */ + tf_session_set_db(tfp, TF_MODULE_TYPE_EM, NULL); + vfree(em_db); + + return 0; +} + +int tf_em_get_resc_info(struct tf *tfp, struct tf_em_resource_info *em) +{ + struct tf_rm_get_alloc_info_parms ainfo; + struct tf_resource_info *dinfo; + void *em_db_ptr = NULL; + struct em_rm_db *em_db; + int rc; + int d; + + if (!tfp || !em) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr); + if (rc == -ENOMEM) + return 0; /* db does not exist */ + else if (rc) + return rc; /* db error */ + + em_db = (struct em_rm_db *)em_db_ptr; + + /* check if reserved resource for EM is multiple of num_slices */ + for (d = 0; d < TF_DIR_MAX; d++) { + ainfo.rm_db = em_db->em_db[d]; + dinfo = em[d].info; + + if (!ainfo.rm_db) + continue; + + ainfo.info = (struct tf_rm_alloc_info *)dinfo; + ainfo.subtype = 0; + rc = tf_rm_get_all_info(&ainfo, TF_EM_TBL_TYPE_MAX); + if (rc && rc != -EOPNOTSUPP) + return rc; + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_ext_flow_handle.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_ext_flow_handle.h new file mode 100644 index 000000000000..44b83386aef9 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_ext_flow_handle.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_EXT_FLOW_HANDLE_H_ +#define _TF_EXT_FLOW_HANDLE_H_ + +#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK 0x00000000F0000000ULL +#define TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT 28 +#define TF_FLOW_TYPE_FLOW_HANDLE_MASK 0x00000000000000F0ULL +#define TF_FLOW_TYPE_FLOW_HANDLE_SFT 4 +#define TF_FLAGS_FLOW_HANDLE_MASK 0x000000000000000FULL +#define TF_FLAGS_FLOW_HANDLE_SFT 0 +#define TF_INDEX_FLOW_HANDLE_MASK 0xFFFFFFF000000000ULL +#define TF_INDEX_FLOW_HANDLE_SFT 36 +#define TF_ENTRY_NUM_FLOW_HANDLE_MASK 0x0000000E00000000ULL +#define TF_ENTRY_NUM_FLOW_HANDLE_SFT 33 +#define TF_HASH_TYPE_FLOW_HANDLE_MASK 0x0000000100000000ULL +#define TF_HASH_TYPE_FLOW_HANDLE_SFT 32 + +#define TF_FLAGS_FLOW_HANDLE_INTERNAL 0x2 +#define TF_FLAGS_FLOW_HANDLE_EXTERNAL 0x0 + +#define TF_FLOW_HANDLE_MASK (TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK | \ + TF_FLOW_TYPE_FLOW_HANDLE_MASK | \ + TF_FLAGS_FLOW_HANDLE_MASK | \ + TF_INDEX_FLOW_HANDLE_MASK | \ + TF_ENTRY_NUM_FLOW_HANDLE_MASK | \ + TF_HASH_TYPE_FLOW_HANDLE_MASK) + +#define TF_GET_FIELDS_FROM_FLOW_HANDLE(flow_handle, \ + num_key_entries, \ + flow_type, \ + flags, \ + index, \ + entry_num, \ + hash_type) \ +do { \ + (num_key_entries) = \ + (((flow_handle) & TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK) >> \ + TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT); \ + (flow_type) = (((flow_handle) & TF_FLOW_TYPE_FLOW_HANDLE_MASK) >> \ + TF_FLOW_TYPE_FLOW_HANDLE_SFT); \ + (flags) = (((flow_handle) & TF_FLAGS_FLOW_HANDLE_MASK) >> \ + TF_FLAGS_FLOW_HANDLE_SFT); \ + (index) = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >> \ + TF_INDEX_FLOW_HANDLE_SFT); \ + (entry_num) = (((flow_handle) & TF_ENTRY_NUM_FLOW_HANDLE_MASK) >> \ + TF_ENTRY_NUM_FLOW_HANDLE_SFT); \ + (hash_type) = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >> \ + TF_HASH_TYPE_FLOW_HANDLE_SFT); \ +} while (0) + +#define TF_SET_FIELDS_IN_FLOW_HANDLE(flow_handle, \ + num_key_entries, \ + flow_type, \ + flags, \ + index, \ + entry_num, \ + hash_type) \ +do { \ + (flow_handle) &= ~TF_FLOW_HANDLE_MASK; \ + 
(flow_handle) |= \ + (((num_key_entries) << TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT) & \ + TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK); \ + (flow_handle) |= (((flow_type) << TF_FLOW_TYPE_FLOW_HANDLE_SFT) & \ + TF_FLOW_TYPE_FLOW_HANDLE_MASK); \ + (flow_handle) |= (((flags) << TF_FLAGS_FLOW_HANDLE_SFT) & \ + TF_FLAGS_FLOW_HANDLE_MASK); \ + (flow_handle) |= ((((uint64_t)index) << TF_INDEX_FLOW_HANDLE_SFT) & \ + TF_INDEX_FLOW_HANDLE_MASK); \ + (flow_handle) |= \ + ((((uint64_t)entry_num) << TF_ENTRY_NUM_FLOW_HANDLE_SFT) & \ + TF_ENTRY_NUM_FLOW_HANDLE_MASK); \ + (flow_handle) |= \ + ((((uint64_t)hash_type) << TF_HASH_TYPE_FLOW_HANDLE_SFT) & \ + TF_HASH_TYPE_FLOW_HANDLE_MASK); \ +} while (0) + +#define TF_SET_FIELDS_IN_WH_FLOW_HANDLE TF_SET_FIELDS_IN_FLOW_HANDLE + +#define TF_GET_INDEX_FROM_FLOW_HANDLE(flow_handle, \ + index) \ +{ \ + (index) = (((flow_handle) & TF_INDEX_FLOW_HANDLE_MASK) >> \ + TF_INDEX_FLOW_HANDLE_SFT); \ +} + +#define TF_GET_HASH_TYPE_FROM_FLOW_HANDLE(flow_handle, \ + hash_type) \ +{ \ + (hash_type) = (((flow_handle) & TF_HASH_TYPE_FLOW_HANDLE_MASK) >> \ + TF_HASH_TYPE_FLOW_HANDLE_SFT); \ +} + +#define TF_GET_NUM_KEY_ENTRIES_FROM_FLOW_HANDLE(flow_handle, \ + num_key_entries) \ +{ \ + ((num_key_entries) = \ + (((flow_handle) & TF_NUM_KEY_ENTRIES_FLOW_HANDLE_MASK) >> \ + TF_NUM_KEY_ENTRIES_FLOW_HANDLE_SFT)); \ +} + +#define TF_GET_ENTRY_NUM_FROM_FLOW_HANDLE(flow_handle, \ + entry_num) \ +{ \ + ((entry_num) = \ + (((flow_handle) & TF_ENTRY_NUM_FLOW_HANDLE_MASK) >> \ + TF_ENTRY_NUM_FLOW_HANDLE_SFT)); \ +} + +#define TF_GET_FLAG_FROM_FLOW_HANDLE(flow_handle, flag) \ + (flag = (((flow_handle) & TF_FLAGS_FLOW_HANDLE_MASK) >>\ + TF_FLAGS_FLOW_HANDLE_SFT)) + +/* 32 bit Flow ID handlers */ +#define TF_GFID_FLOW_ID_MASK 0xFFFFFFF0UL +#define TF_GFID_FLOW_ID_SFT 4 +#define TF_FLAG_FLOW_ID_MASK 0x00000002UL +#define TF_FLAG_FLOW_ID_SFT 1 +#define TF_DIR_FLOW_ID_MASK 0x00000001UL +#define TF_DIR_FLOW_ID_SFT 0 + +#define TF_SET_FLOW_ID(flow_id, gfid, flag, dir) \ +{ \ + 
(flow_id) &= ~(TF_GFID_FLOW_ID_MASK | \ + TF_FLAG_FLOW_ID_MASK | \ + TF_DIR_FLOW_ID_MASK); \ + (flow_id) |= (((gfid) << TF_GFID_FLOW_ID_SFT) & \ + TF_GFID_FLOW_ID_MASK) | \ + (((flag) << TF_FLAG_FLOW_ID_SFT) & \ + TF_FLAG_FLOW_ID_MASK) | \ + (((dir) << TF_DIR_FLOW_ID_SFT) & \ + TF_DIR_FLOW_ID_MASK); \ +} + +#define TF_GET_GFID_FROM_FLOW_ID(flow_id, gfid) \ +{ \ + (gfid) = (((flow_id) & TF_GFID_FLOW_ID_MASK) >> \ + TF_GFID_FLOW_ID_SFT); \ +} + +#define TF_GET_DIR_FROM_FLOW_ID(flow_id, dir) \ +{ \ + (dir) = (((flow_id) & TF_DIR_FLOW_ID_MASK) >> \ + TF_DIR_FLOW_ID_SFT); \ +} + +#define TF_GET_FLAG_FROM_FLOW_ID(flow_id, flag) \ +{ \ + (flag) = (((flow_id) & TF_FLAG_FLOW_ID_MASK) >> \ + TF_FLAG_FLOW_ID_SFT); \ +} + +/* 32 bit GFID handlers */ +#define TF_HASH_INDEX_GFID_MASK 0x07FFFFFFUL +#define TF_HASH_INDEX_GFID_SFT 0 +#define TF_HASH_TYPE_GFID_MASK 0x08000000UL +#define TF_HASH_TYPE_GFID_SFT 27 + +#define TF_GFID_TABLE_INTERNAL 0 +#define TF_GFID_TABLE_EXTERNAL 1 + +#define TF_SET_GFID(gfid, index, type) \ +{ \ + (gfid) = (((index) << TF_HASH_INDEX_GFID_SFT) & \ + TF_HASH_INDEX_GFID_MASK) | \ + (((type) << TF_HASH_TYPE_GFID_SFT) & \ + TF_HASH_TYPE_GFID_MASK); \ +} + +#define TF_GET_HASH_INDEX_FROM_GFID(gfid, index) \ +{ \ + (index) = (((gfid) & TF_HASH_INDEX_GFID_MASK) >> \ + TF_HASH_INDEX_GFID_SFT); \ +} + +#define TF_GET_HASH_TYPE_FROM_GFID(gfid, type) \ +{ \ + (type) = (((gfid) & TF_HASH_TYPE_GFID_MASK) >> \ + TF_HASH_TYPE_GFID_SFT); \ +} + +#endif /* _TF_EXT_FLOW_HANDLE_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.c new file mode 100644 index 000000000000..20f7894307de --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
 */
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include "tf_global_cfg.h"
+#include "tf_util.h"
+#include "tf_msg.h"
+#include "tf_session.h"
+
+struct tf;
+
+/* Global cfg database */
+struct tf_global_cfg_db {
+ struct tf_global_cfg_cfg *global_cfg_db[TF_DIR_MAX];
+};
+
+/* Get HCAPI type parameters for a single element
+ *
+ * @global_cfg_db: Global Cfg DB Handle
+ * @db_index: DB Index, indicates which DB entry to perform the
+ * action on.
+ * @hcapi_type: Pointer to the hcapi type for the specified db_index
+ */
+struct tf_global_cfg_get_hcapi_parms {
+ void *global_cfg_db;
+ u16 db_index;
+ u16 *hcapi_type;
+};
+
+/**
+ * Check global_cfg_type and return hwrm type.
+ *
+ * @parms: Pointer to the hcapi type parameters (DB handle, DB index,
+ * and output pointer for the resolved hcapi type)
+ *
+ * Returns:
+ * 0 - Success
+ * -EOPNOTSUPP - Type not supported
+ */
+static int tf_global_cfg_get_hcapi_type(struct tf_global_cfg_get_hcapi_parms
+ *parms)
+{
+ struct tf_global_cfg_cfg *global_cfg;
+ enum tf_global_cfg_cfg_type cfg_type;
+
+ global_cfg = (struct tf_global_cfg_cfg *)parms->global_cfg_db;
+ cfg_type = global_cfg[parms->db_index].cfg_type;
+
+ if (cfg_type != TF_GLOBAL_CFG_CFG_HCAPI)
+ return -EOPNOTSUPP;
+
+ *parms->hcapi_type = global_cfg[parms->db_index].hcapi_type;
+
+ return 0;
+}
+
+int tf_global_cfg_bind(struct tf *tfp, struct tf_global_cfg_cfg_parms *parms)
+{
+ struct tf_global_cfg_db *global_cfg_db;
+
+ if (!tfp || !parms)
+ return -EINVAL;
+
+ global_cfg_db = vzalloc(sizeof(*global_cfg_db));
+ if (!global_cfg_db)
+ return -ENOMEM;
+
+ global_cfg_db->global_cfg_db[TF_DIR_RX] = parms->cfg;
+ global_cfg_db->global_cfg_db[TF_DIR_TX] = parms->cfg;
+ tf_session_set_global_db(tfp, (void *)global_cfg_db);
+
+ netdev_dbg(tfp->bp->dev, "Global Cfg - initialized\n");
+ return 0;
+}
+
+int tf_global_cfg_unbind(struct tf *tfp)
+{
+ struct tf_global_cfg_db *global_cfg_db_ptr;
+ int rc;
+
+ if (!tfp)
+ return -EINVAL;
+
+ rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr);
+ if (rc) {
+ 
netdev_dbg(tfp->bp->dev, "global_cfg_db is not initialized\n"); + return 0; + } + + tf_session_set_global_db(tfp, NULL); + vfree(global_cfg_db_ptr); + + return 0; +} + +int tf_global_cfg_set(struct tf *tfp, struct tf_global_cfg_parms *parms) +{ + struct tf_global_cfg_get_hcapi_parms hparms; + struct tf_global_cfg_db *global_cfg_db_ptr; + u8 fw_session_id; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->config) + return -EINVAL; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "No global cfg DBs initialized\n"); + return 0; + } + + /* Convert TF type to HCAPI type */ + hparms.global_cfg_db = global_cfg_db_ptr->global_cfg_db[parms->dir]; + hparms.db_index = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_global_cfg_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, Failed type lookup, type:%d, rc:%d\n", + tf_dir_2_str(parms->dir), parms->type, rc); + return rc; + } + + rc = tf_msg_set_global_cfg(tfp, parms, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s, Set failed, type:%d, rc:%d\n", + tf_dir_2_str(parms->dir), parms->type, -rc); + } + + return 0; +} + +int tf_global_cfg_get(struct tf *tfp, struct tf_global_cfg_parms *parms) +{ + struct tf_global_cfg_get_hcapi_parms hparms; + struct tf_global_cfg_db *global_cfg_db_ptr; + u8 fw_session_id; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->config) + return -EINVAL; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_global_db(tfp, (void **)&global_cfg_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "No Global cfg DBs initialized\n"); + return 0; + } + + hparms.global_cfg_db = global_cfg_db_ptr->global_cfg_db[parms->dir]; + hparms.db_index = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_global_cfg_get_hcapi_type(&hparms); + if (rc) { + 
netdev_dbg(tfp->bp->dev, + "%s, Failed type lookup, type:%d, rc:%d\n", + tf_dir_2_str(parms->dir), parms->type, rc); + return rc; + } + + /* Get the entry */ + rc = tf_msg_get_global_cfg(tfp, parms, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s, Get failed, type:%d, rc:%d\n", + tf_dir_2_str(parms->dir), parms->type, rc); + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.h new file mode 100644 index 000000000000..00e88148dfee --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_global_cfg.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef TF_GLOBAL_CFG_H_ +#define TF_GLOBAL_CFG_H_ + +#include "tf_core.h" + +/* The global cfg module provides processing of global cfg types. */ + +/* struct tf; */ + +/* Internal type not available to user + * but available internally within Truflow + */ +enum tf_global_config_internal_type { + TF_GLOBAL_CFG_INTERNAL_PARIF_2_PF = TF_GLOBAL_CFG_TYPE_MAX, + TF_GLOBAL_CFG_INTERNAL_TYPE_MAX +}; + +/** + * Global cfg configuration enumeration. + */ +enum tf_global_cfg_cfg_type { + TF_GLOBAL_CFG_CFG_NULL, /* No configuration */ + TF_GLOBAL_CFG_CFG_HCAPI, /* HCAPI 'controlled' */ +}; + +/** + * Global cfg configuration structure, used by the Device to configure + * how an individual global cfg type is configured in regard to the HCAPI type. + * + * @cfg_type: Global cfg config controls how the DB for that element + * is processed. + * @hcapi_type: HCAPI Type for the element. Used for TF to HCAPI type + * conversion. 
+ */ +struct tf_global_cfg_cfg { + enum tf_global_cfg_cfg_type cfg_type; + u16 hcapi_type; +}; + +/** + * Global Cfg configuration parameters + * @num_elements: Number of table types in the configuration array + * @cfg: Table Type element configuration array + */ +struct tf_global_cfg_cfg_parms { + u16 num_elements; + struct tf_global_cfg_cfg *cfg; +}; + +/** + * Initializes the Global Cfg module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to Global Cfg configuration parameters + * + * Returns + * - (0) if successful. + * - (-ENOMEM) on failure. + */ +int tf_global_cfg_bind(struct tf *tfp, struct tf_global_cfg_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to Global Cfg configuration parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_global_cfg_unbind(struct tf *tfp); + +/** + * Updates the global configuration table + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to global cfg parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_global_cfg_set(struct tf *tfp, struct tf_global_cfg_parms *parms); + +/** + * Get global configuration + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to global cfg parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
 */
+int tf_global_cfg_get(struct tf *tfp, struct tf_global_cfg_parms *parms);
+
+#endif /* TF_GLOBAL_CFG_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.c new file mode 100644 index 000000000000..ee39cf2924c6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/netdevice.h>
+#include "tf_identifier.h"
+#include "tf_rm.h"
+#include "tf_util.h"
+#include "tf_session.h"
+#include "bnxt_compat.h"
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+
+struct tf;
+
+int tf_ident_bind(struct tf *tfp, struct tf_ident_cfg_parms *parms)
+{
+ struct tf_rm_create_db_parms db_cfg = { 0 };
+ int db_rc[TF_DIR_MAX] = { 0 };
+ struct ident_rm_db *ident_db;
+ struct tf_session *tfs;
+ int rc;
+ int i;
+
+ if (!tfp || !parms)
+ return -EINVAL;
+
+ /* Retrieve the session information */
+ rc = tf_session_get_session_internal(tfp, &tfs);
+ if (rc)
+ return rc;
+
+ ident_db = vzalloc(sizeof(*ident_db));
+ if (!ident_db)
+ return -ENOMEM;
+
+ for (i = 0; i < TF_DIR_MAX; i++)
+ ident_db->ident_db[i] = NULL;
+ tf_session_set_db(tfp, TF_MODULE_TYPE_IDENTIFIER, ident_db);
+
+ db_cfg.module = TF_MODULE_TYPE_IDENTIFIER;
+ db_cfg.num_elements = parms->num_elements;
+ db_cfg.cfg = parms->cfg;
+
+ for (i = 0; i < TF_DIR_MAX; i++) {
+ db_cfg.rm_db = (void *)&ident_db->ident_db[i];
+ db_cfg.dir = i;
+ db_cfg.alloc_cnt = parms->resources->ident_cnt[i].cnt;
+ if (tf_session_is_shared_session(tfs) &&
+ (!tf_session_is_shared_session_creator(tfs)))
+ db_rc[i] = tf_rm_create_db_no_reservation(tfp, &db_cfg);
+ else
+ db_rc[i] = tf_rm_create_db(tfp, &db_cfg);
+
+ if (db_rc[i])
+ netdev_dbg(tfp->bp->dev,
+ "%s: No Identifier DB required\n",
+ tf_dir_2_str(i));
+ }
+
+ /* No db created */
+ if (db_rc[TF_DIR_RX] && db_rc[TF_DIR_TX]) {
+ 
netdev_dbg(tfp->bp->dev, "No Identifier DB created\n"); + return db_rc[TF_DIR_RX]; + } + + netdev_dbg(tfp->bp->dev, "Identifier - initialized\n"); + + return 0; +} + +int tf_ident_unbind(struct tf *tfp) +{ + struct tf_rm_free_db_parms fparms = { 0 }; + struct ident_rm_db *ident_db; + void *ident_db_ptr = NULL; + int rc = 0; + int i; + + if (!tfp) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_IDENTIFIER, &ident_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "Ident_db is not initialized\n"); + return 0; + } + ident_db = (struct ident_rm_db *)ident_db_ptr; + + for (i = 0; i < TF_DIR_MAX; i++) { + if (!ident_db->ident_db[i]) + continue; + fparms.rm_db = ident_db->ident_db[i]; + fparms.dir = i; + rc = tf_rm_free_db(tfp, &fparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "rm free failed on unbind\n"); + } + + ident_db->ident_db[i] = NULL; + } + tf_session_set_db(tfp, TF_MODULE_TYPE_IDENTIFIER, NULL); + vfree(ident_db); + return 0; +} + +int tf_ident_alloc(struct tf *tfp, struct tf_ident_alloc_parms *parms) +{ + struct tf_rm_allocate_parms aparms = { 0 }; + struct ident_rm_db *ident_db; + void *ident_db_ptr = NULL; + u32 base_id; + u32 id; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_IDENTIFIER, &ident_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get ident_db from session, rc:%d\n", + rc); + return rc; + } + ident_db = (struct ident_rm_db *)ident_db_ptr; + + aparms.rm_db = ident_db->ident_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = &id; + aparms.base_index = &base_id; + rc = tf_rm_allocate(&aparms); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s: Failed allocate, type:%d\n", + tf_dir_2_str(parms->dir), parms->type); + return rc; + } + + *parms->id = id; + + return 0; +} + +int tf_ident_free(struct tf *tfp, struct tf_ident_free_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_free_parms fparms = { 0 }; + struct ident_rm_db *ident_db; + 
void *ident_db_ptr = NULL; + int allocated = 0; + u32 base_id; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_IDENTIFIER, &ident_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get ident_db from session, rc:%d\n", + rc); + return rc; + } + ident_db = (struct ident_rm_db *)ident_db_ptr; + + /* Check if element is in use */ + aparms.rm_db = ident_db->ident_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = parms->id; + aparms.base_index = &base_id; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(tfp->bp->dev, + "%s: Entry already free, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, parms->id); + return -EINVAL; + } + + /* Free requested element */ + fparms.rm_db = ident_db->ident_db[parms->dir]; + fparms.subtype = parms->type; + fparms.index = parms->id; + rc = tf_rm_free(&fparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Free failed, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, parms->id); + return rc; + } + + return 0; +} + +int tf_ident_get_resc_info(struct tf *tfp, + struct tf_identifier_resource_info *ident) +{ + struct tf_rm_get_alloc_info_parms ainfo; + struct tf_resource_info *dinfo; + struct ident_rm_db *ident_db; + void *ident_db_ptr = NULL; + int rc; + int d; + + if (!tfp || !ident) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_IDENTIFIER, &ident_db_ptr); + if (rc == -ENOMEM) + return 0; /* db doesn't exist */ + else if (rc) + return rc; /* error getting db */ + + ident_db = (struct ident_rm_db *)ident_db_ptr; + + /* check if reserved resource for WC is multiple of num_slices */ + for (d = 0; d < TF_DIR_MAX; d++) { + ainfo.rm_db = ident_db->ident_db[d]; + + if (!ainfo.rm_db) + continue; + + dinfo = ident[d].info; + + ainfo.info = (struct tf_rm_alloc_info *)dinfo; + ainfo.subtype = 0; + rc = 
tf_rm_get_all_info(&ainfo, TF_IDENT_TYPE_MAX); + if (rc) + return rc; + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.h new file mode 100644 index 000000000000..90a1a8f32543 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_identifier.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_IDENTIFIER_H_ +#define _TF_IDENTIFIER_H_ + +#include "tf_core.h" + +/* The Identifier module provides processing of Identifiers. */ + +/** + * Identifer config params. + * + * @num_elements: Number of identifier types in each of the + * configuration arrays + * @cfg: Identifier configuration array + * @shadow_copy: Boolean controlling the request shadow copy. + * @resources: Session resource allocations + */ +struct tf_ident_cfg_parms { + u16 num_elements; + struct tf_rm_element_cfg *cfg; + struct tf_session_resources *resources; +}; + +/** + * Identifier allocation parameter definition + * + * @dir: receive or transmit direction + * @type: Identifier type + * @id: Identifier allocated + */ +struct tf_ident_alloc_parms { + enum tf_dir dir; + enum tf_identifier_type type; + u16 *id; +}; + +/** + * Identifier free parameter definition + * + * @dir: receive or transmit direction + * @type: Identifier type + * @id: ID to free + * @ref_cnt: (experimental)Current refcnt after free + */ +struct tf_ident_free_parms { + enum tf_dir dir; + enum tf_identifier_type type; + u16 id; + u32 *ref_cnt; +}; + +/** + * Identifier search parameter definition + * + * @dir: receive or transmit direction + * @type: Identifier type + * @search_id: Identifier data to search for + * @hit: Set if matching identifier found + * @ref_cnt: Current ref count after allocation + */ +struct tf_ident_search_parms { + enum tf_dir dir; + enum tf_identifier_type type; + u16 search_id; + bool *hit; + 
u32 *ref_cnt; +}; + +/* Identifier RM database */ +struct ident_rm_db { + struct rm_db *ident_db[TF_DIR_MAX]; +}; + +/** + * Initializes the Identifier module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_ident_bind(struct tf *tfp, struct tf_ident_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_ident_unbind(struct tf *tfp); + +/** + * Allocates a single identifier type. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_ident_alloc(struct tf *tfp, struct tf_ident_alloc_parms *parms); + +/** + * Free's a single identifier type. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_ident_free(struct tf *tfp, struct tf_ident_free_parms *parms); + +/** + * Retrieves the allocated resource info + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int tf_ident_get_resc_info(struct tf *tfp, + struct tf_identifier_resource_info *parms); + +#endif /* _TF_IDENTIFIER_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.c new file mode 100644 index 000000000000..298218758f01 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ +#include +#include +#include +#include "tf_if_tbl.h" +#include "tf_rm.h" +#include "tf_util.h" +#include "tf_msg.h" +#include "tf_session.h" + +struct tf; + +/* IF Table database */ +struct tf_if_tbl_db { + struct tf_if_tbl_cfg *if_tbl_cfg_db[TF_DIR_MAX]; +}; + +/** + * Convert if_tbl_type to hwrm type. + * + * @if_tbl_type: Interface table type + * @hwrm_type: HWRM device data type + * + * Returns: + * 0 - Success + * -EOPNOTSUPP - Type not supported + */ +static int tf_if_tbl_get_hcapi_type(struct tf_if_tbl_get_hcapi_parms *parms) +{ + enum tf_if_tbl_cfg_type cfg_type; + struct tf_if_tbl_cfg *tbl_cfg; + + tbl_cfg = (struct tf_if_tbl_cfg *)parms->tbl_db; + cfg_type = tbl_cfg[parms->db_index].cfg_type; + + if (cfg_type != TF_IF_TBL_CFG) + return -EOPNOTSUPP; + + *parms->hcapi_type = tbl_cfg[parms->db_index].hcapi_type; + + return 0; +} + +int tf_if_tbl_bind(struct tf *tfp, struct tf_if_tbl_cfg_parms *parms) +{ + struct tf_if_tbl_db *if_tbl_db; + + if (!tfp || !parms) + return -EINVAL; + + if_tbl_db = vzalloc(sizeof(*if_tbl_db)); + if (!if_tbl_db) + return -ENOMEM; + + if_tbl_db->if_tbl_cfg_db[TF_DIR_RX] = parms->cfg; + if_tbl_db->if_tbl_cfg_db[TF_DIR_TX] = parms->cfg; + tf_session_set_if_tbl_db(tfp, (void *)if_tbl_db); + + netdev_dbg(tfp->bp->dev, "Table Type - initialized\n"); + return 0; +} + +int tf_if_tbl_unbind(struct tf *tfp) +{ + struct tf_if_tbl_db *if_tbl_db_ptr; + int rc; + + rc = tf_session_get_if_tbl_db(tfp, (void **)&if_tbl_db_ptr); + if 
(rc) { + netdev_dbg(tfp->bp->dev, "No IF Table DBs initialized\n"); + return 0; + } + + tf_session_set_if_tbl_db(tfp, NULL); + vfree(if_tbl_db_ptr); + + return 0; +} + +int tf_if_tbl_set(struct tf *tfp, struct tf_if_tbl_set_parms *parms) +{ + struct tf_if_tbl_get_hcapi_parms hparms; + struct tf_if_tbl_db *if_tbl_db_ptr; + u8 fw_session_id; + int rc; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_if_tbl_db(tfp, (void **)&if_tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "No IF Table DBs initialized\n"); + return 0; + } + + /* Convert TF type to HCAPI type */ + hparms.tbl_db = if_tbl_db_ptr->if_tbl_cfg_db[parms->dir]; + hparms.db_index = parms->type; + hparms.hcapi_type = &parms->hcapi_type; + rc = tf_if_tbl_get_hcapi_type(&hparms); + if (rc) + return rc; + + rc = tf_msg_set_if_tbl_entry(tfp, parms, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, If Tbl set failed, type:%d, rc:%d\n", + tf_dir_2_str(parms->dir), parms->type, rc); + } + + return 0; +} + +int tf_if_tbl_get(struct tf *tfp, struct tf_if_tbl_get_parms *parms) +{ + struct tf_if_tbl_get_hcapi_parms hparms; + struct tf_if_tbl_db *if_tbl_db_ptr; + u8 fw_session_id; + int rc = 0; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_if_tbl_db(tfp, (void **)&if_tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "No IF Table DBs initialized\n"); + return 0; + } + + /* Convert TF type to HCAPI type */ + hparms.tbl_db = if_tbl_db_ptr->if_tbl_cfg_db[parms->dir]; + hparms.db_index = parms->type; + hparms.hcapi_type = &parms->hcapi_type; + rc = tf_if_tbl_get_hcapi_type(&hparms); + if (rc) + return rc; + + /* Get the entry */ + rc = tf_msg_get_if_tbl_entry(tfp, parms, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, If Tbl get failed, type:%d, rc:%d\n", + 
tf_dir_2_str(parms->dir), parms->type, -rc); + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.h new file mode 100644 index 000000000000..aeafddb85d73 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_if_tbl.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef TF_IF_TBL_TYPE_H_ +#define TF_IF_TBL_TYPE_H_ + +#include "tf_core.h" + +#define CFA_IF_TBL_TYPE_INVALID 65535 /* Invalid CFA types */ + +struct tf; + +/* The IF Table module provides processing of Internal TF + * interface table types. + */ + +/* IF table configuration enumeration. */ +enum tf_if_tbl_cfg_type { + TF_IF_TBL_CFG_NULL, /* No configuration */ + TF_IF_TBL_CFG, /* HCAPI 'controlled' */ +}; + +/** + * IF table configuration structure, used by the Device to configure + * how an individual TF type is configured in regard to the HCAPI type. + * + * @cfg_type: IF table config controls how the DB for that element is + * processed. + * @hcapi_type: HCAPI Type for the element. Used for TF to HCAPI type + * conversion. + */ +struct tf_if_tbl_cfg { + enum tf_if_tbl_cfg_type cfg_type; + u16 hcapi_type; +}; + +/** + * Get HCAPI type parameters for a single element + * + * @tbl_db: IF Tbl DB Handle + * @db_index: DB Index, indicates which DB entry to perform the + * action on. + * @hcapi_type: Pointer to the hcapi type for the specified db_index + */ +struct tf_if_tbl_get_hcapi_parms { + void *tbl_db; + u16 db_index; + u16 *hcapi_type; +}; + +/** + * Table configuration parameters + * + * @num_elements: Number of table types in each of the configuration arrays + * @cfg: Table Type element configuration array + * @shadow_cfg: Shadow table type configuration array + * @shadow_copy: Boolean controlling the request shadow copy. 
+ */ +struct tf_if_tbl_cfg_parms { + u16 num_elements; + struct tf_if_tbl_cfg *cfg; + struct tf_shadow_if_tbl_cfg *shadow_cfg; + bool shadow_copy; +}; + +/** + * IF Table set parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to set + * @hcapi_type: Type of HCAPI + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Entry index to write to + */ +struct tf_if_tbl_set_parms { + enum tf_dir dir; + enum tf_if_tbl_type type; + u16 hcapi_type; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * IF Table get parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @hcapi_type: Type of HCAPI + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Entry index to read + */ +struct tf_if_tbl_get_parms { + enum tf_dir dir; + enum tf_if_tbl_type type; + u16 hcapi_type; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * Initializes the Table module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table configuration parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_if_tbl_bind(struct tf *tfp, struct tf_if_tbl_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_if_tbl_unbind(struct tf *tfp); + +/** + * Configures the requested element by sending a firmware request which + * then installs it into the device internal structures. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Interface Table set parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int tf_if_tbl_set(struct tf *tfp, + struct tf_if_tbl_set_parms *parms); + +/** + * Retrieves the requested element by sending a firmware request to get + * the element. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Interface Table get parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_if_tbl_get(struct tf *tfp, + struct tf_if_tbl_get_parms *parms); + +#endif /* TF_IF_TBL_TYPE_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.c new file mode 100644 index 000000000000..9ebac23f15e8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.c @@ -0,0 +1,1218 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ +#include +#include "tf_msg.h" +#include "tf_util.h" +#include "bnxt_hwrm.h" + +#define HWRM_TF_SESSION_OPEN_OUTPUT_FLAGS_SHARED_SESSION_CREATOR \ + TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_CREATOR + +#define HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK \ + TF_SESSION_RESC_QCAPS_RESP_FLAGS_SESS_RESV_STRATEGY_MASK + +#define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX \ + TF_EM_INSERT_REQ_FLAGS_DIR_TX + +#define HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX \ + TF_EM_INSERT_REQ_FLAGS_DIR_RX + +#define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX \ + TF_EM_DELETE_REQ_FLAGS_DIR_TX + +#define HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX \ + TF_EM_DELETE_REQ_FLAGS_DIR_RX + +#define HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX \ + TF_TCAM_SET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA \ + TF_TCAM_SET_REQ_FLAGS_DMA + +#define HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX \ + TF_TCAM_GET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX \ + TF_TCAM_FREE_REQ_FLAGS_DIR_TX + +#define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX \ + TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_RX \ + 
TF_GLOBAL_CFG_GET_REQ_FLAGS_DIR_RX + +#define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX \ + TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_RX \ + TF_GLOBAL_CFG_SET_REQ_FLAGS_DIR_RX +#define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX \ + TF_IF_TBL_GET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_RX \ + TF_IF_TBL_GET_REQ_FLAGS_DIR_RX + +#define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX \ + TF_IF_TBL_SET_REQ_FLAGS_DIR_TX + +#define HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_RX \ + TF_IF_TBL_SET_REQ_FLAGS_DIR_RX + +/* Specific msg size defines as we cannot use defines in tf.yaml. This + * means we have to manually sync hwrm with these defines if the + * tf.yaml changes. + */ +#define TF_MSG_SET_GLOBAL_CFG_DATA_SIZE 8 +#define TF_MSG_EM_INSERT_KEY_SIZE 64 +#define TF_MSG_EM_INSERT_RECORD_SIZE 96 +#define TF_MSG_TBL_TYPE_SET_DATA_SIZE 88 + +/* Compile check - Catch any msg changes that we depend on, like the + * defines listed above for array size checking. + * + * Checking array size is dangerous in that the type could change and + * we wouldn't be able to catch it. Thus we check if the complete msg + * changed instead. Best we can do. + * + * If failure is observed then both msg size (defines below) and the + * array size (define above) should be checked and compared. 
+ */ +#define TF_MSG_SIZE_HWRM_TF_GLOBAL_CFG_SET 56 +#define TF_MSG_SIZE_HWRM_TF_EM_INSERT 104 +#define TF_MSG_SIZE_HWRM_TF_TBL_TYPE_SET 128 + +/* This is the MAX data we can transport across regular HWRM */ +#define TF_PCI_BUF_SIZE_MAX 88 + +/* This is the length of shared session name "tf_share" */ +#define TF_SHARED_SESSION_NAME_LEN 9 + +/* Max uint8_t value */ +#define UINT8_MAX 0xff + +/* If data bigger than TF_PCI_BUF_SIZE_MAX then use DMA method */ +struct tf_msg_dma_buf { + void *va_addr; + dma_addr_t pa_addr; +}; + +int tf_msg_session_open(struct bnxt *bp, char *ctrl_chan_name, + u8 *fw_session_id, u8 *fw_session_client_id, + bool *shared_session_creator) +{ + struct hwrm_tf_session_open_output *resp; + struct hwrm_tf_session_open_input *req; + char *session_name; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_OPEN); + if (rc) + return rc; + + session_name = strstr(ctrl_chan_name, "tf_shared"); + if (session_name) + memcpy(&req->session_name, session_name, + TF_SHARED_SESSION_NAME_LEN); + else + memcpy(&req->session_name, ctrl_chan_name, + TF_SESSION_NAME_MAX); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + + if ((le32_to_cpu(resp->fw_session_id) > UINT8_MAX) || + (le32_to_cpu(resp->fw_session_client_id) > UINT8_MAX)) { + hwrm_req_drop(bp, req); + return -EINVAL; + } + *fw_session_id = (u8)le32_to_cpu(resp->fw_session_id); + *fw_session_client_id = + (u8)le32_to_cpu(resp->fw_session_client_id); + *shared_session_creator = + (bool)le32_to_cpu(resp->flags & + TF_SESSION_OPEN_RESP_FLAGS_SHARED_SESSION_CREATOR); + + netdev_dbg(bp->dev, + "fw_session_id: 0x%x, fw_session_client_id: 0x%x\n", + *fw_session_id, *fw_session_client_id); + + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_session_client_register(struct tf *tfp, char *ctrl_channel_name, + u8 fw_session_id, u8 *fw_session_client_id) +{ + struct hwrm_tf_session_register_output *resp; + struct 
hwrm_tf_session_register_input *req; + struct bnxt *bp = tfp->bp; + char *session_name; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_REGISTER); + if (rc) + return rc; + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + session_name = strstr(ctrl_channel_name, "tf_shared"); + if (session_name) + memcpy(&req->session_client_name, session_name, + TF_SHARED_SESSION_NAME_LEN); + else + memcpy(&req->session_client_name, ctrl_channel_name, + TF_SESSION_NAME_MAX); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) { + hwrm_req_drop(bp, req); + return rc; + } + + if (le32_to_cpu(resp->fw_session_client_id) > UINT8_MAX) { + hwrm_req_drop(bp, req); + return -EINVAL; + } + *fw_session_client_id = + (u8)le32_to_cpu(resp->fw_session_client_id); + + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_session_client_unregister(struct tf *tfp, u8 fw_session_id, + u8 fw_session_client_id) +{ + struct hwrm_tf_session_unregister_input *req; + struct bnxt *bp = tfp->bp; + int rc; + + /* Populate the request */ + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_UNREGISTER); + if (rc) + return rc; + + req->fw_session_id = cpu_to_le32(fw_session_id); + req->fw_session_client_id = cpu_to_le32(fw_session_client_id); + + rc = hwrm_req_send(bp, req); + return rc; +} + +int tf_msg_session_close(struct tf *tfp, u8 fw_session_id) +{ + struct hwrm_tf_session_close_input *req; + int rc; + + rc = hwrm_req_init(tfp->bp, req, HWRM_TF_SESSION_CLOSE); + if (rc) + return rc; + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + rc = hwrm_req_send(tfp->bp, req); + return rc; +} + +int tf_msg_session_qcfg(struct tf *tfp, u8 fw_session_id) +{ + struct hwrm_tf_session_qcfg_input *req; + int rc; + + rc = hwrm_req_init(tfp->bp, req, HWRM_TF_SESSION_QCFG); + if (rc) + return rc; + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + rc = hwrm_req_send(tfp->bp, req); + return rc; 
+} + +int tf_msg_session_resc_qcaps(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *query, + enum tf_rm_resc_resv_strategy *resv_strategy, + u8 *sram_profile) +{ + struct hwrm_tf_session_resc_qcaps_output *resp; + struct hwrm_tf_session_resc_qcaps_input *req; + struct tf_msg_dma_buf qcaps_buf = { 0 }; + struct tf_rm_resc_req_entry *data; + struct bnxt *bp; + int dma_size; + int rc; + int i; + + if (!tfp || !query || !resv_strategy) + return -EINVAL; + + bp = tfp->bp; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_RESC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + + /* Prepare DMA buffer */ + dma_size = size * sizeof(struct tf_rm_resc_req_entry); + qcaps_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size, + &qcaps_buf.pa_addr, GFP_KERNEL); + if (!qcaps_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + /* Populate the request */ + req->fw_session_id = 0; + req->flags = cpu_to_le16(dir); + req->qcaps_size = cpu_to_le16(size); + req->qcaps_addr = cpu_to_le64(qcaps_buf.pa_addr); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + /* Process the response + * Should always get expected number of entries + */ + if (le32_to_cpu(resp->size) != size) { + netdev_warn(bp->dev, + "%s: QCAPS message size error:%d req %d resp %d\n", + tf_dir_2_str(dir), EINVAL, size, resp->size); + } + + netdev_dbg(bp->dev, "QCAPS Count: %d\n", le32_to_cpu(resp->size)); + netdev_dbg(bp->dev, "\nQCAPS Dir:%s\n", tf_dir_2_str(dir)); + + /* Post process the response */ + data = (struct tf_rm_resc_req_entry *)qcaps_buf.va_addr; + for (i = 0; i < size; i++) { + query[i].type = le32_to_cpu(data[i].type); + query[i].min = le16_to_cpu(data[i].min); + query[i].max = le16_to_cpu(data[i].max); + } + + *resv_strategy = resp->flags & + HWRM_TF_SESSION_RESC_QCAPS_OUTPUT_FLAGS_SESS_RESV_STRATEGY_MASK; + + if (sram_profile) + *sram_profile = resp->sram_profile; + +cleanup: + if 
(qcaps_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size, + qcaps_buf.va_addr, qcaps_buf.pa_addr); + hwrm_req_drop(bp, req); + + if (!rc) { + netdev_dbg(bp->dev, "%s: dir:%s Success\n", __func__, + tf_dir_2_str(dir)); + } else { + netdev_dbg(bp->dev, "%s: dir:%s Failure\n", __func__, + tf_dir_2_str(dir)); + } + return rc; +} + +int tf_msg_session_resc_alloc(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *request, + u8 fw_session_id, struct tf_rm_resc_entry *resv) +{ + struct hwrm_tf_session_resc_alloc_output *resp = NULL; + struct hwrm_tf_session_resc_alloc_input *req = NULL; + struct tf_msg_dma_buf resv_buf = { 0 }; + struct tf_msg_dma_buf req_buf = { 0 }; + struct tf_rm_resc_req_entry *req_data; + struct tf_rm_resc_entry *resv_data; + int dma_size_1 = 0; + int dma_size_2 = 0; + struct bnxt *bp; + int rc; + int i; + + if (!tfp || !request || !resv) + return -EINVAL; + + bp = tfp->bp; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_RESC_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + + /* Prepare DMA buffers */ + dma_size_1 = size * sizeof(struct tf_rm_resc_req_entry); + req_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size_1, + &req_buf.pa_addr, GFP_KERNEL); + if (!req_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + dma_size_2 = size * sizeof(struct tf_rm_resc_entry); + resv_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size_2, + &resv_buf.pa_addr, GFP_KERNEL); + if (!resv_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le16(dir); + req->req_size = cpu_to_le16(size); + + req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr; + for (i = 0; i < size; i++) { + req_data[i].type = cpu_to_le32(request[i].type); + req_data[i].min = cpu_to_le16(request[i].min); + req_data[i].max = cpu_to_le16(request[i].max); + } + + req->req_addr 
= cpu_to_le64(req_buf.pa_addr); + req->resc_addr = cpu_to_le64(resv_buf.pa_addr); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + /* Process the response + * Should always get expected number of entries + */ + if (le32_to_cpu(resp->size) != size) { + netdev_dbg(bp->dev, "%s: Alloc message size error, rc:%d\n", + tf_dir_2_str(dir), EINVAL); + rc = -EINVAL; + goto cleanup; + } + + netdev_dbg(bp->dev, "\nRESV: %s\n", tf_dir_2_str(dir)); + netdev_dbg(bp->dev, "size: %d\n", le32_to_cpu(resp->size)); + + /* Post process the response */ + resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr; + for (i = 0; i < size; i++) { + resv[i].type = le32_to_cpu(resv_data[i].type); + resv[i].start = le16_to_cpu(resv_data[i].start); + resv[i].stride = le16_to_cpu(resv_data[i].stride); + } + +cleanup: + if (req_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_1, + req_buf.va_addr, req_buf.pa_addr); + if (resv_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_2, + resv_buf.va_addr, resv_buf.pa_addr); + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_session_resc_info(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *request, + u8 fw_session_id, struct tf_rm_resc_entry *resv) +{ + struct hwrm_tf_session_resc_info_output *resp; + struct hwrm_tf_session_resc_info_input *req; + struct tf_msg_dma_buf resv_buf = { 0 }; + struct tf_msg_dma_buf req_buf = { 0 }; + struct tf_rm_resc_req_entry *req_data; + struct tf_rm_resc_entry *resv_data; + int dma_size_1 = 0; + int dma_size_2 = 0; + struct bnxt *bp; + int rc; + int i; + + if (!tfp || !request || !resv) + return -EINVAL; + + bp = tfp->bp; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_RESC_INFO); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + + /* Prepare DMA buffers */ + dma_size_1 = size * sizeof(struct tf_rm_resc_req_entry); + req_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size_1, + 
&req_buf.pa_addr, GFP_KERNEL); + if (!req_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + dma_size_2 = size * sizeof(struct tf_rm_resc_entry); + resv_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size_2, + &resv_buf.pa_addr, GFP_KERNEL); + if (!resv_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le16(dir); + req->req_size = cpu_to_le16(size); + + req_data = (struct tf_rm_resc_req_entry *)req_buf.va_addr; + for (i = 0; i < size; i++) { + req_data[i].type = cpu_to_le32(request[i].type); + req_data[i].min = cpu_to_le16(request[i].min); + req_data[i].max = cpu_to_le16(request[i].max); + } + + req->req_addr = cpu_to_le64(req_buf.pa_addr); + req->resc_addr = cpu_to_le64(resv_buf.pa_addr); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + /* Process the response + * Should always get expected number of entries + */ + if (le32_to_cpu(resp->size) != size) { + netdev_dbg(bp->dev, "%s: Alloc message size error, rc:%d\n", + tf_dir_2_str(dir), EINVAL); + rc = -EINVAL; + goto cleanup; + } + + netdev_dbg(bp->dev, "\nRESV: %s\n", tf_dir_2_str(dir)); + netdev_dbg(bp->dev, "size: %d\n", le32_to_cpu(resp->size)); + + /* Post process the response */ + resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr; + for (i = 0; i < size; i++) { + resv[i].type = le32_to_cpu(resv_data[i].type); + resv[i].start = le16_to_cpu(resv_data[i].start); + resv[i].stride = le16_to_cpu(resv_data[i].stride); + } + +cleanup: + if (req_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_1, + req_buf.va_addr, req_buf.pa_addr); + if (resv_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_2, + resv_buf.va_addr, resv_buf.pa_addr); + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_session_resc_flush(struct tf *tfp, enum tf_dir dir, u16 size, + u8 fw_session_id, struct tf_rm_resc_entry *resv) +{ + struct hwrm_tf_session_resc_flush_input *req; + struct 
tf_msg_dma_buf resv_buf = { 0 }; + struct tf_rm_resc_entry *resv_data; + struct bnxt *bp; + int dma_size; + int rc; + int i; + + if (!tfp || !resv) + return -EINVAL; + + bp = tfp->bp; + + rc = hwrm_req_init(bp, req, HWRM_TF_SESSION_RESC_FLUSH); + if (rc) + return rc; + + hwrm_req_hold(bp, req); + + /* Prepare DMA buffers */ + dma_size = size * sizeof(struct tf_rm_resc_entry); + resv_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size, + &resv_buf.pa_addr, GFP_KERNEL); + if (!resv_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le16(dir); + req->flush_size = cpu_to_le16(size); + + resv_data = (struct tf_rm_resc_entry *)resv_buf.va_addr; + for (i = 0; i < size; i++) { + resv_data[i].type = cpu_to_le32(resv[i].type); + resv_data[i].start = cpu_to_le16(resv[i].start); + resv_data[i].stride = cpu_to_le16(resv[i].stride); + } + + req->flush_addr = cpu_to_le64(resv_buf.pa_addr); + rc = hwrm_req_send(bp, req); + +cleanup: + if (resv_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size, + resv_buf.va_addr, resv_buf.pa_addr); + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_insert_em_internal_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *em_parms, + u8 fw_session_id, u16 *rptr_index, + u8 *rptr_entry, u8 *num_of_entries) +{ + struct tf_em_64b_entry *em_result = + (struct tf_em_64b_entry *)em_parms->em_record; + struct hwrm_tf_em_insert_output *resp = NULL; + struct hwrm_tf_em_insert_input *req = NULL; + struct bnxt *bp = tfp->bp; + u8 msg_key_size; + u16 flags; + int rc; + + BUILD_BUG_ON_MSG(sizeof(struct hwrm_tf_em_insert_input) != + TF_MSG_SIZE_HWRM_TF_EM_INSERT, + "HWRM message size changed: hwrm_tf_em_insert_input"); + + rc = hwrm_req_init(bp, req, HWRM_TF_EM_INSERT); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + /* Check for key size 
conformity */ + msg_key_size = (em_parms->key_sz_in_bits + 7) / 8; + if (msg_key_size > TF_MSG_EM_INSERT_KEY_SIZE) { + rc = -EINVAL; + netdev_dbg(bp->dev, + "%s: Invalid parameters for msg type, rc:%d\n", + tf_dir_2_str(em_parms->dir), rc); + goto cleanup; + } + + memcpy(req->em_key, em_parms->key, msg_key_size); + + flags = (em_parms->dir == TF_DIR_TX ? + HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX : + HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX); + req->flags = cpu_to_le16(flags); + req->strength = (cpu_to_le16(em_result->hdr.word1) & + CFA_P4_EEM_ENTRY_STRENGTH_MASK) >> + CFA_P4_EEM_ENTRY_STRENGTH_SHIFT; + req->em_key_bitlen = cpu_to_le16(em_parms->key_sz_in_bits); + req->action_ptr = cpu_to_le32(em_result->hdr.pointer); + req->em_record_idx = cpu_to_le16(*rptr_index); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *rptr_entry = resp->rptr_entry; + *rptr_index = le16_to_cpu(resp->rptr_index); + *num_of_entries = resp->num_of_entries; + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_hash_insert_em_internal_entry(struct tf *tfp, + struct tf_insert_em_entry_parms + *em_parms, u32 key0_hash, + u32 key1_hash, u8 fw_session_id, + u16 *rptr_index, u8 *rptr_entry, + u8 *num_of_entries) +{ + struct hwrm_tf_em_hash_insert_output *resp = NULL; + struct hwrm_tf_em_hash_insert_input *req = NULL; + struct bnxt *bp = tfp->bp; + u8 msg_record_size; + u16 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_EM_HASH_INSERT); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + /* Check for key size conformity */ + msg_record_size = (em_parms->em_record_sz_in_bits + 7) / 8; + + if (msg_record_size > TF_MSG_EM_INSERT_RECORD_SIZE) { + rc = -EINVAL; + netdev_dbg(bp->dev, "%s: Record size too large, rc:%d\n", + tf_dir_2_str(em_parms->dir), rc); + goto cleanup; + } + + memcpy((char *)req->em_record, em_parms->em_record, msg_record_size); + + flags = (em_parms->dir == 
TF_DIR_TX ? + HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_TX : + HWRM_TF_EM_INSERT_INPUT_FLAGS_DIR_RX); + req->flags = cpu_to_le16(flags); + req->em_record_size_bits = em_parms->em_record_sz_in_bits; + req->em_record_idx = *rptr_index; + req->key0_hash = key0_hash; + req->key1_hash = key1_hash; + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *rptr_entry = resp->rptr_entry; + *rptr_index = resp->rptr_index; + *num_of_entries = resp->num_of_entries; + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_delete_em_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *em_parms, + u8 fw_session_id) +{ + struct hwrm_tf_em_delete_output *resp = NULL; + struct hwrm_tf_em_delete_input *req = NULL; + struct bnxt *bp = tfp->bp; + u16 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_EM_DELETE); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + flags = (em_parms->dir == TF_DIR_TX ? + HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX : + HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX); + req->flags = cpu_to_le16(flags); + req->flow_handle = cpu_to_le64(em_parms->flow_handle); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + em_parms->index = le16_to_cpu(resp->em_index); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_move_em_entry(struct tf *tfp, + struct tf_move_em_entry_parms *em_parms, + u8 fw_session_id) +{ + struct hwrm_tf_em_move_output *resp = NULL; + struct hwrm_tf_em_move_input *req = NULL; + struct bnxt *bp = tfp->bp; + u16 flags; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_EM_MOVE); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + + flags = (em_parms->dir == TF_DIR_TX ? 
+ HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX : + HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX); + req->flags = cpu_to_le16(flags); + req->flow_handle = cpu_to_le64(em_parms->flow_handle); + req->new_index = cpu_to_le32(em_parms->new_index); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + em_parms->index = le16_to_cpu(resp->em_index); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_tcam_entry_set(struct tf *tfp, struct tf_tcam_set_parms *parms, + u8 fw_session_id) +{ + struct hwrm_tf_tcam_set_input *req = NULL; + struct tf_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfp->bp; + int data_size = 0; + u8 *data = NULL; + int rc; + + if (!bp) + return 0; + + if (bp && test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return 0; + + rc = hwrm_req_init(bp, req, HWRM_TF_TCAM_SET); + if (rc) + return rc; + + hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->type = parms->hcapi_type; + req->idx = cpu_to_le16(parms->idx); + if (parms->dir == TF_DIR_TX) + req->flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DIR_TX; + + req->key_size = parms->key_size; + req->mask_offset = parms->key_size; + + /* Result follows after key and mask, thus multiply by 2 */ + req->result_offset = 2 * parms->key_size; + req->result_size = parms->result_size; + data_size = 2 * req->key_size + req->result_size; + + if (data_size <= TF_PCI_BUF_SIZE_MAX) { + /* use pci buffer */ + data = &req->dev_data[0]; + } else { + /* use dma buffer */ + req->flags |= HWRM_TF_TCAM_SET_INPUT_FLAGS_DMA; + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, data_size, + &buf.pa_addr, GFP_KERNEL); + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + data = buf.va_addr; + memcpy(&req->dev_data[0], &buf.pa_addr, sizeof(buf.pa_addr)); + } + + memcpy(&data[0], parms->key, parms->key_size); + memcpy(&data[parms->key_size], parms->mask, parms->key_size); + memcpy(&data[req->result_offset], parms->result, parms->result_size); + + rc = hwrm_req_send(bp, 
req); + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, data_size, buf.va_addr, + buf.pa_addr); + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_tcam_entry_get(struct tf *tfp, struct tf_tcam_get_parms *parms, + u8 fw_session_id) +{ + struct hwrm_tf_tcam_get_output *resp = NULL; + struct hwrm_tf_tcam_get_input *req = NULL; + struct bnxt *bp = tfp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_TCAM_GET); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->type = parms->hcapi_type; + req->idx = cpu_to_le16(parms->idx); + if (parms->dir == TF_DIR_TX) + req->flags |= HWRM_TF_TCAM_GET_INPUT_FLAGS_DIR_TX; + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (parms->key_size < resp->key_size || + parms->result_size < resp->result_size) { + rc = -EINVAL; + netdev_dbg(bp->dev, + "%s: Key buffer(%d) is < the key(%d), rc:%d\n", + tf_dir_2_str(parms->dir), parms->key_size, + resp->key_size, rc); + goto cleanup; + } + parms->key_size = resp->key_size; + parms->result_size = resp->result_size; + memcpy(parms->key, resp->dev_data, resp->key_size); + memcpy(parms->mask, &resp->dev_data[resp->key_size], resp->key_size); + memcpy(parms->result, &resp->dev_data[resp->result_offset], + resp->result_size); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_tcam_entry_free(struct tf *tfp, struct tf_tcam_free_parms *in_parms, + u8 fw_session_id) +{ + struct hwrm_tf_tcam_free_input *req = NULL; + struct bnxt *bp = tfp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_TCAM_FREE); + if (rc) + return rc; + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->type = in_parms->hcapi_type; + req->count = 1; + req->idx_list[0] = cpu_to_le16(in_parms->idx); + if (in_parms->dir == TF_DIR_TX) + req->flags |= HWRM_TF_TCAM_FREE_INPUT_FLAGS_DIR_TX; + + rc = hwrm_req_send(bp, req); + return rc; +} + +int 
tf_msg_set_tbl_entry(struct tf *tfp, enum tf_dir dir, u16 hcapi_type, + u16 size, u8 *data, u32 index, u8 fw_session_id) +{ + struct hwrm_tf_tbl_type_set_input *req = NULL; + struct bnxt *bp = tfp->bp; + int rc; + + BUILD_BUG_ON_MSG(sizeof(struct hwrm_tf_tbl_type_set_input) != + TF_MSG_SIZE_HWRM_TF_TBL_TYPE_SET, + "HWRM message size changed: tf_tbl_type_set_input"); + + rc = hwrm_req_init(bp, req, HWRM_TF_TBL_TYPE_SET); + if (rc) + return rc; + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le16(dir); + req->type = cpu_to_le32(hcapi_type); + req->size = cpu_to_le16(size); + req->index = cpu_to_le32(index); + + /* Check for data size conformity */ + if (size > TF_MSG_TBL_TYPE_SET_DATA_SIZE) { + rc = -EINVAL; + netdev_dbg(bp->dev, + "%s: Invalid parameters for msg type, rc:%d\n", + tf_dir_2_str(dir), rc); + return rc; + } + + memcpy(&req->data, data, size); + + rc = hwrm_req_send(bp, req); + return rc; +} + +int tf_msg_get_tbl_entry(struct tf *tfp, enum tf_dir dir, u16 hcapi_type, + u16 size, u8 *data, u32 index, bool clear_on_read, + u8 fw_session_id) +{ + struct hwrm_tf_tbl_type_get_output *resp = NULL; + struct hwrm_tf_tbl_type_get_input *req = NULL; + struct bnxt *bp = tfp->bp; + u32 flags = 0; + int rc; + + flags = (dir == TF_DIR_TX ? + TF_TBL_TYPE_GET_REQ_FLAGS_DIR_TX : + TF_TBL_TYPE_GET_REQ_FLAGS_DIR_RX); + + if (clear_on_read) + flags |= TF_TBL_TYPE_GET_REQ_FLAGS_CLEAR_ON_READ; + + rc = hwrm_req_init(bp, req, HWRM_TF_TBL_TYPE_GET); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le16(flags); + req->type = cpu_to_le32(hcapi_type); + req->index = cpu_to_le32(index); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + /* The response will be 64 bytes long, the response size will + * be in words (16). All we can test for is that the response + * size is < to the requested size. 
+ */ + if ((le32_to_cpu(resp->size) * 4) < size) { + rc = -EINVAL; + goto cleanup; + } + + /* Copy the requested number of bytes */ + memcpy(data, &resp->data, size); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +/* HWRM Tunneled messages */ +int tf_msg_get_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *params, + u8 fw_session_id) +{ + struct hwrm_tf_global_cfg_get_output *resp = NULL; + struct hwrm_tf_global_cfg_get_input *req = NULL; + struct bnxt *bp = tfp->bp; + u16 resp_size = 0; + u32 flags = 0; + int rc = 0; + + rc = hwrm_req_init(bp, req, HWRM_TF_GLOBAL_CFG_GET); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + flags = (params->dir == TF_DIR_TX ? + HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_TX : + HWRM_TF_GLOBAL_CFG_GET_INPUT_FLAGS_DIR_RX); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le32(flags); + req->type = cpu_to_le32(params->type); + req->offset = cpu_to_le32(params->offset); + req->size = cpu_to_le32(params->config_sz_in_bytes); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + /* Verify that we got enough buffer to return the requested data */ + resp_size = le16_to_cpu(resp->size); + if (resp_size < params->config_sz_in_bytes) { + rc = -EINVAL; + goto cleanup; + } + + if (params->config) + memcpy(params->config, resp->data, resp_size); + else + rc = -EFAULT; + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_set_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *params, + u8 fw_session_id) +{ + struct hwrm_tf_global_cfg_set_input *req = NULL; + struct tf_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfp->bp; + int data_size; + u32 flags; + u8 *data; + u8 *mask; + int rc; + int i; + + BUILD_BUG_ON_MSG(sizeof(struct hwrm_tf_global_cfg_set_input) != + TF_MSG_SIZE_HWRM_TF_GLOBAL_CFG_SET, + "HWRM message size changed: tf_global_cfg_set_input"); + + rc = hwrm_req_init(bp, req, HWRM_TF_GLOBAL_CFG_SET); + if (rc) + return rc; + + flags = 
(params->dir == TF_DIR_TX ? + HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX : + HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_RX); + hwrm_req_hold(bp, req); + + data_size = 2 * params->config_sz_in_bytes; /* data + mask */ + if (data_size <= TF_PCI_BUF_SIZE_MAX) { + /* use pci buffer */ + data = &req->data[0]; + mask = &req->mask[0]; + } else { + /* use dma buffer */ + netdev_dbg(bp->dev, "%s: Using dma data\n", __func__); + flags |= TF_GLOBAL_CFG_SET_REQ_FLAGS_DMA; + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, data_size, + &buf.pa_addr, GFP_KERNEL); + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + data = buf.va_addr; + mask = data + params->config_sz_in_bytes; + + /* set dma address in the request */ + memcpy(&req->data[0], &buf.pa_addr, sizeof(buf.pa_addr)); + } + + /* copy data and mask to req */ + memcpy(data, params->config, params->config_sz_in_bytes); + if (!params->config_mask) { + for (i = 0; i < params->config_sz_in_bytes; i++) + mask[i] = 0xff; + } else { + memcpy(mask, params->config_mask, params->config_sz_in_bytes); + } + netdev_dbg(bp->dev, "HWRM_TF_GLOBAL_CFG_SET: data: %*ph\n", + params->config_sz_in_bytes, (void *)data); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = cpu_to_le32(flags); + req->type = cpu_to_le32(params->type); + req->offset = cpu_to_le32(params->offset); + req->size = cpu_to_le32(params->config_sz_in_bytes); + + rc = hwrm_req_send(bp, req); + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, data_size, buf.va_addr, + buf.pa_addr); + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_bulk_get_tbl_entry(struct tf *tfp, enum tf_dir dir, + u16 hcapi_type, u32 starting_idx, + u16 num_entries, u16 entry_sz_in_bytes, + u64 physical_mem_addr, bool clear_on_read) +{ + /* TBD */ + return -EINVAL; +} + +int tf_msg_get_if_tbl_entry(struct tf *tfp, + struct tf_if_tbl_get_parms *params, + u8 fw_session_id) +{ + struct hwrm_tf_if_tbl_get_output *resp = NULL; + struct 
hwrm_tf_if_tbl_get_input *req = NULL; + struct bnxt *bp = tfp->bp; + u32 flags = 0; + int rc = 0; + + rc = hwrm_req_init(bp, req, HWRM_TF_IF_TBL_GET); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + + flags = (params->dir == TF_DIR_TX ? + HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_TX : + HWRM_TF_IF_TBL_GET_INPUT_FLAGS_DIR_RX); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = flags; + req->type = params->hcapi_type; + req->index = cpu_to_le16(params->idx); + req->size = cpu_to_le16(params->data_sz_in_bytes); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + memcpy(¶ms->data[0], resp->data, req->size); + +cleanup: + hwrm_req_drop(bp, req); + return rc; +} + +int tf_msg_set_if_tbl_entry(struct tf *tfp, + struct tf_if_tbl_set_parms *params, + u8 fw_session_id) +{ + struct hwrm_tf_if_tbl_set_input *req = NULL; + struct bnxt *bp = tfp->bp; + u32 flags = 0; + int rc = 0; + + rc = hwrm_req_init(bp, req, HWRM_TF_IF_TBL_SET); + if (rc) + return rc; + + flags = (params->dir == TF_DIR_TX ? 
+ HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_TX : + HWRM_TF_IF_TBL_SET_INPUT_FLAGS_DIR_RX); + + /* Populate the request */ + req->fw_session_id = cpu_to_le32(fw_session_id); + req->flags = flags; + req->type = params->hcapi_type; + req->index = cpu_to_le32(params->idx); + req->size = cpu_to_le32(params->data_sz_in_bytes); + memcpy(&req->data[0], params->data, params->data_sz_in_bytes); + + rc = hwrm_req_send(bp, req); + return rc; +} + +int +tf_msg_get_version(struct bnxt *bp, + struct tf_dev_info *dev, + struct tf_get_version_parms *params) + +{ + struct hwrm_tf_version_get_output *resp; + struct hwrm_tf_version_get_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TF_VERSION_GET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + params->major = resp->major; + params->minor = resp->minor; + params->update = resp->update; + + dev->ops->tf_dev_map_hcapi_caps(resp->dev_caps_cfg, + ¶ms->dev_ident_caps, + ¶ms->dev_tcam_caps, + ¶ms->dev_tbl_caps, + ¶ms->dev_em_caps); + +cleanup: + hwrm_req_drop(bp, req); + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.h new file mode 100644 index 000000000000..f39ba4e49fb3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_msg.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TF_MSG_H_ +#define _TF_MSG_H_ + +#include +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "tf_tbl.h" +#include "tf_rm.h" +#include "tf_tcam.h" +#include "tf_global_cfg.h" +#include "tf_em.h" + +struct tf; + +/* HWRM Direct messages */ + +/** + * tf_msg_session_open: Sends session open request to Firmware + * + * @bp: Pointer to bnxt handle + * @ctrl_chan_name: PCI name of the control channel + * @fw_session_id: Pointer to the fw_session_id that is allocated + * on firmware side (output) + * @fw_session_client_id: Pointer to the fw_session_client_id that + * is allocated on firmware side (output) + * @shared_session_creator: Pointer to the shared_session_creator + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_open(struct bnxt *bp, char *ctrl_chan_name, + u8 *fw_session_id, u8 *fw_session_client_id, + bool *shared_session_creator); + +/** + * tf_msg_session_client_register: Sends session client register request + * to Firmware + * + * @session: Pointer to session handle + * @ctrl_chan_name: PCI name of the control channel + * @fw_session_id: FW session id + * @fw_session_client_id: Pointer to the fw_session_client_id that + * is allocated on firmware side (output) + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_client_register(struct tf *tfp, char *ctrl_channel_name, + u8 fw_session_id, u8 *fw_session_client_id); + +/** + * tf_msg_session_client_unregister: Sends session client unregister + * request to Firmware + * + * @fw_session_id: FW session id + * @fw_session_client_id: Pointer to the fw_session_client_id that + * is allocated on firmware side + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_client_unregister(struct tf *tfp, u8 fw_session_id, + u8 fw_session_client_id); + +/** + * tf_msg_session_close: Sends session close request to Firmware + * @session: Pointer to session handle + * @fw_session_id: 
fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_close(struct tf *tfp, u8 fw_session_id); + +/** + * tf_msg_session_qcfg: Sends session query config request to TF Firmware + * @session: Pointer to session handle + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_qcfg(struct tf *tfp, u8 fw_session_id); + +/** + * tf_msg_session_resc_qcaps: Sends session HW resource query + * capability request to TF Firmware + * + * @tfp: Pointer to TF handle + * @dir: Receive or Transmit direction + * @size: Number of elements in the query. Should be set to the max + * elements for the device type + * @query: Pointer to an array of query elements (output) + * @resv_strategy: Pointer to the reservation strategy (output) + * @sram_profile: Pointer to the sram profile + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_resc_qcaps(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *query, + enum tf_rm_resc_resv_strategy *resv_strategy, + u8 *sram_profile); + +/** + * tf_msg_session_resc_alloc: Sends session HW resource allocation + * request to TF Firmware + * + * @tfp: Pointer to TF handle + * @dir: Receive or Transmit direction + * @size: Number of elements in the req and resv arrays + * @req: Pointer to an array of request elements + * @fw_session_id: fw session id + * @resv: Pointer to an array of reserved elements + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_resc_alloc(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *request, + u8 fw_session_id, struct tf_rm_resc_entry *resv); + +/** + * tf_msg_session_resc_info: Sends session HW resource allocation + * request to TF Firmware + * @tfp: Pointer to TF handle + * @dir: Receive or Transmit direction + * @size: Number of elements in the req and resv arrays + * @req: Pointer to an array of 
request elements + * @fw_session_id: fw session id + * @resv: Pointer to an array of reserved elements + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_resc_info(struct tf *tfp, enum tf_dir dir, u16 size, + struct tf_rm_resc_req_entry *request, + u8 fw_session_id, struct tf_rm_resc_entry *resv); + +/** + * tf_msg_session_resc_flush: Sends session resource flush request + * to TF Firmware + * @tfp: Pointer to TF handle + * @dir: Receive or Transmit direction + * @size: Number of elements in the req and resv arrays + * @fw_session_id: fw session id + * @resv: Pointer to an array of reserved elements that needs to be flushed + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_session_resc_flush(struct tf *tfp, enum tf_dir dir, u16 size, + u8 fw_session_id, struct tf_rm_resc_entry *resv); + +/** + * Sends EM internal insert request to Firmware + * + * @tfp: Pointer to TF handle + * @params: Pointer to em insert parameter list + * @fw_session_id: fw session id + * @rptr_index: Record ptr index + * @rptr_entry: Record ptr entry + * @num_of_entries: Number of entries to insert + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_insert_em_internal_entry(struct tf *tfp, + struct tf_insert_em_entry_parms *em_parms, + u8 fw_session_id, u16 *rptr_index, + u8 *rptr_entry, u8 *num_of_entries); + +/** + * Sends EM hash internal insert request to Firmware + * + * @tfp: Pointer to TF handle + * @params: Pointer to em insert parameter list + * @key0_hash: CRC32 hash of key + * @key1_hash: Lookup3 hash of key + * @fw_session_id: fw session id + * @rptr_index: Record ptr index + * @rptr_entry: Record ptr entry + * @num_of_entries: Number of entries to insert + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_hash_insert_em_internal_entry(struct tf *tfp, + struct tf_insert_em_entry_parms + *em_parms, u32 key0_hash, + u32 key1_hash, u8 fw_session_id, + u16 *rptr_index, 
u8 *rptr_entry, + u8 *num_of_entries); + +/** + * Sends EM internal delete request to Firmware + * + * @tfp: Pointer to TF handle + * @em_parms: Pointer to em delete parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_delete_em_entry(struct tf *tfp, + struct tf_delete_em_entry_parms *em_parms, + u8 fw_session_id); + +/** + * Sends EM internal move request to Firmware + * + * @tfp: Pointer to TF handle + * @em_parms: Pointer to em move parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_move_em_entry(struct tf *tfp, + struct tf_move_em_entry_parms *em_parms, + u8 fw_session_id); + +/** + * Sends tcam entry 'set' to the Firmware. + * + * @tfp: Pointer to session handle + * @parms: Pointer to set parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_tcam_entry_set(struct tf *tfp, struct tf_tcam_set_parms *parms, + u8 fw_session_id); + +/** + * Sends tcam entry 'get' to the Firmware. + * + * @tfp: Pointer to session handle + * @parms: Pointer to get parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_tcam_entry_get(struct tf *tfp, struct tf_tcam_get_parms *parms, + u8 fw_session_id); + +/** + * Sends tcam entry 'free' to the Firmware. + * + * @tfp: Pointer to session handle + * @parms: Pointer to free parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_tcam_entry_free(struct tf *tfp, struct tf_tcam_free_parms *in_parms, + u8 fw_session_id); + +/** + * Sends Set message of a Table Type element to the firmware. 
+ * + * @tfp: Pointer to session handle + * @dir: Direction location of the element to set + * @hcapi_type: Type of the object to set + * @size: Size of the data to set + * @data: Data to set + * @index: Index to set + * @fw_session_id: fw session id + * + * Returns: + * 0 - Success + */ +int tf_msg_set_tbl_entry(struct tf *tfp, enum tf_dir dir, u16 hcapi_type, + u16 size, u8 *data, u32 index, u8 fw_session_id); + +/** + * Sends get message of a Table Type element to the firmware. + * + * @tfp: Pointer to session handle + * @dir: Direction location of the element to get + * @hcapi_type: Type of the object to get + * @size: Size of the data read + * @data: Data read + * @index: Index to get + * @fw_session_id: fw session id + * + * Returns: + * 0 - Success + */ +int tf_msg_get_tbl_entry(struct tf *tfp, enum tf_dir dir, u16 hcapi_type, + u16 size, u8 *data, u32 index, bool clear_on_read, + u8 fw_session_id); + +/* HWRM Tunneled messages */ + +/** + * Sends global cfg read request to Firmware + * + * @tfp: Pointer to TF handle + * @params: Pointer to read parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_get_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *params, + u8 fw_session_id); + +/** + * Sends global cfg update request to Firmware + * + * @tfp: Pointer to TF handle + * @params: Pointer to write parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_set_global_cfg(struct tf *tfp, struct tf_global_cfg_parms *params, + u8 fw_session_id); + +/** + * Sends bulk get message of a Table Type element to the firmware. 
+ * + * @tfp: Pointer to session handle + * @parms: Pointer to table get bulk parameters + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_bulk_get_tbl_entry(struct tf *tfp, enum tf_dir dir, u16 hcapi_type, + u32 starting_idx, u16 num_entries, + u16 entry_sz_in_bytes, u64 physical_mem_addr, + bool clear_on_read); + +/** + * Sends Set message of a IF Table Type element to the firmware. + * + * @tfp: Pointer to session handle + * @parms: Pointer to IF table set parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_set_if_tbl_entry(struct tf *tfp, + struct tf_if_tbl_set_parms *params, + u8 fw_session_id); + +/** + * Sends get message of a IF Table Type element to the firmware. + * + * @tfp: Pointer to session handle + * @parms: Pointer to IF table get parameters + * @fw_session_id: fw session id + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_get_if_tbl_entry(struct tf *tfp, + struct tf_if_tbl_get_parms *params, + u8 fw_session_id); + +/** + * Send get version request to the firmware. + * + * @bp: Pointer to bnxt handle + * @dev: Pointer to the associated device + * @parms: Pointer to the version info parameter + * + * Returns: + * 0 on Success else internal Truflow error + */ +int tf_msg_get_version(struct bnxt *bp, struct tf_dev_info *dev, + struct tf_get_version_parms *parms); + +#endif /* _TF_MSG_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.c new file mode 100644 index 000000000000..f6e93d198d07 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.c @@ -0,0 +1,1399 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include +#include "cfa_resource_types.h" +#include "tf_rm.h" +#include "tf_util.h" +#include "tf_session.h" +#include "tf_device.h" +#include "tf_msg.h" + +/** + * Generic RM Element data type that an RM DB is build upon. + * @cfg_type: RM Element configuration type. If Private then the + * hcapi_type can be ignored. If Null then the element + * is not valid for the device. + * @hcapi_type: HCAPI RM Type for the element. + * @slices: Resource slices. How many slices will fit in the + * resource pool chunk size. + * @alloc: HCAPI RM allocated range information for the element. + * @parent_subtype: If cfg_type == HCAPI_BA_CHILD, this field indicates + * the parent module subtype for look up into the parent + * pool. + * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD + * which is a module subtype of TF_MODULE_TYPE_TABLE. + * @pool: Bit allocator pool for the element. Pool size is + * controlled by the struct tf_session_resources at + * time of session creation. Null indicates that the + * pool is not used for the element. + */ +struct tf_rm_element { + enum tf_rm_elem_cfg_type cfg_type; + u16 hcapi_type; + u8 slices; + struct tf_rm_alloc_info alloc; + u16 parent_subtype; + struct bitalloc *pool; +}; + +/** + * TF RM DB definition + * @num_entries: Number of elements in the DB + * @dir: Direction this DB controls. + * @module: Module type, used for logging purposes. + * @db: The DB consists of an array of elements + */ +struct tf_rm_new_db { + u16 num_entries; + enum tf_dir dir; + enum tf_module_type module; + struct tf_rm_element *db; +}; + +/** + * Adjust an index according to the allocation information. + * + * All resources are controlled in a 0 based pool. Some resources, by + * design, are not 0 based, i.e. Full Action Records (SRAM) thus they + * need to be adjusted before they are handed out. 
+ * + * @cfg: Pointer to the DB configuration + * @reservations: Pointer to the allocation values associated with + * the module + * @count: Number of DB configuration elements + * @valid_count: Number of HCAPI entries with a reservation value + * greater than 0 + * + * Returns: + * 0 - Success + * - EOPNOTSUPP - Operation not supported + */ +static void tf_rm_count_hcapi_reservations(enum tf_dir dir, + enum tf_module_type module, + struct tf_rm_element_cfg *cfg, + u16 *reservations, u16 count, + u16 *valid_count) +{ + u16 cnt = 0; + int i; + + for (i = 0; i < count; i++) { + if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL && + reservations[i] > 0) + cnt++; + + /* Only log msg if a type is attempted reserved and + * not supported. We ignore EM module as its using a + * split configuration array thus it would fail for + * this type of check. + */ + if (module != TF_MODULE_TYPE_EM && + cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL && + reservations[i] > 0) { + netdev_dbg(NULL, + "%s %s %s allocation of %d unsupported\n", + tf_module_2_str(module), tf_dir_2_str(dir), + tf_module_subtype_2_str(module, i), + reservations[i]); + } + } + + *valid_count = cnt; +} + +/* Resource Manager Adjust of base index definitions. */ +enum tf_rm_adjust_type { + TF_RM_ADJUST_ADD_BASE, /* Adds base to the index */ + TF_RM_ADJUST_RM_BASE /* Removes base from the index */ +}; + +/** + * Adjust an index according to the allocation information. + * + * All resources are controlled in a 0 based pool. Some resources, by + * design, are not 0 based, i.e. Full Action Records (SRAM) thus they + * need to be adjusted before they are handed out. + * + * @db: Pointer to the db, used for the lookup + * @action: Adjust action + * @subtype: TF module subtype used as an index into the database. + * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a + * module subtype of TF_MODULE_TYPE_TABLE. 
+ * @index: Index to convert + * @adj_index: Adjusted index + * + * Returns: + * 0 - Success + * - EOPNOTSUPP - Operation not supported + */ +static int tf_rm_adjust_index(struct tf_rm_element *db, + enum tf_rm_adjust_type action, u32 subtype, + u32 index, u32 *adj_index) +{ + u32 base_index; + int rc = 0; + + base_index = db[subtype].alloc.entry.start; + + switch (action) { + case TF_RM_ADJUST_RM_BASE: + *adj_index = index - base_index; + break; + case TF_RM_ADJUST_ADD_BASE: + *adj_index = index + base_index; + break; + default: + return -EOPNOTSUPP; + } + + return rc; +} + +/** + * Logs an array of found residual entries to the console. + * + * @dir: Receive or transmit direction + * @module: Type of Device Module + * @count: Number of entries in the residual array + * @residuals: Pointer to an array of residual entries. Array is index + * same as the DB in which this function is used. Each entry + * holds residual value for that entry. + */ +static void tf_rm_log_residuals(enum tf_dir dir, enum tf_module_type module, + u16 count, u16 *residuals) +{ + int i; + + /* Walk the residual array and log the types that wasn't + * cleaned up to the console. + */ + for (i = 0; i < count; i++) { + if (residuals[i] == 0) + continue; + netdev_dbg(NULL, + "%s, %s was not cleaned up, %d outstanding\n", + tf_dir_2_str(dir), + tf_module_subtype_2_str(module, i), residuals[i]); + } +} + +/** + * Performs a check of the passed in DB for any lingering elements. If + * a resource type was found to not have been cleaned up by the caller + * then its residual values are recorded, logged and passed back in an + * allocate reservation array that the caller can pass to the FW for + * cleanup. + * + * @db: Pointer to the db, used for the lookup + * @resv_size: Pointer to the reservation size of the generated reservation + * array. + * resv: Pointer to a reservation array. The reservation array is + * allocated after the residual scan and holds any found + * residual entries. 
Thus it can be smaller than the DB that
 * the check was performed on. Array must be freed by the caller.
 * @residuals_present: Pointer to a bool flag indicating if residual was
 * present in the DB
 *
 * Returns:
 * 0 - Success
 * - EOPNOTSUPP - Operation not supported
 */
static int tf_rm_check_residuals(struct tf_rm_new_db *rm_db, u16 *resv_size,
				 struct tf_rm_resc_entry **resv,
				 bool *residuals_present)
{
	struct tf_rm_resc_entry *found_list = NULL;
	struct tf_rm_get_inuse_count_parms iparms;
	struct tf_rm_get_alloc_info_parms aparms;
	struct tf_rm_get_hcapi_parms hparms;
	struct tf_rm_alloc_info info;
	u16 *counts = NULL;
	u16 hcapi_type;
	u16 n_found = 0;
	u16 in_use;
	size_t sz;
	int out;
	int idx;
	int rc;

	*residuals_present = false;

	/* One residual counter per DB entry. */
	sz = rm_db->num_entries * sizeof(u16);
	counts = vzalloc(sz);
	if (!counts)
		return -ENOMEM;

	/* Walk the DB and record any element still in use. */
	iparms.rm_db = rm_db;
	iparms.count = &in_use;
	for (idx = 0; idx < rm_db->num_entries; idx++) {
		iparms.subtype = idx;
		rc = tf_rm_get_inuse_count(&iparms);
		/* Not a device supported entry, just skip */
		if (rc == -EOPNOTSUPP)
			continue;
		if (rc)
			goto err_free_counts;

		if (in_use) {
			n_found++;
			counts[idx] = in_use;
			*residuals_present = true;
		}
	}

	if (*residuals_present) {
		/* Populate a reduced resv array with only the entries
		 * that have residuals.
		 */
		sz = n_found * sizeof(struct tf_rm_resc_entry);
		found_list = vzalloc(sz);
		if (!found_list) {
			rc = -ENOMEM;
			goto err_free_counts;
		}

		aparms.rm_db = rm_db;
		hparms.rm_db = rm_db;
		hparms.hcapi_type = &hcapi_type;
		for (idx = 0, out = 0; idx < rm_db->num_entries; idx++) {
			if (counts[idx] == 0)
				continue;
			aparms.subtype = idx;
			aparms.info = &info;
			rc = tf_rm_get_info(&aparms);
			if (rc)
				goto err_free_all;

			hparms.subtype = idx;
			rc = tf_rm_get_hcapi_type(&hparms);
			if (rc)
				goto err_free_all;

			found_list[out].type = hcapi_type;
			found_list[out].start = info.entry.start;
			found_list[out].stride = info.entry.stride;
			out++;
		}
		*resv_size = n_found;
	}

	tf_rm_log_residuals(rm_db->dir,
			    rm_db->module,
			    rm_db->num_entries,
			    counts);
	vfree(counts);
	*resv = found_list;

	return 0;

err_free_all:
	vfree(found_list);
	*resv = NULL;
err_free_counts:
	vfree(counts);

	return rc;
}

/**
 * Some resources do not have a 1:1 mapping between the Truflow type and the
 * cfa resource type (HCAPI RM). These resources have multiple Truflow types
 * which map to a single HCAPI RM type. In order to support this, one Truflow
 * type sharing the HCAPI resources is designated the parent. All other
 * Truflow types associated with that HCAPI RM type are designated the
 * children.
 *
 * This function updates the resource counts of any HCAPI_BA_PARENT with the
 * counts of the HCAPI_BA_CHILDREN. These are read from the alloc_cnt and
 * written back to the req_cnt.
 *
 * @cfg: Pointer to an array of module specific Truflow type indexed
 * RM cfg items
 * @alloc_cnt: Pointer to the tf_open_session() configured array of module
 * specific Truflow type indexed requested counts.
 * @req_cnt: Pointer to the location to put the updated resource counts.
+ * + * Returns: + * 0 - Success + * - - Failure if negative + */ +static int tf_rm_update_parent_reservations(struct tf *tfp, + struct tf_dev_info *dev, + struct tf_rm_element_cfg *cfg, + u16 *alloc_cnt, u16 num_elements, + u16 *req_cnt, + bool shared_session) +{ + const char *type_str = "Invalid"; + int parent, child; + + /* Search through all the elements */ + for (parent = 0; parent < num_elements; parent++) { + u16 combined_cnt = 0; + + /* If I am a parent */ + if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) { + u8 p_slices = 1; + + /* Shared session doesn't support slices */ + if (!shared_session) + p_slices = cfg[parent].slices; + + WARN_ON(!p_slices); + + combined_cnt = alloc_cnt[parent] / p_slices; + + if (alloc_cnt[parent] % p_slices) + combined_cnt++; + + if (alloc_cnt[parent]) { + dev->ops->tf_dev_get_resource_str(tfp, + cfg[parent].hcapi_type, + &type_str); + netdev_dbg(tfp->bp->dev, + "%s:%s cnt(%d) slices(%d)\n", + type_str, tf_tbl_type_2_str(parent), + alloc_cnt[parent], + p_slices); + } + + /* Search again through all the elements */ + for (child = 0; child < num_elements; child++) { + /* If this is one of my children */ + if (cfg[child].cfg_type == + TF_RM_ELEM_CFG_HCAPI_BA_CHILD && + cfg[child].parent_subtype == parent && + alloc_cnt[child]) { + u8 c_slices = 1; + u16 cnt = 0; + + if (!shared_session) + c_slices = cfg[child].slices; + + WARN_ON(!c_slices); + + dev->ops->tf_dev_get_resource_str(tfp, + cfg[child].hcapi_type, + &type_str); + netdev_dbg(tfp->bp->dev, + "%s:%s cnt:%d slices:%d\n", + type_str, + tf_tbl_type_2_str(child), + alloc_cnt[child], + c_slices); + + /* Increment the parents combined + * count with each child's count + * adjusted for number of slices per + * RM alloc item. 
+ */ + cnt = alloc_cnt[child] / c_slices; + + if (alloc_cnt[child] % c_slices) + cnt++; + + combined_cnt += cnt; + /* Clear the requested child count */ + req_cnt[child] = 0; + } + } + /* Save the parent count to be requested */ + req_cnt[parent] = combined_cnt; + netdev_dbg(tfp->bp->dev, "%s calculated total:%d\n\n", + type_str, req_cnt[parent]); + } + } + return 0; +} + +static void tf_rm_dbg_print_resc_qcaps(struct tf *tfp, + struct tf_dev_info *dev, + u16 size, + struct tf_rm_resc_req_entry *query) +{ + u16 i; + + for (i = 0; i < size; i++) { + const char *type_str; + + dev->ops->tf_dev_get_resource_str(tfp, query[i].type, + &type_str); + netdev_dbg(tfp->bp->dev, "type: %2d-%s\tmin:%d max:%d\n", + query[i].type, type_str, query[i].min, + query[i].max); + } +} + +static void tf_rm_dbg_print_resc(struct tf *tfp, struct tf_dev_info *dev, + u16 size, struct tf_rm_resc_entry *resv) +{ + u16 i; + + for (i = 0; i < size; i++) { + const char *type_str; + + dev->ops->tf_dev_get_resource_str(tfp, resv[i].type, &type_str); + netdev_dbg(tfp->bp->dev, + "%2d type: %d-%s\tstart:%d stride:%d\n", + i, resv[i].type, type_str, resv[i].start, + resv[i].stride); + } +} + +int tf_rm_create_db(struct tf *tfp, struct tf_rm_create_db_parms *parms) +{ + enum tf_rm_resc_resv_strategy resv_strategy; + u16 max_types, hcapi_items, *req_cnt = NULL; + struct tf_rm_resc_req_entry *query = NULL; + struct tf_rm_resc_req_entry *req = NULL; + struct tf_rm_resc_entry *resv = NULL; + struct tf_rm_new_db *rm_db = NULL; + struct tf_rm_element *db = NULL; + struct tf_dev_info *dev; + bool shared_session = 0; + struct tf_session *tfs; + u8 fw_session_id; + size_t len; + int i, j; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, 
&fw_session_id); + if (rc) + return rc; + + /* Need device max number of elements for the RM QCAPS */ + rc = dev->ops->tf_dev_get_max_types(tfp, &max_types); + + /* Allocate memory for RM QCAPS request */ + len = max_types * sizeof(struct tf_rm_resc_req_entry); + query = vzalloc(len); + if (!query) { + rc = -ENOMEM; + return rc; + } + + /* Get Firmware Capabilities */ + rc = tf_msg_session_resc_qcaps(tfp, parms->dir, max_types, query, + &resv_strategy, NULL); + if (rc) { + vfree(query); + return rc; + } + + tf_rm_dbg_print_resc_qcaps(tfp, dev, max_types, query); + + /* Copy requested counts (alloc_cnt) from tf_open_session() to local + * copy (req_cnt) so that it can be updated if required. + */ + len = parms->num_elements * sizeof(u16); + req_cnt = vzalloc(len); + if (!req_cnt) { + rc = -ENOMEM; + goto fail; + } + + memcpy(req_cnt, parms->alloc_cnt, parms->num_elements * sizeof(u16)); + + shared_session = tf_session_is_shared_session(tfs); + + /* Update the req_cnt based upon the element configuration */ + tf_rm_update_parent_reservations(tfp, dev, parms->cfg, + parms->alloc_cnt, + parms->num_elements, + req_cnt, + shared_session); + + /* Process capabilities against DB requirements. However, as a + * DB can hold elements that are not HCAPI we can reduce the + * req msg content by removing those out of the request yet + * the DB holds them all as to give a fast lookup. We can also + * remove entries where there are no request for elements. 
+ */ + tf_rm_count_hcapi_reservations(parms->dir, + parms->module, + parms->cfg, + req_cnt, + parms->num_elements, + &hcapi_items); + + if (hcapi_items == 0) { + netdev_dbg(tfp->bp->dev, + "%s: module: %s Empty RM DB create request\n", + tf_dir_2_str(parms->dir), + tf_module_2_str(parms->module)); + parms->rm_db = NULL; + rc = -ENOMEM; + goto fail; + } + + /* Alloc request */ + req = vzalloc(hcapi_items * sizeof(struct tf_rm_resc_req_entry)); + if (!req) { + rc = -ENOMEM; + goto fail; + } + + /* Alloc reservation */ + resv = vzalloc(hcapi_items * sizeof(struct tf_rm_resc_entry)); + if (!resv) { + rc = -ENOMEM; + goto fail; + } + + /* Build the request */ + for (i = 0, j = 0; i < parms->num_elements; i++) { + struct tf_rm_element_cfg *cfg = &parms->cfg[i]; + u16 hcapi_type = cfg->hcapi_type; + + /* Only perform reservation for requested entries + */ + if (req_cnt[i] == 0) + continue; + + + /* Skip any children or invalid entries in the request */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD || + cfg->cfg_type == TF_RM_ELEM_CFG_NULL) + continue; + /* Error if we cannot get the full count based on qcaps */ + if (req_cnt[i] > query[hcapi_type].max) { + const char *type_str; + + dev->ops->tf_dev_get_resource_str(tfp, + hcapi_type, + &type_str); + netdev_dbg(tfp->bp->dev, + "Failure, %s:%d:%s req:%d avail:%d\n", + tf_dir_2_str(parms->dir), + hcapi_type, type_str, req_cnt[i], + query[hcapi_type].max); + rc = -EINVAL; + goto fail; + } + /* Full amount available, fill element request */ + req[j].type = hcapi_type; + req[j].min = req_cnt[i]; + req[j].max = req_cnt[i]; + j++; + } + + /* Allocate all resources for the module type */ + rc = tf_msg_session_resc_alloc(tfp, parms->dir, hcapi_items, req, + fw_session_id, resv); + if (rc) + goto fail; + + tf_rm_dbg_print_resc(tfp, dev, hcapi_items, resv); + + /* Build the RM DB per the request */ + rm_db = vzalloc(sizeof(*rm_db)); + if (!rm_db) { + rc = -ENOMEM; + goto fail; + } + + /* Build the DB within RM DB */ + len = 
parms->num_elements * sizeof(struct tf_rm_element); + rm_db->db = vzalloc(len); + if (!rm_db->db) { + rc = -ENOMEM; + goto fail; + } + + db = rm_db->db; + memset(db, 0, len); + + for (i = 0, j = 0; i < parms->num_elements; i++) { + struct tf_rm_element_cfg *cfg = &parms->cfg[i]; + const char *type_str; + + dev->ops->tf_dev_get_resource_str(tfp, + cfg->hcapi_type, + &type_str); + + db[i].cfg_type = cfg->cfg_type; + db[i].hcapi_type = cfg->hcapi_type; + db[i].slices = cfg->slices; + + /* Save the parent subtype for later use to find the pool */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + db[i].parent_subtype = cfg->parent_subtype; + + /* If the element didn't request an allocation no need + * to create a pool nor verify if we got a reservation. + */ + if (req_cnt[i] == 0) + continue; + + /* Skip any children or invalid */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD || + cfg->cfg_type == TF_RM_ELEM_CFG_NULL) + continue; + + /* If the element had requested an allocation and that + * allocation was a success (full amount) then + * allocate the pool. + */ + if (req_cnt[i] == resv[j].stride) { + db[i].alloc.entry.start = resv[j].start; + db[i].alloc.entry.stride = resv[j].stride; + + /* Only alloc BA pool if a BA type but not BA_CHILD */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA || + cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) { + /* Create pool */ + db[i].pool = vzalloc(sizeof(*db[i].pool)); + if (!db[i].pool) { + rc = -ENOMEM; + goto fail_db; + } + + rc = bnxt_ba_init(db[i].pool, resv[j].stride, + true); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Pool init failed rc:%d, type:%d:%s\n", + tf_dir_2_str(parms->dir), + rc, + cfg->hcapi_type, type_str); + goto fail_db; + } + } + j++; + } else { + /* Bail out as we want what we requested for + * all elements, not any less. 
+ */ + netdev_dbg(tfp->bp->dev, + "%s: Alloc failed %d:%s req:%d, alloc:%d\n", + tf_dir_2_str(parms->dir), cfg->hcapi_type, + type_str, req_cnt[i], resv[j].stride); + rc = -EINVAL; + goto fail_db; + } + } + + rm_db->num_entries = parms->num_elements; + rm_db->dir = parms->dir; + rm_db->module = parms->module; + *parms->rm_db = (void *)rm_db; + + netdev_dbg(tfp->bp->dev, "%s: module:%s\n", tf_dir_2_str(parms->dir), + tf_module_2_str(parms->module)); + + vfree(req); + vfree(resv); + vfree(req_cnt); + return 0; + +fail_db: + for (i = 0; i < parms->num_elements; i++) { + if (!rm_db->db[i].pool) + continue; + bnxt_ba_deinit(rm_db->db[i].pool); + vfree(rm_db->db[i].pool); + } +fail: + vfree(req); + vfree(resv); + vfree(db); + vfree(rm_db); + vfree(req_cnt); + vfree(query); + parms->rm_db = NULL; + + return rc; +} + +int tf_rm_create_db_no_reservation(struct tf *tfp, + struct tf_rm_create_db_parms *parms) +{ + struct tf_rm_resc_req_entry *req = NULL; + struct tf_rm_resc_entry *resv = NULL; + struct tf_rm_new_db *rm_db = NULL; + u16 hcapi_items, *req_cnt = NULL; + struct tf_rm_element *db = NULL; + bool shared_session = 0; + struct tf_dev_info *dev; + struct tf_session *tfs; + u8 fw_session_id; + size_t len; + int i, j; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + /* Copy requested counts (alloc_cnt) from tf_open_session() to local + * copy (req_cnt) so that it can be updated if required. 
+ */ + len = parms->num_elements * sizeof(u16); + req_cnt = vzalloc(len); + if (!req_cnt) { + rc = -ENOMEM; + return rc; + } + + memcpy(req_cnt, parms->alloc_cnt, len); + + shared_session = tf_session_is_shared_session(tfs); + + /* Update the req_cnt based upon the element configuration */ + tf_rm_update_parent_reservations(tfp, dev, parms->cfg, + parms->alloc_cnt, parms->num_elements, + req_cnt, shared_session); + /* Process capabilities against DB requirements. However, as a + * DB can hold elements that are not HCAPI we can reduce the + * req msg content by removing those out of the request yet + * the DB holds them all as to give a fast lookup. We can also + * remove entries where there are no request for elements. + */ + tf_rm_count_hcapi_reservations(parms->dir, + parms->module, + parms->cfg, + req_cnt, + parms->num_elements, + &hcapi_items); + + if (hcapi_items == 0) { + netdev_dbg(tfp->bp->dev, + "%s: module:%s Empty RM DB create request\n", + tf_dir_2_str(parms->dir), + tf_module_2_str(parms->module)); + + parms->rm_db = NULL; + rc = -ENOMEM; + goto fail; + } + + /* Alloc request */ + req = vzalloc(hcapi_items * sizeof(struct tf_rm_resc_req_entry)); + if (!req) { + rc = -ENOMEM; + goto fail; + } + + /* Alloc reservation */ + resv = vzalloc(hcapi_items * sizeof(struct tf_rm_resc_entry)); + if (!resv) { + rc = -ENOMEM; + goto fail; + } + + /* Build the request */ + for (i = 0, j = 0; i < parms->num_elements; i++) { + struct tf_rm_element_cfg *cfg = &parms->cfg[i]; + u16 hcapi_type = cfg->hcapi_type; + + /* Only perform reservation for requested entries */ + if (req_cnt[i] == 0) + continue; + + /* Skip any children in the request */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI || + cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA || + cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) { + req[j].type = hcapi_type; + req[j].min = req_cnt[i]; + req[j].max = req_cnt[i]; + j++; + } + } + + /* Get all resources info for the module type */ + rc = tf_msg_session_resc_info(tfp, 
parms->dir, hcapi_items, req, + fw_session_id, resv); + if (rc) + goto fail; + + tf_rm_dbg_print_resc(tfp, dev, hcapi_items, resv); + + /* Build the RM DB per the request */ + rm_db = vzalloc(sizeof(*rm_db)); + if (!rm_db) { + rc = -ENOMEM; + goto fail; + } + + /* Build the DB within RM DB */ + len = parms->num_elements * sizeof(struct tf_rm_element); + rm_db->db = vzalloc(len); + if (!rm_db->db) { + rc = -ENOMEM; + goto fail; + } + + db = rm_db->db; + memset(db, 0, len); + + for (i = 0, j = 0; i < parms->num_elements; i++) { + struct tf_rm_element_cfg *cfg = &parms->cfg[i]; + const char *type_str; + + dev->ops->tf_dev_get_resource_str(tfp, + cfg->hcapi_type, + &type_str); + + db[i].cfg_type = cfg->cfg_type; + db[i].hcapi_type = cfg->hcapi_type; + db[i].slices = cfg->slices; + + /* Save the parent subtype for later use to find the pool */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + db[i].parent_subtype = cfg->parent_subtype; + + /* If the element didn't request an allocation no need + * to create a pool nor verify if we got a reservation. + */ + if (req_cnt[i] == 0) + continue; + + /* Skip any children or invalid */ + if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI && + cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT) + continue; + + /* If the element had requested an allocation and that + * allocation was a success (full amount) then + * allocate the pool. 
+ */ + if (req_cnt[i] == resv[j].stride) { + db[i].alloc.entry.start = resv[j].start; + db[i].alloc.entry.stride = resv[j].stride; + + /* Only allocate BA pool if a BA type not a child */ + if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA || + cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) { + db[i].pool = vzalloc(sizeof(*db[i].pool)); + if (!db[i].pool) { + rc = -ENOMEM; + goto fail; + } + + rc = bnxt_ba_init(db[i].pool, + resv[j].stride, + true); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Pool init failed rc:%d, type:%d:%s\n", + tf_dir_2_str(parms->dir), + rc, + cfg->hcapi_type, type_str); + goto fail; + } + } + j++; + } else { + /* Bail out as we want what we requested for + * all elements, not any less. + */ + netdev_dbg(tfp->bp->dev, + "%s: Alloc failed %d:%s req:%d alloc:%d\n", + tf_dir_2_str(parms->dir), cfg->hcapi_type, + type_str, req_cnt[i], resv[j].stride); + rc = -EINVAL; + goto fail; + } + } + + rm_db->num_entries = parms->num_elements; + rm_db->dir = parms->dir; + rm_db->module = parms->module; + *parms->rm_db = (void *)rm_db; + + netdev_dbg(tfp->bp->dev, "%s: module:%s\n", tf_dir_2_str(parms->dir), + tf_module_2_str(parms->module)); + + vfree(req); + vfree(resv); + vfree(req_cnt); + return 0; + + fail: + vfree(req); + vfree(resv); + if (rm_db && rm_db->db) { + for (i = 0; i < parms->num_elements; i++) { + if (!rm_db->db[i].pool) + continue; + bnxt_ba_deinit(rm_db->db[i].pool); + vfree(rm_db->db[i].pool); + } + } + vfree(db); + vfree(rm_db); + vfree(req_cnt); + parms->rm_db = NULL; + + return rc; +} + +/* Device unbind happens when the TF Session is closed and the + * session ref count is 0. Device unbind will cleanup each of + * its support modules, i.e. Identifier, thus we're ending up + * here to close the DB. + * + * On TF Session close it is assumed that the session has already + * cleaned up all its resources, individually, while + * destroying its flows. 
+ * + * To assist in the 'cleanup checking' the DB is checked for any + * remaining elements and logged if found to be the case. + * + * Any such elements will need to be 'cleared' ahead of + * returning the resources to the HCAPI RM. + * + * RM will signal FW to flush the DB resources. FW will + * perform the invalidation. TF Session close will return the + * previous allocated elements to the RM and then close the + * HCAPI RM registration. That then saves several 'free' msgs + * from being required. + */ +int tf_rm_free_db(struct tf *tfp, struct tf_rm_free_db_parms *parms) +{ + struct tf_rm_resc_entry *resv; + bool residuals_found = false; + struct tf_rm_new_db *rm_db; + u16 resv_size = 0; + u8 fw_session_id; + int rc; + int i; + + if (!parms || !parms->rm_db) + return -EINVAL; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rm_db = (struct tf_rm_new_db *)parms->rm_db; + + /* Check for residuals that the client didn't clean up */ + rc = tf_rm_check_residuals(rm_db, &resv_size, &resv, &residuals_found); + if (rc) + goto cleanup; + + /* Invalidate any residuals followed by a DB traversal for + * pool cleanup. + */ + if (residuals_found) { + rc = tf_msg_session_resc_flush(tfp, parms->dir, resv_size, + fw_session_id, resv); + vfree(resv); + /* On failure we still have to cleanup so we can only + * log that FW failed. 
+ */ + if (rc) + netdev_dbg(tfp->bp->dev, + "%s: Internal Flush error, module:%s\n", + tf_dir_2_str(parms->dir), + tf_module_2_str(rm_db->module)); + } + + cleanup: + /* No need to check for configuration type, even if we do not + * have a BA pool we just delete on a null ptr, no harm + */ + for (i = 0; i < rm_db->num_entries; i++) { + bnxt_ba_deinit(rm_db->db[i].pool); + vfree(rm_db->db[i].pool); + } + + vfree(rm_db->db); + vfree(parms->rm_db); + + return rc; +} + +/** + * Get the bit allocator pool associated with the subtype and the db + * + * @rm_db: Pointer to the DB + * @subtype: Module subtype used to index into the module specific + * database. An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD + * which is a module subtype of TF_MODULE_TYPE_TABLE. + * @pool: Pointer to the bit allocator pool used + * @new_subtype: Pointer to the subtype of the actual pool used + * + * Returns: + * 0 - Success + * - EOPNOTSUPP - Operation not supported + */ +static int tf_rm_get_pool(struct tf_rm_new_db *rm_db, u16 subtype, + struct bitalloc **pool, u16 *new_subtype) +{ + u16 tmp_subtype = subtype; + int rc = 0; + + /* If we are a child, get the parent table index */ + if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + tmp_subtype = rm_db->db[subtype].parent_subtype; + + *pool = rm_db->db[tmp_subtype].pool; + + /* Bail out if the pool is not valid, should never happen */ + if (!rm_db->db[tmp_subtype].pool) { + rc = -EOPNOTSUPP; + netdev_dbg(NULL, "%s: Invalid pool for this type:%d, rc:%d\n", + tf_dir_2_str(rm_db->dir), tmp_subtype, rc); + return rc; + } + *new_subtype = tmp_subtype; + return rc; +} + +int tf_rm_allocate(struct tf_rm_allocate_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + struct bitalloc *pool; + u16 subtype; + u32 index; + int rc; + int id; + + if (!parms || !parms->rm_db) + return -EINVAL; + + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = 
rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + return -EOPNOTSUPP; + + rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype); + if (rc) + return rc; + /* priority 0: allocate from top of the tcam i.e. high + * priority !0: allocate index from bottom i.e lowest + */ + if (parms->priority) + id = bnxt_ba_alloc_reverse(pool); + else + id = bnxt_ba_alloc(pool); + if (id < 0) { + rc = -ENOSPC; + netdev_dbg(NULL, "%s: Allocation failed, rc:%d\n", + tf_dir_2_str(rm_db->dir), rc); + return rc; + } + + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_ADD_BASE, + subtype, + id, + &index); + if (rc) { + netdev_dbg(NULL, + "%s: Alloc adjust of base index failed, rc:%d\n", + tf_dir_2_str(rm_db->dir), rc); + return -EINVAL; + } + + *parms->index = index; + if (parms->base_index) + *parms->base_index = id; + + return rc; +} + +int tf_rm_free(struct tf_rm_free_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + struct bitalloc *pool; + u32 adj_index; + u16 subtype; + int rc; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + return -EOPNOTSUPP; + + rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype); + if (rc) + return rc; + + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_RM_BASE, + subtype, + parms->index, + &adj_index); + if (rc) + return rc; + + rc = bnxt_ba_free(pool, adj_index); + /* No logging direction matters and that is not available here */ + if (rc) + 
return rc; + + return rc; +} + +int +tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + struct bitalloc *pool; + u32 adj_index; + u16 subtype; + int rc; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by RM */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + return -EOPNOTSUPP; + + rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype); + if (rc) + return rc; + + /* Adjust for any non zero start value */ + rc = tf_rm_adjust_index(rm_db->db, + TF_RM_ADJUST_RM_BASE, + subtype, + parms->index, + &adj_index); + if (rc) + return rc; + + if (parms->base_index) + *parms->base_index = adj_index; + + *parms->allocated = bnxt_ba_inuse(pool, adj_index); + + return rc; +} + +int +tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by HCAPI */ + if (cfg_type == TF_RM_ELEM_CFG_NULL) + return -EOPNOTSUPP; + + memcpy(parms->info, &rm_db->db[parms->subtype].alloc, + sizeof(struct tf_rm_alloc_info)); + + return 0; +} + +int tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size) +{ + struct tf_rm_get_alloc_info_parms gparms; + int i; + int rc; + + if (!parms) + return -EINVAL; + + gparms = *parms; + + /* Walk through all items */ + for (i = 0; i < size; i++) { + + gparms.subtype = i; + + /* Get subtype info */ + rc = tf_rm_get_info(&gparms); + + if (rc && (rc != -EOPNOTSUPP)) + return rc; + + /* Next subtype memory location */ + 
gparms.info++; + } + return 0; +} + +int tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by HCAPI */ + if (cfg_type == TF_RM_ELEM_CFG_NULL) + return -EOPNOTSUPP; + + *parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type; + + return 0; +} + +int tf_rm_get_slices(struct tf_rm_get_slices_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not controlled by HCAPI */ + if (cfg_type == TF_RM_ELEM_CFG_NULL) + return -EOPNOTSUPP; + + *parms->slices = rm_db->db[parms->subtype].slices; + + return 0; +} + +int tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + int rc = 0; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not a BA pool */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + return -EOPNOTSUPP; + + /* Bail silently (no logging), if the pool is not valid there + * was no elements allocated for it. 
+ */ + if (!rm_db->db[parms->subtype].pool) { + *parms->count = 0; + return 0; + } + + *parms->count = bnxt_ba_inuse_count(rm_db->db[parms->subtype].pool); + + return rc; +} + +/* Only used for table bulk get at this time + */ +int tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms + *parms) +{ + enum tf_rm_elem_cfg_type cfg_type; + struct tf_rm_new_db *rm_db; + struct bitalloc *pool; + u32 base_index; + u16 subtype; + u32 stride; + int rc = 0; + + if (!parms || !parms->rm_db) + return -EINVAL; + rm_db = (struct tf_rm_new_db *)parms->rm_db; + if (!rm_db->db) + return -EINVAL; + + cfg_type = rm_db->db[parms->subtype].cfg_type; + + /* Bail out if not a BA pool */ + if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT && + cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD) + return -EOPNOTSUPP; + + rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype); + if (rc) + return rc; + + base_index = rm_db->db[subtype].alloc.entry.start; + stride = rm_db->db[subtype].alloc.entry.stride; + + if (parms->starting_index < base_index || + parms->starting_index + parms->num_entries > base_index + stride) + return -EINVAL; + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.h new file mode 100644 index 000000000000..d0ffeb4600c2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_rm.h @@ -0,0 +1,453 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef TF_RM_NEW_H_ +#define TF_RM_NEW_H_ + +#include "tf_core.h" +#include "bitalloc.h" +#include "tf_device.h" + +struct tf; + +/* RM return codes */ +#define TF_RM_ALLOCATED_ENTRY_FREE 0 +#define TF_RM_ALLOCATED_ENTRY_IN_USE 1 +#define TF_RM_ALLOCATED_NO_ENTRY_FOUND -1 + +/** + * The Resource Manager (RM) module provides basic DB handling for + * internal resources. 
These resources exists within the actual device + * and are controlled by the HCAPI Resource Manager running on the + * firmware. + * + * The RM DBs are all intended to be indexed using TF types there for + * a lookup requires no additional conversion. The DB configuration + * specifies the TF Type to HCAPI Type mapping and it becomes the + * responsibility of the DB initialization to handle this static + * mapping. + * + * Accessor functions are providing access to the DB, thus hiding the + * implementation. + * + * The RM DB will work on its initial allocated sizes so the + * capability of dynamically growing a particular resource is not + * possible. If this capability later becomes a requirement then the + * MAX pool size of the chip needs to be added to the tf_rm_elem_info + * structure and several new APIs would need to be added to allow for + * growth of a single TF resource type. + * + * The access functions do not check for NULL pointers as they are a + * support module, not called directly. + */ + +/** + * RM Element configuration enumeration. Used by the Device to + * indicate how the RM elements the DB consists off, are to be + * configured at time of DB creation. The TF may present types to the + * ULP layer that is not controlled by HCAPI within the Firmware. + */ +enum tf_rm_elem_cfg_type { + TF_RM_ELEM_CFG_NULL, /* No configuration */ + TF_RM_ELEM_CFG_HCAPI, /* HCAPI 'controlled', no RM storage so + * the module using the RM can chose to + * handle storage locally. + */ + TF_RM_ELEM_CFG_HCAPI_BA, /* HCAPI 'controlled', uses a bit + * allocator pool for internal + * storage in the RM. + */ + TF_RM_ELEM_CFG_HCAPI_BA_PARENT, /* HCAPI 'controlled', uses a bit + * allocator pool for internal storage + * in the RM but multiple TF types map + * to a single HCAPI type. Parent + * manages the table. 
+ */ + TF_RM_ELEM_CFG_HCAPI_BA_CHILD, /* HCAPI 'controlled', uses a bit + * allocator pool for internal storage + * in the RM but multiple TF types map + * to a single HCAPI type. Child + * accesses the parent db. + */ + TF_RM_TYPE_MAX +}; + +/* RM Reservation strategy enumeration. Type of strategy comes from + * the HCAPI RM QCAPS handshake. + */ +enum tf_rm_resc_resv_strategy { + TF_RM_RESC_RESV_STATIC_PARTITION, + TF_RM_RESC_RESV_STRATEGY_1, + TF_RM_RESC_RESV_STRATEGY_2, + TF_RM_RESC_RESV_STRATEGY_3, + TF_RM_RESC_RESV_MAX +}; + +/** + * RM Element configuration structure, used by the Device to configure + * how an individual TF type is configured in regard to the HCAPI RM + * of same type. + * + * @cfg_type: RM Element config controls how the DB for that element is + * processed. + * @hcapi_type: HCAPI RM Type for the element. Used for + * TF to HCAPI type conversion. + * @parent_subtype: if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT + * Parent Truflow module subtype associated with this + * resource type. + * @slices: if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD/PARENT + * Resource slices. How many slices will fit in the + * resource pool chunk size. + */ +struct tf_rm_element_cfg { + enum tf_rm_elem_cfg_type cfg_type; + u16 hcapi_type; + u16 parent_subtype; + u8 slices; +}; + +/** + * Allocation information for a single element. + * @entry: HCAPI RM allocated range information. + * NOTE: In case of dynamic allocation support this would have + * to be changed to linked list of tf_rm_entry instead. + */ +struct tf_rm_alloc_info { + struct tf_resource_info entry; +}; + +/** + * Create RM DB parameters + * + * @module: Module type. Used for logging purposes. + * @dir: Receive or transmit direction. + * @num_elements: Number of elements. + * @cfg: Parameter structure array. Array size is num_elements. + * @alloc_cnt: Resource allocation count array. This array content + * originates from the tf_session_resources that is passed in + * on session open. 
Array size is num_elements. + * @rm_db: RM DB Handle[out] + */ +struct tf_rm_create_db_parms { + enum tf_module_type module; + enum tf_dir dir; + u16 num_elements; + struct tf_rm_element_cfg *cfg; + u16 *alloc_cnt; + void **rm_db; +}; + +/** + * Free RM DB parameters + * + * @dir: Receive or transmit direction + * @rm_db: RM DB Handle + */ +struct tf_rm_free_db_parms { + enum tf_dir dir; + void *rm_db; +}; + +/** + * Allocate RM parameters for a single element + * + * @rm_db: RM DB Handle + * @subtype: Module subtype indicates which DB entry to perform the + * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @index: Pointer to the allocated index in normalized form. Normalized + * means the index has been adjusted, i.e. Full Action Record + * offsets. + * @priority: Priority, indicates the priority of the entry + * priority 0: allocate from top of the tcam (from index 0 + * or lowest available index) priority !0: allocate from bottom + * of the tcam (from highest available index) + * @base_index: Pointer to the allocated index before adjusted. + */ +struct tf_rm_allocate_parms { + void *rm_db; + u16 subtype; + u32 *index; + u32 priority; + u32 *base_index; +}; + +/** + * Free RM parameters for a single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the action on. + * (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @index: Index to free + */ +struct tf_rm_free_parms { + void *rm_db; + u16 subtype; + u16 index; +}; + +/** + * Is Allocated parameters for a single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the + * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @index: Index to free + * @allocated: Pointer to flag that indicates the state of the query + * @base_index: Pointer to the allocated index before adjusted. 
+ */ +struct tf_rm_is_allocated_parms { + void *rm_db; + u16 subtype; + u32 index; + int *allocated; + u32 *base_index; +}; + +/** + * Get Allocation information for a single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the + * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @info: Pointer to the requested allocation information for + * the specified subtype + */ +struct tf_rm_get_alloc_info_parms { + void *rm_db; + u16 subtype; + struct tf_rm_alloc_info *info; +}; + +/** + * Get HCAPI type parameters for a single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the + * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @hcapi_type: Pointer to the hcapi type for the specified subtype[out] + */ +struct tf_rm_get_hcapi_parms { + void *rm_db; + u16 subtype; + u16 *hcapi_type; +}; + +/** + * Get Slices parameters for a single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the action on. + * (e.g. TF_TBL_TYPE_FULL_ACTION subtype of module + * TF_MODULE_TYPE_TABLE) + * @slices: Pointer to number of slices for the given type + */ +struct tf_rm_get_slices_parms { + void *rm_db; + u16 subtype; + u16 *slices; +}; + +/** + * Get InUse count parameters for single element + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the + * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @count: Pointer to the inuse count for the specified subtype[out] + */ +struct tf_rm_get_inuse_count_parms { + void *rm_db; + u16 subtype; + u16 *count; +}; + +/** + * Check if the indexes are in the range of reserved resource + * + * @rm_db: RM DB Handle + * @subtype: TF subtype indicates which DB entry to perform the + * action on. (e.g. 
TF_TCAM_TBL_TYPE_L2_CTXT subtype of module + * TF_MODULE_TYPE_TCAM) + * @starting_index: Starting index + * @num_entries: number of entries + * + */ +struct tf_rm_check_indexes_in_range_parms { + void *rm_db; + u16 subtype; + u16 starting_index; + u16 num_entries; +}; + +/** + * Creates and fills a Resource Manager (RM) DB with requested + * elements. The DB is indexed per the parms structure. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to create parameters + * + * NOTE: + * - Fail on parameter check + * - Fail on DB creation, i.e. alloc amount is not possible or validation fails + * - Fail on DB creation if DB already exist + * + * - Allocs local DB + * - Does hcapi qcaps + * - Does hcapi reservation + * - Populates the pool with allocated elements + * - Returns handle to the created DB + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_create_db(struct tf *tfp, struct tf_rm_create_db_parms *parms); + +/** + * Creates and fills a Resource Manager (RM) DB with requested + * elements. The DB is indexed per the parms structure. It only retrieve + * allocated resource information for a exist session. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to create parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_create_db_no_reservation(struct tf *tfp, + struct tf_rm_create_db_parms *parms); + +/** + * Closes the Resource Manager (RM) DB and frees all allocated + * resources per the associated database. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to free parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_free_db(struct tf *tfp, struct tf_rm_free_db_parms *parms); + +/** + * Allocates a single element for the type specified, within the DB. + * + * @parms: Pointer to allocate parameters + * + * Returns + * - (0) if successful. 
+ * - (-EINVAL) on failure. + * - (-ENOMEM) if pool is empty + */ +int tf_rm_allocate(struct tf_rm_allocate_parms *parms); + +/** + * Free's a single element for the type specified, within the DB. + * + * @parms: Pointer to free parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_free(struct tf_rm_free_parms *parms); + +/** + * Performs an allocation verification check on a specified element. + * + * @parms: Pointer to is allocated parameters + * + * NOTE: + * - If pool is set to Chip MAX, then the query index must be checked + * against the allocated range and query index must be allocated as well. + * - If pool is allocated size only, then check if query index is allocated. + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms); + +/** + * Retrieves an elements allocation information from the Resource + * Manager (RM) DB. + * + * @parms: Pointer to get info parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms); + +/** + * Retrieves all elements allocation information from the Resource + * Manager (RM) DB. + * + * @parms: Pointer to get info parameters + * @size: number of the elements for the specific module + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_get_all_info(struct tf_rm_get_alloc_info_parms *parms, int size); + +/** + * Performs a lookup in the Resource Manager DB and retrieves the + * requested HCAPI RM type. + * + * @parms: Pointer to get hcapi parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms); + +/** + * Performs a lookup in the Resource Manager DB and retrieves the + * requested HCAPI RM type inuse count. + * + * @parms: Pointer to get inuse parameters + * + * Returns + * - (0) if successful. 
+ * - (-EINVAL) on failure. + */ +int tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms); + +/** + * Check if the requested indexes are in the range of reserved resource. + * + * @parms: Pointer to get inuse parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms + *parms); + +/** + * Get the number of slices per resource bit allocator for the resource type + * + * @parms: Pointer to get inuse parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_rm_get_slices(struct tf_rm_get_slices_parms *parms); + +#endif /* TF_RM_NEW_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.c new file mode 100644 index 000000000000..62970e369b27 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.c @@ -0,0 +1,952 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include "tf_session.h" +#include "tf_msg.h" +#include "bnxt.h" + +struct tf_session_client_create_parms { + char *ctrl_chan_name; /* Control channel name string */ + union tf_session_client_id *session_client_id; /* Firmware Session + * Client ID (out) + */ +}; + +struct tf_session_client_destroy_parms { + union tf_session_client_id session_client_id; /* Firmware Session + * Client ID (out) + */ +}; + +static int tfp_get_fid(struct tf *tfp, uint16_t *fw_fid) +{ + struct bnxt *bp = NULL; + + if (!tfp || !fw_fid) + return -EINVAL; + + bp = (struct bnxt *)tfp->bp; + if (!bp) + return -EINVAL; + + *fw_fid = bp->pf.fw_fid; + + return 0; +} + +/** + * Creates a Session and the associated client. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to session client create parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ * - (-ENOMEM) if max session clients has been reached. + */ +static int tf_session_create(struct tf *tfp, + struct tf_session_open_session_parms *parms) +{ + struct tf_session *session = NULL; + struct tf_session_client *client; + union tf_session_id *session_id; + bool shared_session_creator; + u8 fw_session_client_id; + struct tf_dev_info dev; + char *shared_name; + u8 fw_session_id; + void *core_data; + int rc; + + if (!tfp || !parms) + return -EINVAL; + tf_dev_bind_ops(parms->open_cfg->device_type, + &dev); + + /* Open FW session and get a new session_id */ + rc = tf_msg_session_open(parms->open_cfg->bp, + parms->open_cfg->ctrl_chan_name, + &fw_session_id, + &fw_session_client_id, + &shared_session_creator); + if (rc) { + /* Log error */ + if (rc == -EEXIST) { + netdev_dbg(tfp->bp->dev, + "Session is already open, rc:%d\n", rc); + } else { + netdev_dbg(tfp->bp->dev, + "Open message send failed, rc:%d\n", rc); + } + + parms->open_cfg->session_id.id = TF_FW_SESSION_ID_INVALID; + return rc; + } + /* Allocate session */ + tfp->session = vzalloc(sizeof(*tfp->session)); + if (!tfp->session) { + rc = -ENOMEM; + goto cleanup_fw_session; + } + + /* Allocate core data for the session */ + core_data = vzalloc(sizeof(*session)); + if (!core_data) { + rc = -ENOMEM; + goto cleanup_session; + } + + tfp->session->core_data = core_data; + session_id = &parms->open_cfg->session_id; + + /* Update Session Info, which is what is visible to the caller */ + tfp->session->ver.major = 0; + tfp->session->ver.minor = 0; + tfp->session->ver.update = 0; + + tfp->session->session_id.internal.domain = session_id->internal.domain; + tfp->session->session_id.internal.bus = session_id->internal.bus; + tfp->session->session_id.internal.device = session_id->internal.device; + tfp->session->session_id.internal.fw_session_id = fw_session_id; + + /* Initialize Session and Device, which is private */ + session = (struct tf_session *)tfp->session->core_data; + session->ver.major = 0; + 
session->ver.minor = 0; + session->ver.update = 0; + + session->session_id.internal.domain = session_id->internal.domain; + session->session_id.internal.bus = session_id->internal.bus; + session->session_id.internal.device = session_id->internal.device; + session->session_id.internal.fw_session_id = fw_session_id; + /* Return the allocated session id */ + session_id->id = session->session_id.id; + + /* Init session client list */ + INIT_LIST_HEAD(&session->client_ll); + + /* Create the local session client, initialize and attach to + * the session + */ + client = vzalloc(sizeof(*client)); + if (!client) { + rc = -ENOMEM; + goto cleanup_core_data; + } + + /* Register FID with the client */ + rc = tfp_get_fid(tfp, &client->fw_fid); + if (rc) + goto cleanup_client; + + client->session_client_id.internal.fw_session_id = fw_session_id; + client->session_client_id.internal.fw_session_client_id = + fw_session_client_id; + + memcpy(client->ctrl_chan_name, parms->open_cfg->ctrl_chan_name, + TF_SESSION_NAME_MAX); + + list_add(&client->ll_entry, &session->client_ll); + session->ref_count++; + + /* Init session em_ext_db */ + session->em_ext_db_handle = NULL; + + /* Populate the request */ + shared_name = strstr(parms->open_cfg->ctrl_chan_name, "tf_shared"); + if (shared_name) + session->shared_session = true; + + if (session->shared_session && shared_session_creator) { + session->shared_session_creator = true; + parms->open_cfg->shared_session_creator = true; + } + + rc = tf_dev_bind(tfp, + parms->open_cfg->device_type, + &parms->open_cfg->resources, + parms->open_cfg->wc_num_slices, + &session->dev); + + /* Logging handled by dev_bind */ + if (rc) + goto cleanup_client; + + session->dev_init = true; + + return 0; + +cleanup_client: + vfree(client); +cleanup_core_data: + vfree(tfp->session->core_data); +cleanup_session: + vfree(tfp->session); + tfp->session = NULL; +cleanup_fw_session: + if (tf_msg_session_close(tfp, fw_session_id)) + netdev_dbg(tfp->bp->dev, "FW Session 
close failed"); + + return rc; +} + +/** + * Creates a Session Client on an existing Session. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to session client create parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + * - (-ENOMEM) if max session clients has been reached. + */ +static int tf_session_client_create(struct tf *tfp, + struct tf_session_client_create_parms + *parms) +{ + union tf_session_client_id session_client_id; + struct tf_session *session = NULL; + struct tf_session_client *client; + u8 fw_session_id; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Using internal version as session client may not exist yet */ + rc = tf_session_get_session_internal(tfp, &session); + if (rc) { + netdev_dbg(tfp->bp->dev, "Failed to lookup session, rc:%d\n", + rc); + return rc; + } + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Session Firmware id lookup failed, rc:%d\n", rc); + return rc; + } + + client = tf_session_find_session_client_by_name(session, + parms->ctrl_chan_name); + if (client) { + netdev_dbg(tfp->bp->dev, + "Client %s already registered with this session\n", + parms->ctrl_chan_name); + return -EOPNOTSUPP; + } + + rc = tf_msg_session_client_register + (tfp, + parms->ctrl_chan_name, + fw_session_id, + &session_client_id.internal.fw_session_client_id); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to create client on session, rc:%d\n", rc); + return rc; + } + + /* Create the local session client, initialize and attach to + * the session + */ + client = vzalloc(sizeof(*client)); + if (!client) { + rc = -ENOMEM; + goto cleanup; + } + + /* Register FID with the client */ + rc = tfp_get_fid(tfp, &client->fw_fid); + if (rc) + return rc; + + /* Build the Session Client ID by adding the fw_session_id */ + session_client_id.internal.fw_session_id = fw_session_id; + memcpy(client->ctrl_chan_name, parms->ctrl_chan_name, + TF_SESSION_NAME_MAX); + + 
client->session_client_id.id = session_client_id.id; + + list_add(&client->ll_entry, &session->client_ll); + + session->ref_count++; + + /* Build the return value */ + parms->session_client_id->id = session_client_id.id; + + cleanup: + /* TBD - Add code to unregister newly create client from fw */ + + return rc; +} + +/** + * Destroys a Session Client on an existing Session. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to the session client destroy parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + * - (-ENOTFOUND) error, client not owned by the session. + * - (-EOPNOTSUPP) error, unable to destroy client as its the last + * client. Please use the tf_session_close(). + */ +static int tf_session_client_destroy(struct tf *tfp, + struct tf_session_client_destroy_parms + *parms) +{ + struct tf_session_client *client; + u8 fw_session_client_id; + struct tf_session *tfs; + u8 fw_session_id; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + fw_session_client_id = + parms->session_client_id.internal.fw_session_client_id; + + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(tfp->bp->dev, "Failed to lookup session, rc:%d\n", + rc); + return rc; + } + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Session Firmware id lookup failed, rc:%d\n", rc); + return rc; + } + + /* Check session owns this client and that we're not the last client */ + client = tf_session_get_session_client(tfs, + parms->session_client_id); + if (!client) { + netdev_dbg(tfp->bp->dev, + "Client %d, not found within this session\n", + parms->session_client_id.id); + return -EINVAL; + } + + /* If last client the request is rejected and cleanup should + * be done by session close. + */ + if (tfs->ref_count == 1) + return -EOPNOTSUPP; + + rc = tf_msg_session_client_unregister(tfp, fw_session_id, + fw_session_client_id); + + /* Log error, but continue. 
If FW fails we do not really have + * a way to fix this but the client would no longer be valid + * thus we remove from the session. + */ + if (rc) { + netdev_dbg(tfp->bp->dev, + "Client destroy on FW Failed, rc:%d\n", rc); + } + + list_del(&client->ll_entry); + + /* Decrement the session ref_count */ + tfs->ref_count--; + + vfree(client); + + return rc; +} + +int tf_session_open_session(struct tf *tfp, + struct tf_session_open_session_parms *parms) +{ + struct tf_session_client_create_parms scparms; + int rc; + + if (!tfp || !parms || !parms->open_cfg->bp) + return -EINVAL; + + tfp->bp = parms->open_cfg->bp; + /* Decide if we're creating a new session or session client */ + if (!tfp->session) { + rc = tf_session_create(tfp, parms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to create session: %s, rc:%d\n", + parms->open_cfg->ctrl_chan_name, rc); + return rc; + } + + netdev_dbg(tfp->bp->dev, + "Session created, session_client_id:%d, session_id:0x%08x, fw_session_id:%d\n", + parms->open_cfg->session_client_id.id, + parms->open_cfg->session_id.id, + parms->open_cfg->session_id.internal.fw_session_id); + return 0; + } + + scparms.ctrl_chan_name = parms->open_cfg->ctrl_chan_name; + scparms.session_client_id = &parms->open_cfg->session_client_id; + + /* Create the new client and get it associated with + * the session. 
+ */ + rc = tf_session_client_create(tfp, &scparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to create client on session 0x%x, rc:%d\n", + parms->open_cfg->session_id.id, rc); + return rc; + } + + netdev_dbg(tfp->bp->dev, + "Session Client:%d registered on session:0x%8x\n", + scparms.session_client_id->internal.fw_session_client_id, + tfp->session->session_id.id); + + return 0; +} + +int tf_session_attach_session(struct tf *tfp, + struct tf_session_attach_session_parms *parms) +{ + int rc = -EOPNOTSUPP; + + if (!tfp || !parms) + return -EINVAL; + + netdev_dbg(tfp->bp->dev, "Attach not yet supported, rc:%d\n", rc); + return rc; +} + +int tf_session_close_session(struct tf *tfp, + struct tf_session_close_session_parms *parms) +{ + struct tf_session_client_destroy_parms scdparms; + struct tf_session_client *client; + struct tf_dev_info *tfd = NULL; + struct tf_session *tfs = NULL; + u8 fw_session_id = 1; + u16 fid; + int rc; + + if (!tfp || !parms || !tfp->session) + return -EINVAL; + + rc = tf_session_get_session(tfp, &tfs); + if (rc) { + netdev_dbg(tfp->bp->dev, "Session lookup failed, rc:%d\n", + rc); + return rc; + } + + if (tfs->session_id.id == TF_SESSION_ID_INVALID) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, + "Invalid session id, unable to close, rc:%d\n", + rc); + return rc; + } + + /* Get the client, we need it independently of the closure + * type (client or session closure). + * + * We find the client by way of the fid. Thus one cannot close + * a client on behalf of someone else. 
+ */ + rc = tfp_get_fid(tfp, &fid); + if (rc) + return rc; + + client = tf_session_find_session_client_by_fid(tfs, + fid); + if (!client) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, + "%s: Client not part of session, rc:%d\n", + __func__, rc); + return rc; + } + + /* In case multiple clients we chose to close those first */ + if (tfs->ref_count > 1) { + /* Linaro gcc can't static init this structure */ + memset(&scdparms, + 0, + sizeof(struct tf_session_client_destroy_parms)); + + scdparms.session_client_id = client->session_client_id; + /* Destroy requested client so its no longer + * registered with this session. + */ + rc = tf_session_client_destroy(tfp, &scdparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to unregister Client %d, rc:%d\n", + client->session_client_id.id, rc); + return rc; + } + + netdev_dbg(tfp->bp->dev, + "Closed session client, session_client_id:%d\n", + client->session_client_id.id); + + netdev_dbg(tfp->bp->dev, "session_id:0x%08x, ref_count:%d\n", + tfs->session_id.id, tfs->ref_count); + + return 0; + } + + /* Record the session we're closing so the caller knows the + * details. + */ + *parms->session_id = tfs->session_id; + + rc = tf_session_get_device(tfs, &tfd); + if (rc) { + netdev_dbg(tfp->bp->dev, "Device lookup failed, rc:%d\n", rc); + return rc; + } + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, "Unable to lookup FW id, rc:%d\n", + rc); + return rc; + } + + /* Unbind the device */ + rc = tf_dev_unbind(tfp, tfd); + if (rc) { + /* Log error */ + netdev_dbg(tfp->bp->dev, "Device unbind failed, rc:%d\n", rc); + } + + rc = tf_msg_session_close(tfp, fw_session_id); + if (rc) { + /* Log error */ + netdev_dbg(tfp->bp->dev, "FW Session close failed, rc:%d\n", + rc); + } + + /* Final cleanup as we're last user of the session thus we + * also delete the last client. 
+ */ + list_del(&client->ll_entry); + vfree(client); + + tfs->ref_count--; + + netdev_dbg(tfp->bp->dev, + "Closed session, session_id:0x%08x, ref_count:%d\n", + tfs->session_id.id, tfs->ref_count); + + tfs->dev_init = false; + + vfree(tfp->session->core_data); + vfree(tfp->session); + tfp->session = NULL; + + return 0; +} + +bool tf_session_is_fid_supported(struct tf_session *tfs, u16 fid) +{ + struct tf_session_client *client; + struct list_head *c_entry; + + list_for_each(c_entry, &tfs->client_ll) { + client = list_entry(c_entry, struct tf_session_client, + ll_entry); + if (client->fw_fid == fid) + return true; + } + + return false; +} + +int tf_session_get_session_internal(struct tf *tfp, struct tf_session **tfs) +{ + int rc = 0; + + /* Skip using the check macro as we want to control the error msg */ + if (!tfp->session || !tfp->session->core_data) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, "Session not created, rc:%d\n", rc); + return rc; + } + + *tfs = (struct tf_session *)(tfp->session->core_data); + + return rc; +} + +int tf_session_get_session(struct tf *tfp, struct tf_session **tfs) +{ + bool supported = false; + u16 fw_fid; + int rc; + + rc = tf_session_get_session_internal(tfp, + tfs); + /* Logging done by tf_session_get_session_internal */ + if (rc) + return rc; + + /* As session sharing among functions aka 'individual clients' + * is supported we have to assure that the client is indeed + * registered before we get deep in the TruFlow api stack. 
+ */ + rc = tfp_get_fid(tfp, &fw_fid); + if (rc) { + netdev_dbg(tfp->bp->dev, "Internal FID lookup\n, rc:%d\n", rc); + return rc; + } + + supported = tf_session_is_fid_supported(*tfs, fw_fid); + if (!supported) { + netdev_dbg(tfp->bp->dev, + "Ctrl channel not registered\n, rc:%d\n", rc); + return -EINVAL; + } + + return rc; +} + +int tf_session_get(struct tf *tfp, struct tf_session **tfs, + struct tf_dev_info **tfd) +{ + int rc; + + rc = tf_session_get_session_internal(tfp, tfs); + /* Logging done by tf_session_get_session_internal */ + if (rc) + return rc; + + rc = tf_session_get_device(*tfs, tfd); + return rc; +} + +struct tf_session_client * +tf_session_get_session_client(struct tf_session *tfs, + union tf_session_client_id session_client_id) +{ + struct tf_session_client *client; + struct list_head *c_entry; + + /* Skip using the check macro as we just want to return */ + if (!tfs) + return NULL; + + list_for_each(c_entry, &tfs->client_ll) { + client = list_entry(c_entry, struct tf_session_client, + ll_entry); + if (client->session_client_id.id == session_client_id.id) + return client; + } + + return NULL; +} + +struct tf_session_client * +tf_session_find_session_client_by_name(struct tf_session *tfs, + const char *ctrl_chan_name) +{ + struct tf_session_client *client; + struct list_head *c_entry; + + /* Skip using the check macro as we just want to return */ + if (!tfs || !ctrl_chan_name) + return NULL; + + list_for_each(c_entry, &tfs->client_ll) { + client = list_entry(c_entry, struct tf_session_client, + ll_entry); + if (strncmp(client->ctrl_chan_name, + ctrl_chan_name, + TF_SESSION_NAME_MAX) == 0) + return client; + } + + return NULL; +} + +struct tf_session_client * +tf_session_find_session_client_by_fid(struct tf_session *tfs, u16 fid) +{ + struct tf_session_client *client; + struct list_head *c_entry; + + /* Skip using the check macro as we just want to return */ + if (!tfs) + return NULL; + + list_for_each(c_entry, &tfs->client_ll) { + client = 
list_entry(c_entry, struct tf_session_client, + ll_entry); + if (client->fw_fid == fid) + return client; + } + + return NULL; +} + +int tf_session_get_device(struct tf_session *tfs, struct tf_dev_info **tfd) +{ + *tfd = &tfs->dev; + + return 0; +} + +int +tf_session_get_fw_session_id(struct tf *tfp, uint8_t *fw_session_id) +{ + struct tf_session *tfs = NULL; + int rc; + + /* Skip using the check macro as we want to control the error msg */ + if (!tfp->session) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, "Session not created, rc:%d\n", rc); + return rc; + } + + if (!fw_session_id) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, "Invalid Argument(s), rc:%d\n", rc); + return rc; + } + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + *fw_session_id = tfs->session_id.internal.fw_session_id; + + return 0; +} + +int tf_session_get_session_id(struct tf *tfp, union tf_session_id *session_id) +{ + struct tf_session *tfs = NULL; + int rc; + + if (!tfp->session) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, "Session not created, rc:%d\n", rc); + return rc; + } + + if (!session_id) { + rc = -EINVAL; + netdev_dbg(tfp->bp->dev, "Invalid Argument(s), rc:%d\n", rc); + return rc; + } + + /* Using internal version as session client may not exist yet */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + *session_id = tfs->session_id; + + return 0; +} + +int tf_session_get_db(struct tf *tfp, enum tf_module_type type, + void **db_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + *db_handle = NULL; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + switch (type) { + case TF_MODULE_TYPE_IDENTIFIER: + if (tfs->id_db_handle) + *db_handle = tfs->id_db_handle; + else + rc = -ENOMEM; + break; + case TF_MODULE_TYPE_TABLE: + if (tfs->tbl_db_handle) + *db_handle = tfs->tbl_db_handle; + else + rc = -ENOMEM; + + break; + case TF_MODULE_TYPE_TCAM: + if (tfs->tcam_db_handle) 
+ *db_handle = tfs->tcam_db_handle; + else + rc = -ENOMEM; + break; + case TF_MODULE_TYPE_EM: + if (tfs->em_db_handle) + *db_handle = tfs->em_db_handle; + else + rc = -ENOMEM; + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int tf_session_set_db(struct tf *tfp, enum tf_module_type type, + void *db_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + switch (type) { + case TF_MODULE_TYPE_IDENTIFIER: + tfs->id_db_handle = db_handle; + break; + case TF_MODULE_TYPE_TABLE: + tfs->tbl_db_handle = db_handle; + break; + case TF_MODULE_TYPE_TCAM: + tfs->tcam_db_handle = db_handle; + break; + case TF_MODULE_TYPE_EM: + tfs->em_db_handle = db_handle; + break; + default: + rc = -EINVAL; + break; + } + + return rc; +} + +int tf_session_get_global_db(struct tf *tfp, void **global_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + *global_handle = NULL; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + *global_handle = tfs->global_db_handle; + return rc; +} + +int tf_session_set_global_db(struct tf *tfp, void *global_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tfs->global_db_handle = global_handle; + return rc; +} + +int tf_session_get_sram_db(struct tf *tfp, void **sram_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + *sram_handle = NULL; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + *sram_handle = tfs->sram_handle; + return rc; +} + +int tf_session_set_sram_db(struct tf *tfp, void *sram_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + 
tfs->sram_handle = sram_handle; + return rc; +} + +int tf_session_get_if_tbl_db(struct tf *tfp, void **if_tbl_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + *if_tbl_handle = NULL; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + *if_tbl_handle = tfs->if_tbl_db_handle; + return rc; +} + +int tf_session_set_if_tbl_db(struct tf *tfp, void *if_tbl_handle) +{ + struct tf_session *tfs = NULL; + int rc = 0; + + if (!tfp) + return (-EINVAL); + + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tfs->if_tbl_db_handle = if_tbl_handle; + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.h new file mode 100644 index 000000000000..d55c09e63e08 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_session.h @@ -0,0 +1,501 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_SESSION_H_ +#define _TF_SESSION_H_ + +#include +#include "bitalloc.h" +#include "tf_core.h" +#include "tf_device.h" +#include "tf_rm.h" + +/* The Session module provides session control support. A session is + * to the ULP layer known as a session_info instance. The session + * private data is the actual session. + * + * Session manages: + * - The device and all the resources related to the device. + * - Any session sharing between ULP applications + */ + +/* Session defines */ +#define TF_SESSION_ID_INVALID 0xFFFFFFFF /** Invalid Session ID define */ + +/* At this stage we are using fixed size entries so that each + * stack entry represents either 2 or 4 RT (f/n)blocks. So we + * take the total block allocation for truflow and divide that + * by either 2 or 4. 
+ */ +#ifdef TF_EM_ENTRY_IPV4_ONLY +#define TF_SESSION_EM_ENTRY_SIZE 2 /* 2 blocks per entry */ +#else +#define TF_SESSION_EM_ENTRY_SIZE 4 /* 4 blocks per entry */ +#endif + +/** + * TF Session + * + * @ver: TruFlow Version. Used to control the structure + * layout when sharing sessions. No guarantee that a + * secondary process would come from the same version + * of an executable. + * @session_id: Session ID, allocated by FW on tf_open_session() + * @shared_session: Boolean controlling the use and availability of + * shared session. Shared session will allow the + * application to share resources on the firmware side + * without having to allocate them on firmware. + * Additional private session core_data will be + * allocated if this boolean is set to 'true', default + * 'false'. + * @shared_session_creator: This flag indicates the shared session on + * firmware side is created by this session. Some + * privileges may be assigned to this session. + * @shadow_copy: Boolean controlling the use and availability of + * shadow copy. Shadow copy will allow the TruFlow + * Core to keep track of resource content on the + * firmware side without having to query firmware. + * Additional private session core_data will be + * allocated if this boolean is set to 'true', + * default 'false'. Size of memory depends on the + * NVM Resource settings for the control channel. + * @ref_cnt: Session Reference Count. To keep track of functions + * per session the ref_count is updated. There is also a + * parallel TruFlow Firmware ref_count in case the + * TruFlow Core goes away without informing the Firmware. + * @ref_count_attach: Session Reference Count for attached sessions. + * To keep track of application sharing of a session + * the ref_count_attach is updated. + * @dev: Device handle. + * @dev_init: Device init flag. False if Device is not fully + * initialized, else true. 
+ * @client_ll: Linked list of clients registered for this session + * @em_ext_db_handle: em ext db reference for the session + * @tcam_db_handle: tcam db reference for the session + * @tbl_db_handle: table db reference for the session + * @id_db_handle: identifier db reference for the session + * @em_db_handle: em db reference for the session + * @em_pool: EM allocator for session + * #ifdef TF_TCAM_SHARED + * @tcam_shared_db_handle: tcam db reference for the session + * #endif + * @sram_handle: SRAM db reference for the session + * @if_tbl_db_handle: if table db reference for the session + * @global_db_handle: global db reference for the session + * @wc_num_slices_per_row: Number of slices per row for WC TCAM + * @tcam_mgr_control: Indicates if TCAM is controlled by TCAM Manager + * + * Shared memory containing private TruFlow session information. + * Through this structure the session can keep track of resource + * allocations and (if so configured) any shadow copy of flow + * information. It also holds info about Session Clients. + * + * Memory is assigned to the Truflow instance by way of + * tf_open_session. Memory is allocated and owned by i.e. ULP. + * + * Access control to this shared memory is handled by the spin_lock in + * tf_session_info. + */ +struct tf_session { + struct tf_session_version ver; + union tf_session_id session_id; + bool shared_session; + bool shared_session_creator; + bool shadow_copy; + u8 ref_count; + u8 ref_count_attach; + struct tf_dev_info dev; + bool dev_init; + struct list_head client_ll; + void *em_ext_db_handle; + void *tcam_db_handle; + void *tbl_db_handle; + void *id_db_handle; + void *em_db_handle; + void *em_pool[TF_DIR_MAX]; + void *sram_handle; + void *if_tbl_db_handle; + void *global_db_handle; + u16 wc_num_slices_per_row; + int tcam_mgr_control[TF_DIR_MAX][TF_TCAM_TBL_TYPE_MAX]; + void *tcam_mgr_handle; +}; + +/** + * Session Client + * + * @ll_entry: Linked list of clients. 
For inserting in link list, + * must be first field of struct. + * @ctrl_chan_name: String containing name of control channel interface + * to be used for this session to communicate with + * firmware. ctrl_chan_name will be used as part of a + * name for any shared memory allocation. + * @fw_fid: Firmware FID, learned at time of Session Client create. + * @session_client_id: Session Client ID, allocated by FW on + * tf_register_session() + * + * Shared memory for each of the Session Clients. A session can have + * one or more clients. + */ +struct tf_session_client { + struct list_head ll_entry; + char ctrl_chan_name[TF_SESSION_NAME_MAX]; + u16 fw_fid; + union tf_session_client_id session_client_id; +}; + +/** + * Session open parameter definition + * @open_cfg: Pointer to the TF open session configuration + */ +struct tf_session_open_session_parms { + struct tf_open_session_parms *open_cfg; +}; + +/** + * Session attach parameter definition + * @attach_cfg: Pointer to the TF attach session configuration + */ +struct tf_session_attach_session_parms { + struct tf_attach_session_parms *attach_cfg; +}; + +/* Session close parameter definition */ +struct tf_session_close_session_parms { + u8 *ref_count; + union tf_session_id *session_id; +}; + +/** + * Creates a host session with a corresponding firmware session. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to the session open parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_open_session(struct tf *tfp, + struct tf_session_open_session_parms *parms); + +/** + * Attaches a previous created session. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to the session attach parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_attach_session(struct tf *tfp, + struct tf_session_attach_session_parms *parms); + +/** + * Closes a previous created session. 
Only possible if previous + * registered Clients had been unregistered first. + * + * @tfp: Pointer to TF handle + * @parms: Pointer to the session close parameters. + * + * Returns + * - (0) if successful. + * - (-EUSERS) if clients are still registered with the session. + * - (-EINVAL) on failure. + */ +int tf_session_close_session(struct tf *tfp, + struct tf_session_close_session_parms *parms); + +/** + * Verifies that the fid is supported by the session. Used to assure + * that a function i.e. client/control channel is registered with the + * session. + * + * @tfs: Pointer to TF Session handle + * @fid: FID value to check + * + * Returns + * - (true) if successful, else false + * - (-EINVAL) on failure. + */ +bool tf_session_is_fid_supported(struct tf_session *tfs, u16 fid); + +/** + * Looks up the private session information from the TF session + * info. Does not perform a fid check against the registered + * clients. Should be used if tf_session_get_session() was used + * previously i.e. at the TF API boundary. + * + * @tfp: Pointer to TF handle + * @tfs: Pointer to the session + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_session_internal(struct tf *tfp, struct tf_session **tfs); + +/** + * Looks up the private session information from the TF session + * info. Performs a fid check against the clients on the session. + * + * @tfp: Pointer to TF handle + * @tfs: Pointer to the session + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_session(struct tf *tfp, struct tf_session **tfs); + +/** + * Looks up client within the session. + * + * @tfs: Pointer to the session + * @session_client_id: Client id to look for within the session + * + * Returns + * client if successful. + * - (NULL) on failure, client not found. 
+ */ +struct tf_session_client * +tf_session_get_session_client(struct tf_session *tfs, + union tf_session_client_id session_client_id); + +/** + * Looks up client using name within the session. + * + * @tfs: pointer to the session + * @ctrl_chan_name: name of the client to lookup in the session + * + * Returns: + * - Pointer to the session, if found. + * - (NULL) on failure, client not found. + */ +struct tf_session_client * +tf_session_find_session_client_by_name(struct tf_session *tfs, + const char *ctrl_chan_name); + +/** + * Looks up client using the fid. + * + * @session: pointer to the session + * @fid: fid of the client to find + * + * Returns: + * - Pointer to the session, if found. + * - (NULL) on failure, client not found. + */ +struct tf_session_client * +tf_session_find_session_client_by_fid(struct tf_session *tfs, u16 fid); + +/** + * Looks up the device information from the TF Session. + * + * @tfp: Pointer to TF handle + * @tfd: Pointer to the device [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_device(struct tf_session *tfs, struct tf_dev_info **tfd); + +/** + * Returns the session and the device from the tfp. + * + * @tfp: Pointer to TF handle + * @tfs: Pointer to the session + * @tfd: Pointer to the device + + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get(struct tf *tfp, struct tf_session **tfs, + struct tf_dev_info **tfd); + +/** + * Looks up the FW Session id the requested TF handle. + * + * @tfp: Pointer to TF handle + * @session_id: Pointer to the session_id [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_fw_session_id(struct tf *tfp, u8 *fw_session_id); + +/** + * Looks up the Session id the requested TF handle. + * + * @tfp: Pointer to TF handle + * @session_id: Pointer to the session_id [out] + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int tf_session_get_session_id(struct tf *tfp, union tf_session_id *session_id); + +/** + * API to get the em_ext_db from tf_session. + * + * @tfp: Pointer to TF handle + * @em_ext_db_handle: pointer to eem handle + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_em_ext_db(struct tf *tfp, void **em_ext_db_handle); + +/** + * API to set the em_ext_db in tf_session. + * + * @tfp: Pointer to TF handle + * @em_ext_db_handle: pointer to eem handle + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_set_em_ext_db(struct tf *tfp, void *em_ext_db_handle); + +/** + * API to get the db from tf_session. + * + * @tfp: Pointer to TF handle + * @db_handle: pointer to db handle + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_db(struct tf *tfp, enum tf_module_type type, + void **db_handle); + +/** + * API to set the db in tf_session. + * + * @tfp: Pointer to TF handle + * @db_handle: pointer to db handle + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_set_db(struct tf *tfp, enum tf_module_type type, + void *db_handle); + +/** + * Check if the session is shared session. 
+ * + * @session: pointer to the session + * + * Returns: + * - true if it is shared session + * - false if it is not shared session + */ +static inline bool tf_session_is_shared_session(struct tf_session *tfs) +{ + return tfs->shared_session; +} + +/** + * Check if the session is the shared session creator + * + * @session: pointer to the session + * + * Returns: + * - true if it is the shared session creator + * - false if it is not the shared session creator + */ +static inline bool tf_session_is_shared_session_creator(struct tf_session *tfs) +{ + return tfs->shared_session_creator; +} + +/** + * Get the pointer to the parent bnxt struct + * + * @session: pointer to the session + * + * Returns: + * - the pointer to the parent bnxt struct + */ +static inline struct bnxt* +tf_session_get_bp(struct tf *tfp) +{ + return tfp->bp; +} + +/** + * Set the pointer to the SRAM database + * + * @session: pointer to the session + * + * Returns: + * - the pointer to the parent bnxt struct + */ +int tf_session_set_sram_db(struct tf *tfp, void *sram_handle); + +/** + * Get the pointer to the SRAM database + * + * @session: pointer to the session + * + * Returns: + * - the pointer to the parent bnxt struct + */ +int tf_session_get_sram_db(struct tf *tfp, void **sram_handle); + +/** + * Set the pointer to the global cfg database + * + * @session: pointer to the session + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_set_global_db(struct tf *tfp, void *global_handle); + +/** + * Get the pointer to the global cfg database + * + * @session: pointer to the session + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_global_db(struct tf *tfp, void **global_handle); + +/** + * Set the pointer to the if table cfg database + * + * @session: pointer to the session + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int tf_session_set_if_tbl_db(struct tf *tfp, void *if_tbl_handle); + +/** + * Get the pointer to the if table cfg database + * + * @session: pointer to the session + * + * Returns: + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_session_get_if_tbl_db(struct tf *tfp, void **if_tbl_handle); + +#endif /* _TF_SESSION_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.c new file mode 100644 index 000000000000..2e8cbd58befa --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.c @@ -0,0 +1,775 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ +#include +#include +#include +#include +#include "tf_sram_mgr.h" +#include "tf_core.h" +#include "tf_rm.h" +#include "tf_util.h" +/*************************** + * Internal Data Structures + ***************************/ + +/** + * TF SRAM block info + * + * @prev: Previous block + * @next: Next block + * @in_use_mask: Bitmap indicating which slices are in use + * If a bit is set, it indicates the slice + * in the row is in use. + * @block_id: Block id - this is a 64B offset + * + * Contains all the information about a particular 64B SRAM + * block and the slices within it. + */ +struct tf_sram_block { + struct tf_sram_block *prev; + struct tf_sram_block *next; + u8 in_use_mask; + u16 block_id; +}; + +/** + * TF SRAM block list + * + * @head: Pointer to head of linked list of blocks. + * @tail: Pointer to tail of linked list of blocks. 
+ * @cnt: Total count of blocks + * @first_not_full_block: First non-full block in the list + * @size: Entry slice size for this list + * + * List of 64B SRAM blocks used for fixed size slices (8, 16, 32, 64B) + */ +struct tf_sram_slice_list { + struct tf_sram_block *head; + struct tf_sram_block *tail; + u32 cnt; + struct tf_sram_block *first_not_full_block; + enum tf_sram_slice_size size; +}; + +/* TF SRAM bank info consists of lists of different slice sizes per bank */ +struct tf_sram_bank_info { + struct tf_sram_slice_list slice[TF_SRAM_SLICE_SIZE_MAX]; +}; + +/* SRAM banks consist of SRAM bank information */ +struct tf_sram_bank { + struct tf_sram_bank_info bank[TF_SRAM_BANK_ID_MAX]; +}; + +/* SRAM banks consist of SRAM bank information */ +struct tf_sram { + struct tf_sram_bank dir[TF_DIR_MAX]; +}; + +/* Internal functions */ + +/* Get slice size in string format */ +const char *tf_sram_slice_2_str(enum tf_sram_slice_size slice_size) +{ + switch (slice_size) { + case TF_SRAM_SLICE_SIZE_8B: + return "8B slice"; + case TF_SRAM_SLICE_SIZE_16B: + return "16B slice"; + case TF_SRAM_SLICE_SIZE_32B: + return "32B slice"; + case TF_SRAM_SLICE_SIZE_64B: + return "64B slice"; + default: + return "Invalid slice size"; + } +} + +/* Get bank in string format */ +const char *tf_sram_bank_2_str(enum tf_sram_bank_id bank_id) +{ + switch (bank_id) { + case TF_SRAM_BANK_ID_0: + return "bank_0"; + case TF_SRAM_BANK_ID_1: + return "bank_1"; + case TF_SRAM_BANK_ID_2: + return "bank_2"; + case TF_SRAM_BANK_ID_3: + return "bank_3"; + default: + return "Invalid bank_id"; + } +} + +/* TF SRAM get slice list */ +static int tf_sram_get_slice_list(struct tf_sram *sram, + struct tf_sram_slice_list **slice_list, + enum tf_sram_slice_size slice_size, + enum tf_dir dir, + enum tf_sram_bank_id bank_id) +{ + if (!sram || !slice_list) + return -EINVAL; + + *slice_list = &sram->dir[dir].bank[bank_id].slice[slice_size]; + return 0; +} + +u16 tf_sram_bank_2_base_offset[TF_SRAM_BANK_ID_MAX] = { + 
0, + 2048, + 4096, + 6144 +}; + +/* Translate a block id and bank_id to an 8B offset */ +static void tf_sram_block_id_2_offset(enum tf_sram_bank_id bank_id, + u16 block_id, u16 *offset) +{ + *offset = (block_id + tf_sram_bank_2_base_offset[bank_id]) << 3; +} + +/* Translates an 8B offset and bank_id to a block_id */ +static void tf_sram_offset_2_block_id(enum tf_sram_bank_id bank_id, + u16 offset, u16 *block_id, + u16 *slice_offset) +{ + *slice_offset = offset & 0x7; + *block_id = ((offset & ~0x7) >> 3) - + tf_sram_bank_2_base_offset[bank_id]; +} + +/* Find a matching block_id within the slice list */ +static struct tf_sram_block *tf_sram_find_block(u16 block_id, + struct tf_sram_slice_list + *slice_list) +{ + struct tf_sram_block *block; + u32 cnt; + + cnt = slice_list->cnt; + block = slice_list->head; + + while (cnt > 0 && block) { + if (block->block_id == block_id) + return block; + block = block->next; + cnt--; + } + return NULL; +} + +/* Given the current block get the next block within the slice list + * List is not changed. + */ +static struct tf_sram_block *tf_sram_get_next_block(struct tf_sram_block *block) +{ + struct tf_sram_block *nblock; + + if (block) + nblock = block->next; + else + nblock = NULL; + return nblock; +} + +/* Free an allocated slice from a block and if the block is empty, + * return an indication so that the block can be freed. 
+ */ +static int tf_sram_free_slice(enum tf_sram_slice_size slice_size, + u16 slice_offset, struct tf_sram_block *block, + bool *block_is_empty) +{ + u8 slice_mask = 0; + int rc = 0; + u8 shift; + + if (!block || !block_is_empty) + return -EINVAL; + + switch (slice_size) { + case TF_SRAM_SLICE_SIZE_8B: + shift = slice_offset >> 0; + WARN_ON(!(shift < 8)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_16B: + shift = slice_offset >> 1; + WARN_ON(!(shift < 4)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_32B: + shift = slice_offset >> 2; + WARN_ON(!(shift < 2)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_64B: + default: + shift = slice_offset >> 0; + WARN_ON(!(shift < 1)); + slice_mask = 1 << shift; + break; + } + + if ((block->in_use_mask & slice_mask) == 0) { + rc = -EINVAL; + netdev_dbg(NULL, + "block_id(0x%x) slice(%d) was not allocated\n", + block->block_id, slice_offset); + return rc; + } + + block->in_use_mask &= ~slice_mask; + + if (block->in_use_mask == 0) + *block_is_empty = true; + else + *block_is_empty = false; + + return rc; +} + +/* TF SRAM get next slice + * Gets the next slice_offset available in the block + * and updates the in_use_mask. 
+ */ +static int tf_sram_get_next_slice_in_block(struct tf_sram_block *block, + enum tf_sram_slice_size slice_size, + u16 *slice_offset, + bool *block_is_full) +{ + u8 shift, max_slices, mask, i, full_mask; + int rc, free_id = -1; + + if (!block || !slice_offset || !block_is_full) + return -EINVAL; + + switch (slice_size) { + case TF_SRAM_SLICE_SIZE_8B: + shift = 0; + max_slices = 8; + full_mask = 0xff; + break; + case TF_SRAM_SLICE_SIZE_16B: + shift = 1; + max_slices = 4; + full_mask = 0xf; + break; + case TF_SRAM_SLICE_SIZE_32B: + shift = 2; + max_slices = 2; + full_mask = 0x3; + break; + case TF_SRAM_SLICE_SIZE_64B: + default: + shift = 0; + max_slices = 1; + full_mask = 1; + break; + } + + mask = block->in_use_mask; + + for (i = 0; i < max_slices; i++) { + if ((mask & 1) == 0) { + free_id = i; + block->in_use_mask |= 1 << free_id; + break; + } + mask = mask >> 1; + } + + if (block->in_use_mask == full_mask) + *block_is_full = true; + else + *block_is_full = false; + + if (free_id >= 0) { + *slice_offset = free_id << shift; + rc = 0; + } else { + *slice_offset = 0; + rc = -ENOMEM; + } + + return rc; +} + +/* TF SRAM get indication as to whether the slice offset is + * allocated in the block. 
+ */ +static int tf_sram_is_slice_allocated_in_block(struct tf_sram_block *block, + enum tf_sram_slice_size + slice_size, u16 slice_offset, + bool *is_allocated) +{ + u8 slice_mask = 0; + int rc = 0; + u8 shift; + + if (!block || !is_allocated) + return -EINVAL; + + *is_allocated = false; + + switch (slice_size) { + case TF_SRAM_SLICE_SIZE_8B: + shift = slice_offset >> 0; + WARN_ON(!(shift < 8)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_16B: + shift = slice_offset >> 1; + WARN_ON(!(shift < 4)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_32B: + shift = slice_offset >> 2; + WARN_ON(!(shift < 2)); + slice_mask = 1 << shift; + break; + + case TF_SRAM_SLICE_SIZE_64B: + default: + shift = slice_offset >> 0; + WARN_ON(!(shift < 1)); + slice_mask = 1 << shift; + break; + } + + if ((block->in_use_mask & slice_mask) == 0) { + netdev_dbg(NULL, + "block_id(0x%x) slice(%d) was not allocated\n", + block->block_id, slice_offset); + *is_allocated = false; + } else { + *is_allocated = true; + } + + return rc; +} + +/* Get the block count */ +static u32 tf_sram_get_block_cnt(struct tf_sram_slice_list *slice_list) +{ + return slice_list->cnt; +} + +/* Free a block data structure - does not free to the RM */ +static void tf_sram_free_block(struct tf_sram_slice_list *slice_list, + struct tf_sram_block *block) +{ + if (slice_list->head == block && slice_list->tail == block) { + slice_list->head = NULL; + slice_list->tail = NULL; + } else if (slice_list->head == block) { + slice_list->head = block->next; + slice_list->head->prev = NULL; + } else if (slice_list->tail == block) { + slice_list->tail = block->prev; + slice_list->tail->next = NULL; + } else { + block->prev->next = block->next; + block->next->prev = block->prev; + } + vfree(block); + slice_list->cnt--; +} + +/* Free the entire slice_list */ +static void tf_sram_free_slice_list(struct tf_sram_slice_list *slice_list) +{ + struct tf_sram_block *nblock, *block; + u32 i, block_cnt; + + 
block_cnt = tf_sram_get_block_cnt(slice_list); + block = slice_list->head; + + for (i = 0; i < block_cnt; i++) { + nblock = block->next; + tf_sram_free_block(slice_list, block); + block = nblock; + } +} + +/* Allocate a single SRAM block from memory and add it to the slice list */ +static struct tf_sram_block *tf_sram_alloc_block(struct tf_sram_slice_list + *slice_list, u16 block_id) +{ + struct tf_sram_block *block; + + block = vzalloc(sizeof(*block)); + if (!block) + return NULL; + + block->block_id = block_id; + + if (!slice_list->head) { + slice_list->head = block; + slice_list->tail = block; + block->next = NULL; + block->prev = NULL; + } else { + block->next = slice_list->head; + block->prev = NULL; + block->next->prev = block; + slice_list->head = block->next->prev; + } + slice_list->cnt++; + return block; +} + +/* Find the first not full block in the slice list */ +static void tf_sram_find_first_not_full_block(struct tf_sram_slice_list + *slice_list, + enum tf_sram_slice_size + slice_size, + struct tf_sram_block + **first_not_full_block) +{ + struct tf_sram_block *block = slice_list->head; + u8 slice_mask, mask; + + switch (slice_size) { + case TF_SRAM_SLICE_SIZE_8B: + slice_mask = 0xff; + break; + + case TF_SRAM_SLICE_SIZE_16B: + slice_mask = 0xf; + break; + + case TF_SRAM_SLICE_SIZE_32B: + slice_mask = 0x3; + break; + + case TF_SRAM_SLICE_SIZE_64B: + default: + slice_mask = 0x1; + break; + } + + *first_not_full_block = NULL; + + while (block) { + mask = block->in_use_mask & slice_mask; + if (mask != slice_mask) { + *first_not_full_block = block; + break; + } + block = block->next; + } +} + +static void tf_sram_dump_block(struct tf_sram_block *block) +{ + netdev_dbg(NULL, "block_id(0x%x) in_use_mask(0x%02x)\n", + block->block_id, block->in_use_mask); +} + +/* External functions */ +int tf_sram_mgr_bind(void **sram_handle) +{ + struct tf_sram *sram; + int rc = 0; + + if (!sram_handle) + return -EINVAL; + + sram = vzalloc(sizeof(*sram)); + if (!sram) + 
return -ENOMEM; + + *sram_handle = sram; + return rc; +} + +int tf_sram_mgr_unbind(void *sram_handle) +{ + struct tf_sram_slice_list *slice_list; + enum tf_sram_slice_size slice_size; + enum tf_sram_bank_id bank_id; + struct tf_sram *sram; + enum tf_dir dir; + int rc = 0; + + if (!sram_handle) + return -EINVAL; + + sram = (struct tf_sram *)sram_handle; + + for (dir = 0; dir < TF_DIR_MAX; dir++) { + /* For each bank */ + for (bank_id = TF_SRAM_BANK_ID_0; + bank_id < TF_SRAM_BANK_ID_MAX; + bank_id++) { + /* For each slice size */ + for (slice_size = TF_SRAM_SLICE_SIZE_8B; + slice_size < TF_SRAM_SLICE_SIZE_MAX; + slice_size++) { + rc = tf_sram_get_slice_list(sram, &slice_list, + slice_size, dir, + bank_id); + if (rc) { + /* Log error */ + netdev_dbg(NULL, + "No SRAM slice list:%d\n", + rc); + return rc; + } + if (tf_sram_get_block_cnt(slice_list)) + tf_sram_free_slice_list(slice_list); + } + } + } + + vfree(sram); + + /* Freeing of the RM resources is handled by the table manager */ + return rc; +} + +int tf_sram_mgr_alloc(void *sram_handle, struct tf_sram_mgr_alloc_parms *parms) +{ + struct tf_rm_allocate_parms aparms = { 0 }; + struct tf_sram_slice_list *slice_list; + u16 block_id, slice_offset = 0; + struct tf_sram_block *block; + struct tf_sram *sram; + bool block_is_full; + u16 block_offset; + int rc = 0; + u32 index; + + if (!sram_handle || !parms || !parms->sram_offset) + return -EINVAL; + + sram = (struct tf_sram *)sram_handle; + + /* Check the current slice list */ + rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size, + parms->dir, parms->bank_id); + if (rc) { + /* Log error */ + netdev_dbg(NULL, "No SRAM slice list:%d\n", rc); + return rc; + } + + /* If the list is empty or all entries are full allocate a new block */ + if (!slice_list->first_not_full_block) { + /* Allocate and insert a new block */ + aparms.index = &index; + aparms.subtype = parms->tbl_type; + aparms.rm_db = parms->rm_db; + rc = tf_rm_allocate(&aparms); + if (rc) + return rc; 
+ + block_id = index; + block = tf_sram_alloc_block(slice_list, block_id); + } else { + /* Block exists */ + block = + (struct tf_sram_block *)(slice_list->first_not_full_block); + } + rc = tf_sram_get_next_slice_in_block(block, + parms->slice_size, + &slice_offset, + &block_is_full); + + /* Find the new first non-full block in the list */ + tf_sram_find_first_not_full_block(slice_list, + parms->slice_size, + &slice_list->first_not_full_block); + + tf_sram_block_id_2_offset(parms->bank_id, block->block_id, &block_offset); + + *parms->sram_offset = block_offset + slice_offset; + return rc; +} + +int tf_sram_mgr_free(void *sram_handle, struct tf_sram_mgr_free_parms *parms) +{ + struct tf_rm_free_parms fparms = { 0 }; + struct tf_sram_slice_list *slice_list; + struct tf_sram_block *block; + u16 block_id, slice_offset; + struct tf_sram *sram; + bool block_is_empty; + int rc = 0; + + if (!sram_handle || !parms) + return -EINVAL; + + sram = (struct tf_sram *)sram_handle; + + /* Check the current slice list */ + rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size, + parms->dir, parms->bank_id); + if (rc) { + /* Log error */ + netdev_dbg(NULL, "No SRAM slice list:%d\n", rc); + return rc; + } + + /* Determine the block id and slice offset from the SRAM offset */ + tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id, + &slice_offset); + + /* Search the list of blocks for the matching block id */ + block = tf_sram_find_block(block_id, slice_list); + if (!block) { + netdev_dbg(NULL, "block not found 0x%x\n", block_id); + return rc; + } + + /* If found, search for the matching SRAM slice in use. 
*/ + rc = tf_sram_free_slice(parms->slice_size, slice_offset, + block, &block_is_empty); + if (rc) { + netdev_dbg(NULL, "Error freeing slice (%d)\n", rc); + return rc; + } + /* If the block is empty, free the block to the RM */ + if (block_is_empty) { + fparms.rm_db = parms->rm_db; + fparms.subtype = parms->tbl_type; + fparms.index = block_id; + rc = tf_rm_free(&fparms); + + if (rc) { + netdev_dbg(NULL, + "Free block_id(%d) failed error(%d)\n", + block_id, rc); + } + /* Free local entry regardless */ + tf_sram_free_block(slice_list, block); + + /* Clear the not full block to set it again */ + slice_list->first_not_full_block = NULL; + } + if (slice_list->first_not_full_block) + return rc; + + /* set the non full block so it can be used in next alloc */ + tf_sram_find_first_not_full_block(slice_list, + parms->slice_size, + &slice_list->first_not_full_block); + + return rc; +} + +int tf_sram_mgr_dump(void *sram_handle, struct tf_sram_mgr_dump_parms *parms) +{ + struct tf_sram_slice_list *slice_list; + struct tf_sram_block *block; + struct tf_sram *sram; + u32 block_cnt, i; + int rc = 0; + + if (!sram_handle || !parms) + return -EINVAL; + + sram = (struct tf_sram *)sram_handle; + + rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size, + parms->dir, parms->bank_id); + if (rc) + return rc; + + if (slice_list->cnt || slice_list->first_not_full_block) { + netdev_dbg(NULL, "\n********** %s: %s: %s ***********\n", + tf_sram_bank_2_str(parms->bank_id), + tf_dir_2_str(parms->dir), + tf_sram_slice_2_str(parms->slice_size)); + + block_cnt = tf_sram_get_block_cnt(slice_list); + netdev_dbg(NULL, "block_cnt(%d)\n", block_cnt); + if (slice_list->first_not_full_block) + netdev_dbg(NULL, "first_not_full_block(0x%x)\n", + slice_list->first_not_full_block->block_id); + block = slice_list->head; + for (i = 0; i < block_cnt; i++) { + tf_sram_dump_block(block); + block = tf_sram_get_next_block(block); + } + netdev_dbg(NULL, "*********************************\n"); + } + return 
rc; +} + +/** + * Validate an SRAM Slice is allocated + * + * Validate whether the SRAM slice is allocated + * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM alloc parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +int tf_sram_mgr_is_allocated(void *sram_handle, + struct tf_sram_mgr_is_allocated_parms *parms) +{ + struct tf_sram_slice_list *slice_list; + struct tf_sram_block *block; + u16 block_id, slice_offset; + struct tf_sram *sram; + int rc = 0; + + if (!sram_handle || !parms || !parms->is_allocated) + return -EINVAL; + + sram = (struct tf_sram *)sram_handle; + + /* Check the current slice list */ + rc = tf_sram_get_slice_list(sram, &slice_list, parms->slice_size, + parms->dir, parms->bank_id); + if (rc) { + /* Log error */ + netdev_dbg(NULL, "No SRAM slice list, rc:%d\n", rc); + return rc; + } + + /* If the list is empty, then it cannot be allocated */ + if (!slice_list->cnt) { + netdev_dbg(NULL, "List is empty for %s:%s:%s\n", + tf_dir_2_str(parms->dir), + tf_sram_slice_2_str(parms->slice_size), + tf_sram_bank_2_str(parms->bank_id)); + + parms->is_allocated = NULL; + goto done; + } + + /* Determine the block id and slice offset from the SRAM offset */ + tf_sram_offset_2_block_id(parms->bank_id, parms->sram_offset, &block_id, + &slice_offset); + + /* Search the list of blocks for the matching block id */ + block = tf_sram_find_block(block_id, slice_list); + if (!block) { + netdev_dbg(NULL, "block not found in list 0x%x\n", + parms->sram_offset); + parms->is_allocated = NULL; + goto done; + } + + rc = tf_sram_is_slice_allocated_in_block(block, + parms->slice_size, + slice_offset, + parms->is_allocated); +done: + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.h new file mode 100644 index 000000000000..3e39fd1c186f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_sram_mgr.h @@ 
-0,0 +1,201 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_SRAM_MGR_H_ +#define _TF_SRAM_MGR_H_ + +#include +#include "tf_core.h" +#include "tf_rm.h" + +#define TF_SRAM_MGR_BLOCK_SZ_BYTES 64 +#define TF_SRAM_MGR_MIN_SLICE_BYTES 8 + +/** + * TF slice size. + * + * A slice is part of a 64B row + * Each slice is a multiple of 8B + */ +enum tf_sram_slice_size { + TF_SRAM_SLICE_SIZE_8B, /* 8 byte SRAM slice */ + TF_SRAM_SLICE_SIZE_16B, /* 16 byte SRAM slice */ + TF_SRAM_SLICE_SIZE_32B, /* 32 byte SRAM slice */ + TF_SRAM_SLICE_SIZE_64B, /* 64 byte SRAM slice */ + TF_SRAM_SLICE_SIZE_MAX /* slice limit */ +}; + +/** Initialize the SRAM slice manager + * + * The SRAM slice manager manages slices within 64B rows. Slices are of size + * tf_sram_slice_size. This function provides a handle to the SRAM manager + * data. + * + * SRAM manager data may dynamically allocate data upon initialization if + * running on the host. + * + * @sram_handle: Pointer to SRAM handle + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + * Returns the handle for the SRAM slice manager + */ +int tf_sram_mgr_bind(void **sram_handle); + +/** Uninitialize the SRAM slice manager + * + * Frees any dynamically allocated data structures for SRAM slice management. 
+ * + * @sram_handle: Pointer to SRAM handle + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + */ +int tf_sram_mgr_unbind(void *sram_handle); + +/** + * tf_sram_mgr_alloc_parms parameter definition + * + * @dir: direction + * @bank_id: the SRAM bank to allocate from + * @slice_size: the slice size to allocate + * @sram_slice: A pointer to be filled with an 8B sram slice offset + * @rm_db: RM DB Handle required for RM allocation + * @tbl_type: tf table type + */ +struct tf_sram_mgr_alloc_parms { + enum tf_dir dir; + enum tf_sram_bank_id bank_id; + enum tf_sram_slice_size slice_size; + u16 *sram_offset; + void *rm_db; + enum tf_tbl_type tbl_type; +}; + +/** + * Allocate an SRAM Slice + * + * Allocate an SRAM slice from the indicated bank. If successful an 8B SRAM + * offset will be returned. Slices are variable sized. This may result in + * a row being allocated from the RM SRAM bank pool if required. + * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM alloc parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +int tf_sram_mgr_alloc(void *sram_handle, + struct tf_sram_mgr_alloc_parms *parms); + +/** + * tf_sram_mgr_free_parms parameter definition + * + * @dir: direction + * @bank_id: the SRAM bank to free to + * @slice_size: the slice size to be returned + * @sram_offset: the SRAM slice offset (8B) to be returned + * @rm_db: RM DB Handle required for RM free + * @tbl_type: tf table type + * @tfp: A pointer to the tf handle + */ +struct tf_sram_mgr_free_parms { + enum tf_dir dir; + enum tf_sram_bank_id bank_id; + enum tf_sram_slice_size slice_size; + u16 sram_offset; + void *rm_db; + enum tf_tbl_type tbl_type; +}; + +/** + * Free an SRAM Slice + * + * Free an SRAM slice to the indicated bank. This may result in a 64B row + * being returned to the RM SRAM bank pool. 
+ * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM free parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +int tf_sram_mgr_free(void *sram_handle, struct tf_sram_mgr_free_parms *parms); + +/** + * tf_sram_mgr_dump_parms parameter definition + * + * @dir: direction + * @bank_id: the SRAM bank to dump + * @slice_size: the slice size to be dumped + */ +struct tf_sram_mgr_dump_parms { + enum tf_dir dir; + enum tf_sram_bank_id bank_id; + enum tf_sram_slice_size slice_size; +}; + +/** + * Dump a slice list + * + * Dump the slice list given the SRAM bank and the slice size + * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM free parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +int tf_sram_mgr_dump(void *sram_handle, struct tf_sram_mgr_dump_parms *parms); + +/** + * tf_sram_mgr_is_allocated_parms parameter definition + * + * @dir: direction + * @bank_id: the SRAM bank allocated from + * @slice_size: the slice size which was allocated + * @sram_offset: the SRAM slice offset to validate + * @is_allocated: indication of allocation + */ +struct tf_sram_mgr_is_allocated_parms { + enum tf_dir dir; + enum tf_sram_bank_id bank_id; + enum tf_sram_slice_size slice_size; + u16 sram_offset; + bool *is_allocated; +}; + +/** + * Validate an SRAM Slice is allocated + * + * Validate whether the SRAM slice is allocated + * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM alloc parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +int tf_sram_mgr_is_allocated(void *sram_handle, + struct tf_sram_mgr_is_allocated_parms *parms); + +/* Given the slice size, return a char string */ +const char *tf_sram_slice_2_str(enum tf_sram_slice_size slice_size); + +/* Given the bank_id, return a char string */ +const char *tf_sram_bank_2_str(enum tf_sram_bank_id bank_id); + +#endif /* _TF_SRAM_MGR_H_ */ diff --git 
a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.c new file mode 100644 index 000000000000..28c7390702ab --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.c @@ -0,0 +1,600 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +/* Truflow Table APIs and supporting code */ + +#include +#include +#include +#include "tf_tbl.h" +#include "tf_rm.h" +#include "tf_util.h" +#include "tf_msg.h" +#include "tf_session.h" +#include "tf_device.h" + +struct tf; + +#define TF_TBL_RM_TO_PTR(new_idx, idx, base, shift) { \ + *(new_idx) = (((idx) + (base)) << (shift)); \ +} + +int tf_tbl_bind(struct tf *tfp, struct tf_tbl_cfg_parms *parms) +{ + struct tf_rm_create_db_parms db_cfg = { 0 }; + int db_rc[TF_DIR_MAX] = { 0 }; + struct tbl_rm_db *tbl_db; + struct tf_session *tfs; + int rc, d, i; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + tbl_db = vzalloc(sizeof(*tbl_db)); + if (!tbl_db) + return -ENOMEM; + + for (i = 0; i < TF_DIR_MAX; i++) + tbl_db->tbl_db[i] = NULL; + tf_session_set_db(tfp, TF_MODULE_TYPE_TABLE, tbl_db); + + db_cfg.num_elements = parms->num_elements; + db_cfg.module = TF_MODULE_TYPE_TABLE; + db_cfg.num_elements = parms->num_elements; + + for (d = 0; d < TF_DIR_MAX; d++) { + db_cfg.dir = d; + db_cfg.cfg = &parms->cfg[d ? 
TF_TBL_TYPE_MAX : 0];
+		db_cfg.alloc_cnt = parms->resources->tbl_cnt[d].cnt;
+		db_cfg.rm_db = (void *)&tbl_db->tbl_db[d];
+		if (tf_session_is_shared_session(tfs) &&
+		    (!tf_session_is_shared_session_creator(tfs)))
+			db_rc[d] = tf_rm_create_db_no_reservation(tfp,
+								  &db_cfg);
+		else
+			db_rc[d] = tf_rm_create_db(tfp, &db_cfg);
+
+		if (db_rc[d]) {
+			netdev_dbg(tfp->bp->dev,
+				   "%s: No Table DB creation required\n",
+				   tf_dir_2_str(d));
+		}
+	}
+
+	/* No db created. Note: 'd' == TF_DIR_MAX here, so it must not be
+	 * passed to tf_dir_2_str() (out-of-range index); both directions
+	 * failed anyway, so log without a direction prefix.
+	 */
+	if (db_rc[TF_DIR_RX] && db_rc[TF_DIR_TX]) {
+		netdev_dbg(tfp->bp->dev,
+			   "No Table DB created for either direction\n");
+		return db_rc[TF_DIR_RX];
+	}
+
+	netdev_dbg(tfp->bp->dev, "Table Type - initialized\n");
+	return 0;
+}
+
+int tf_tbl_unbind(struct tf *tfp)
+{
+	struct tf_rm_free_db_parms fparms = { 0 };
+	struct tbl_rm_db *tbl_db;
+	void *tbl_db_ptr = NULL;
+	int rc;
+	int i;
+
+	if (!tfp)
+		return -EINVAL;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr);
+	if (rc) {
+		netdev_dbg(tfp->bp->dev, "Tbl_db is not initialized\n");
+		return 0;
+	}
+	tbl_db = (struct tbl_rm_db *)tbl_db_ptr;
+
+	for (i = 0; i < TF_DIR_MAX; i++) {
+		if (!tbl_db->tbl_db[i])
+			continue;
+		fparms.dir = i;
+		fparms.rm_db = tbl_db->tbl_db[i];
+		rc = tf_rm_free_db(tfp, &fparms);
+		if (rc)
+			return rc;
+
+		tbl_db->tbl_db[i] = NULL;
+	}
+	tf_session_set_db(tfp, TF_MODULE_TYPE_TABLE, NULL);
+	vfree(tbl_db);
+	return 0;
+}
+
+int tf_tbl_alloc(struct tf *tfp, struct tf_tbl_alloc_parms *parms)
+{
+	struct tf_rm_allocate_parms aparms = { 0 };
+	struct bnxt *bp;
+	struct tbl_rm_db *tbl_db;
+	struct tf_dev_info *dev;
+	void *tbl_db_ptr = NULL;
+	struct tf_session *tfs;
+	u32 idx;
+	int rc;
+
+	if (!tfp || !parms)
+		return -EINVAL;
+
+	bp = tfp->bp;
+
+	/* Retrieve the session information */
+	rc = tf_session_get_session_internal(tfp, &tfs);
+	if (rc)
+		return rc;
+
+	/* Retrieve the device information */
+	rc = tf_session_get_device(tfs, &dev);
+	if (rc)
+		return rc;
+
+	rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE,
&tbl_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get tbl_db from session, rc:%d\n", rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + /* Allocate requested element */ + aparms.rm_db = tbl_db->tbl_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = &idx; + rc = tf_rm_allocate(&aparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed allocate, type:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + *parms->idx = idx; + + return 0; +} + +int tf_tbl_free(struct tf *tfp, struct tf_tbl_free_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_free_parms fparms = { 0 }; + struct tbl_rm_db *tbl_db; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + int allocated = 0; + u8 fw_session_id; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + /* Check if element is in use */ + aparms.rm_db = tbl_db->tbl_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = parms->idx; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(bp->dev, + "%s: Entry already free, type:%s, index:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx); + return -EINVAL; + } + + /* If this is counter table, clear the entry on free */ + if (parms->type == 
TF_TBL_TYPE_ACT_STATS_64) { + u8 data[8] = { 0 }; + u16 hcapi_type = 0; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + + /* Get the hcapi type */ + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(bp->dev, + "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + /* Clear the counter + */ + rc = tf_msg_set_tbl_entry(tfp, parms->dir, hcapi_type, + sizeof(data), data, parms->idx, + fw_session_id); + if (rc) { + netdev_dbg(bp->dev, "%s, Set failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + } + + /* Free requested element */ + fparms.rm_db = tbl_db->tbl_db[parms->dir]; + fparms.subtype = parms->type; + fparms.index = parms->idx; + rc = tf_rm_free(&fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Free failed, type:%s, index:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx); + return rc; + } + + return 0; +} + +int tf_tbl_set(struct tf *tfp, struct tf_tbl_set_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tbl_rm_db *tbl_db; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + int allocated = 0; + u8 fw_session_id; + struct bnxt *bp; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, 
rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + /* Verify that the entry has been previously allocated. + * for meter drop counter, check the corresponding meter + * entry + */ + aparms.rm_db = tbl_db->tbl_db[parms->dir]; + if (parms->type != TF_TBL_TYPE_METER_DROP_CNT) + aparms.subtype = parms->type; + else + aparms.subtype = TF_TBL_TYPE_METER_INST; + aparms.allocated = &allocated; + aparms.index = parms->idx; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(tfp->bp->dev, + "%s, Invalid index, type:%s, idx:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx); + return -EINVAL; + } + + /* Set the entry */ + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(bp->dev, "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + rc = tf_msg_set_tbl_entry(tfp, parms->dir, hcapi_type, + parms->data_sz_in_bytes, parms->data, + parms->idx, fw_session_id); + if (rc) { + netdev_dbg(bp->dev, "%s, Set failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + return 0; +} + +int tf_tbl_get(struct tf *tfp, struct tf_tbl_get_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tbl_rm_db *tbl_db; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + int allocated = 0; + u8 fw_session_id; + struct bnxt *bp; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); 
+ if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + /* Verify that the entry has been previously allocated. + * for meter drop counter, check the corresponding meter + * entry + */ + aparms.rm_db = tbl_db->tbl_db[parms->dir]; + if (parms->type != TF_TBL_TYPE_METER_DROP_CNT) + aparms.subtype = parms->type; + else + aparms.subtype = TF_TBL_TYPE_METER_INST; + aparms.index = parms->idx; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(tfp->bp->dev, + "%s, Invalid index, type:%s, idx:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx); + return -EINVAL; + } + + /* Set the entry */ + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(bp->dev, "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + /* Get the entry */ + rc = tf_msg_get_tbl_entry(tfp, parms->dir, hcapi_type, + parms->data_sz_in_bytes, parms->data, + parms->idx, false, fw_session_id); + if (rc) { + netdev_dbg(bp->dev, "%s, Get failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + return 0; +} + +int tf_tbl_bulk_get(struct tf *tfp, struct tf_tbl_get_bulk_parms *parms) +{ + struct tf_rm_check_indexes_in_range_parms cparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct bnxt *bp; + struct tbl_rm_db *tbl_db; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + u16 hcapi_type; + int rc; + + if (!tfp || 
!parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + /* Verify that the entries are in the range of reserved resources. */ + cparms.rm_db = tbl_db->tbl_db[parms->dir]; + cparms.subtype = parms->type; + cparms.num_entries = parms->num_entries; + cparms.starting_index = parms->starting_idx; + + rc = tf_rm_check_indexes_in_range(&cparms); + if (rc) { + netdev_dbg(bp->dev, + "%s, index %d entries: %d not in range, type:%s", + tf_dir_2_str(parms->dir), parms->starting_idx, + parms->num_entries, tf_tbl_type_2_str(parms->type)); + return rc; + } + + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(bp->dev, "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + /* Get the entries */ + rc = tf_msg_bulk_get_tbl_entry(tfp, parms->dir, hcapi_type, + parms->starting_idx, + parms->num_entries, + parms->entry_sz_in_bytes, + parms->physical_mem_addr, false); + if (rc) { + netdev_dbg(bp->dev, "%s, Bulk get failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + } + + return rc; +} + +int tf_tbl_get_resc_info(struct tf *tfp, struct tf_tbl_resource_info *tbl) +{ + struct tf_rm_get_alloc_info_parms ainfo; + struct tf_resource_info *dinfo; + struct tbl_rm_db *tbl_db; + void *tbl_db_ptr = NULL; + struct tf_dev_info *dev; + u16 base = 0, shift = 0; + struct tf_session *tfs; + int d, i; + int rc; + + if (!tfp || !tbl) + 
return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc == -ENOMEM) + return 0; /* db doesn't exist */ + else if (rc) + return rc; /* error getting db */ + + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + for (d = 0; d < TF_DIR_MAX; d++) { + ainfo.rm_db = tbl_db->tbl_db[d]; + dinfo = tbl[d].info; + + if (!ainfo.rm_db) + continue; + + ainfo.info = (struct tf_rm_alloc_info *)dinfo; + ainfo.subtype = 0; + rc = tf_rm_get_all_info(&ainfo, TF_TBL_TYPE_MAX); + if (rc) + return rc; + + if (dev->ops->tf_dev_get_tbl_info) { + /* Adjust all */ + for (i = 0; i < TF_TBL_TYPE_MAX; i++) { + /* Only get table info if required for the device */ + rc = dev->ops->tf_dev_get_tbl_info(tfp, + tbl_db->tbl_db[d], + i, + &base, + &shift); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to get table info:%d\n", + tf_dir_2_str(d), i); + return rc; + } + if (dinfo[i].stride) + TF_TBL_RM_TO_PTR(&dinfo[i].start, + dinfo[i].start, + base, + shift); + } + } + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.h new file mode 100644 index 000000000000..4e16632f7ae3 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef TF_TBL_TYPE_H_ +#define TF_TBL_TYPE_H_ + +#include "tf_core.h" + +struct tf; + +/* The Table module provides processing of Internal TF table types. 
*/ + +/** + * Table configuration parameters + * + * @num_elements: Number of table types in each of the configuration arrays + * @cfg: Table Type element configuration array + * @resources: Session resource allocations + */ +struct tf_tbl_cfg_parms { + u16 num_elements; + struct tf_rm_element_cfg *cfg; + struct tf_session_resources *resources; +}; + +/** + * Table allocation parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @tbl_scope_id: Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + * @idx: Idx of allocated entry or found entry (if search_enable) + */ +struct tf_tbl_alloc_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 tbl_scope_id; + u32 *idx; +}; + +/** + * Table free parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @tbl_scope_id: Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + * @idx: Index to free + */ +struct tf_tbl_free_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 tbl_scope_id; + u32 idx; +}; + +/** + * Table set parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to set + * @tbl_scope_id: Table scope identifier (ignored unless TF_TBL_TYPE_EXT) + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Entry index to write to + */ +struct tf_tbl_set_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 tbl_scope_id; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * Table get parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @data: Entry data + * @data_sz_in_bytes: Entry size + * @idx: Entry index to read + */ +struct tf_tbl_get_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u8 *data; + u16 data_sz_in_bytes; + u32 idx; +}; + +/** + * Table get bulk parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @starting_idx: Starting index to read from + * @num_entries: Number of sequential entries + 
* @entry_sz_in_bytes: Size of the single entry + * @physical_mem_addr: Host physical address, where the data will be copied + * to by the firmware. + */ +struct tf_tbl_get_bulk_parms { + enum tf_dir dir; + enum tf_tbl_type type; + u32 starting_idx; + u16 num_entries; + u16 entry_sz_in_bytes; + u64 physical_mem_addr; +}; + +/* Table RM database */ +struct tbl_rm_db { + struct rm_db *tbl_db[TF_DIR_MAX]; +}; + +/** + * Initializes the Table module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table configuration parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_bind(struct tf *tfp, struct tf_tbl_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_unbind(struct tf *tfp); + +/** + * Allocates the requested table type from the internal RM DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table allocation parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_alloc(struct tf *tfp, struct tf_tbl_alloc_parms *parms); + +/** + * Frees the requested table type and returns it to the DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table free parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_free(struct tf *tfp, struct tf_tbl_free_parms *parms); + +/** + * Configures the requested element by sending a firmware request which + * then installs it into the device internal structures. 
+ * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table set parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_set(struct tf *tfp, struct tf_tbl_set_parms *parms); + +/** + * Retrieves the requested element by sending a firmware request to get + * the element. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table get parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_get(struct tf *tfp, struct tf_tbl_get_parms *parms); + +/** + * Retrieves bulk block of elements by sending a firmware request to + * get the elements. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table get bulk parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_bulk_get(struct tf *tfp, struct tf_tbl_get_bulk_parms *parms); + +/** + * Retrieves the allocated resource info + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table resource info parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_get_resc_info(struct tf *tfp, struct tf_tbl_resource_info *tbl); + +#endif /* TF_TBL_TYPE_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.c new file mode 100644 index 000000000000..39d9afc57443 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.c @@ -0,0 +1,659 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +/* Truflow Table APIs and supporting code */ + +#include +#include "tf_tbl.h" +#include "tf_tbl_sram.h" +#include "tf_sram_mgr.h" +#include "tf_rm.h" +#include "tf_util.h" +#include "tf_msg.h" +#include "tf_session.h" +#include "tf_device.h" +#include "cfa_resource_types.h" + +#define TF_TBL_PTR_TO_RM(new_idx, idx, base, shift) { \ + *(new_idx) = (((idx) >> (shift)) - (base)); \ +} + +/** + * tf_sram_tbl_get_info_parms parameter definition + * + * @rm_db: table RM database + * @dir: Receive or transmit direction + * @tbl_type: the TF index table type + * @bank_id: The SRAM bank associated with the type + * @slice_size: the slice size for the indicated table type + * + */ +struct tf_tbl_sram_get_info_parms { + void *rm_db; + enum tf_dir dir; + enum tf_tbl_type tbl_type; + enum tf_sram_bank_id bank_id; + enum tf_sram_slice_size slice_size; +}; + +/* Translate HCAPI type to SRAM Manager bank */ +const u16 tf_tbl_sram_hcapi_2_bank[CFA_RESOURCE_TYPE_P58_LAST] = { + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_0] = TF_SRAM_BANK_ID_0, + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_1] = TF_SRAM_BANK_ID_1, + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_2] = TF_SRAM_BANK_ID_2, + [CFA_RESOURCE_TYPE_P58_SRAM_BANK_3] = TF_SRAM_BANK_ID_3 +}; + +#define TF_TBL_SRAM_SLICES_MAX \ + (TF_SRAM_MGR_BLOCK_SZ_BYTES / TF_SRAM_MGR_MIN_SLICE_BYTES) + +/* Translate HCAPI type to SRAM Manager bank */ +const u8 tf_tbl_sram_slices_2_size[TF_TBL_SRAM_SLICES_MAX + 1] = { + [0] = TF_SRAM_SLICE_SIZE_64B, /* if 0 slices assume 1 64B block */ + [1] = TF_SRAM_SLICE_SIZE_64B, /* 1 slice per 64B block */ + [2] = TF_SRAM_SLICE_SIZE_32B, /* 2 slices per 64B block */ + [4] = TF_SRAM_SLICE_SIZE_16B, /* 4 slices per 64B block */ + [8] = TF_SRAM_SLICE_SIZE_8B /* 8 slices per 64B block */ +}; + +/** + * Get SRAM Table Information for a given index table type + * + * @sram_handle: Pointer to SRAM handle + * @parms: Pointer to the SRAM get info parameters + * + * Returns + * - (0) if successful + * - (-EINVAL) on failure + * + */ +static 
int tf_tbl_sram_get_info(struct tf_tbl_sram_get_info_parms *parms) +{ + struct tf_rm_get_slices_parms sparms; + struct tf_rm_get_hcapi_parms hparms; + u16 hcapi_type; + u16 slices; + int rc = 0; + + hparms.rm_db = parms->rm_db; + hparms.subtype = parms->tbl_type; + hparms.hcapi_type = &hcapi_type; + + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(NULL, "%s: Failed to get hcapi_type %s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->tbl_type), rc); + return rc; + } + parms->bank_id = tf_tbl_sram_hcapi_2_bank[hcapi_type]; + + sparms.rm_db = parms->rm_db; + sparms.subtype = parms->tbl_type; + sparms.slices = &slices; + + rc = tf_rm_get_slices(&sparms); + if (rc) { + netdev_dbg(NULL, "%s: Failed to get slice cnt %s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->tbl_type), rc); + return rc; + } + if (slices) + parms->slice_size = tf_tbl_sram_slices_2_size[slices]; + + netdev_dbg(NULL, "(%s) bank(%s) slice_size(%s)\n", + tf_tbl_type_2_str(parms->tbl_type), + tf_sram_bank_2_str(parms->bank_id), + tf_sram_slice_2_str(parms->slice_size)); + return rc; +} + +int tf_tbl_sram_bind(struct tf *tfp) +{ + void *sram_handle = NULL; + int rc = 0; + + if (!tfp) + return -EINVAL; + + rc = tf_sram_mgr_bind(&sram_handle); + + tf_session_set_sram_db(tfp, sram_handle); + + netdev_dbg(tfp->bp->dev, "SRAM Table - initialized\n"); + + return rc; +} + +int tf_tbl_sram_unbind(struct tf *tfp) +{ + void *sram_handle = NULL; + int rc = 0; + + if (!tfp) + return -EINVAL; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(NULL, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + tf_session_set_sram_db(tfp, NULL); + + if (sram_handle) + rc = tf_sram_mgr_unbind(sram_handle); + + netdev_dbg(tfp->bp->dev, "SRAM Table - deinitialized\n"); + return rc; +} + +int tf_tbl_sram_alloc(struct tf *tfp, struct tf_tbl_alloc_parms *parms) +{ + struct tf_tbl_sram_get_info_parms iparms = { 0 }; + struct 
tf_sram_mgr_alloc_parms aparms = { 0 }; + void *sram_handle = NULL; + struct tbl_rm_db *tbl_db; + void *tbl_db_ptr = NULL; + struct tf_dev_info *dev; + struct tf_session *tfs; + u16 idx; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get(tfp, &tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get tbl_db from session, rc:%d\n", + rc); + return rc; + } + + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + + iparms.rm_db = tbl_db->tbl_db[parms->dir]; + iparms.dir = parms->dir; + iparms.tbl_type = parms->type; + + rc = tf_tbl_sram_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to get SRAM info %s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + aparms.dir = parms->dir; + aparms.bank_id = iparms.bank_id; + aparms.slice_size = iparms.slice_size; + aparms.sram_offset = &idx; + aparms.tbl_type = parms->type; + aparms.rm_db = tbl_db->tbl_db[parms->dir]; + + rc = tf_sram_mgr_alloc(sram_handle, &aparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to allocate SRAM table:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + *parms->idx = idx; + + return rc; +} + +int tf_tbl_sram_free(struct tf *tfp, struct tf_tbl_free_parms *parms) +{ + struct tf_sram_mgr_is_allocated_parms aparms = { 0 }; + struct tf_tbl_sram_get_info_parms iparms = { 0 }; + struct tf_sram_mgr_free_parms fparms = { 0 }; + struct tbl_rm_db *tbl_db; + void *sram_handle = NULL; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + bool allocated = false; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc 
= tf_session_get(tfp, &tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + + iparms.rm_db = tbl_db->tbl_db[parms->dir]; + iparms.dir = parms->dir; + iparms.tbl_type = parms->type; + + rc = tf_tbl_sram_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s: Failed to get table info:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + aparms.sram_offset = parms->idx; + aparms.slice_size = iparms.slice_size; + aparms.bank_id = iparms.bank_id; + aparms.dir = parms->dir; + aparms.is_allocated = &allocated; + + rc = tf_sram_mgr_is_allocated(sram_handle, &aparms); + if (rc || !allocated) { + netdev_dbg(tfp->bp->dev, + "%s: Free of invalid entry:%s idx(%d):(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx, rc); + rc = -ENOMEM; + return rc; + } + + fparms.rm_db = tbl_db->tbl_db[parms->dir]; + fparms.tbl_type = parms->type; + fparms.sram_offset = parms->idx; + fparms.slice_size = iparms.slice_size; + fparms.bank_id = iparms.bank_id; + fparms.dir = parms->dir; + rc = tf_sram_mgr_free(sram_handle, &fparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to free entry:%s idx(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx); + return rc; + } + + return rc; +} + +int tf_tbl_sram_set(struct tf *tfp, struct tf_tbl_set_parms *parms) +{ + struct tf_sram_mgr_is_allocated_parms aparms = { 0 }; + struct tf_tbl_sram_get_info_parms iparms = { 0 }; + struct tf_rm_is_allocated_parms raparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tbl_rm_db *tbl_db; + void *sram_handle = NULL; + u16 
base = 0, shift = 0; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + bool allocated = 0; + int rallocated = 0; + u8 fw_session_id; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get(tfp, &tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + + iparms.rm_db = tbl_db->tbl_db[parms->dir]; + iparms.dir = parms->dir; + iparms.tbl_type = parms->type; + + rc = tf_tbl_sram_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s: Failed to get table info:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + if (tf_session_is_shared_session(tfs)) { + /* Only get table info if required for the device */ + if (dev->ops->tf_dev_get_tbl_info) { + rc = dev->ops->tf_dev_get_tbl_info(tfp, + tbl_db->tbl_db[parms->dir], + parms->type, + &base, + &shift); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s: Failed to get table info:%d\n", + tf_dir_2_str(parms->dir), + parms->type); + return rc; + } + } + TF_TBL_PTR_TO_RM(&raparms.index, parms->idx, base, shift); + + raparms.rm_db = tbl_db->tbl_db[parms->dir]; + raparms.subtype = parms->type; + raparms.allocated = &rallocated; + rc = tf_rm_is_allocated(&raparms); + if (rc) + return rc; + + if (rallocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(tfp->bp->dev, + "%s, Invalid index, type:%s, idx:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), + parms->idx); + return -EINVAL; + } + } else { + 
aparms.sram_offset = parms->idx; + aparms.slice_size = iparms.slice_size; + aparms.bank_id = iparms.bank_id; + aparms.dir = parms->dir; + aparms.is_allocated = &allocated; + rc = tf_sram_mgr_is_allocated(sram_handle, &aparms); + if (rc || !allocated) { + netdev_dbg(tfp->bp->dev, + "%s: Entry not allocated:%s idx(%d):(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), + parms->idx, rc); + rc = -ENOMEM; + return rc; + } + } + + /* Set the entry */ + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + rc = tf_msg_set_tbl_entry(tfp, parms->dir, hcapi_type, + parms->data_sz_in_bytes, parms->data, + parms->idx, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s, Set failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + return rc; +} + +int tf_tbl_sram_get(struct tf *tfp, struct tf_tbl_get_parms *parms) +{ + struct tf_sram_mgr_is_allocated_parms aparms = { 0 }; + struct tf_tbl_sram_get_info_parms iparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + bool clear_on_read = false; + struct tbl_rm_db *tbl_db; + void *sram_handle = NULL; + struct tf_dev_info *dev; + void *tbl_db_ptr = NULL; + struct tf_session *tfs; + bool allocated = 0; + u8 fw_session_id; + u16 hcapi_type; + int rc; + + if (!tfp || !parms || !parms->data) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get(tfp, &tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = 
(struct tbl_rm_db *)tbl_db_ptr; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + + iparms.rm_db = tbl_db->tbl_db[parms->dir]; + iparms.dir = parms->dir; + iparms.tbl_type = parms->type; + + rc = tf_tbl_sram_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s: Failed to get table info:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + aparms.sram_offset = parms->idx; + aparms.slice_size = iparms.slice_size; + aparms.bank_id = iparms.bank_id; + aparms.dir = parms->dir; + aparms.is_allocated = &allocated; + + rc = tf_sram_mgr_is_allocated(sram_handle, &aparms); + if (rc || !allocated) { + netdev_dbg(tfp->bp->dev, + "%s: Entry not allocated:%s idx(%d):(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->idx, rc); + rc = -ENOMEM; + return rc; + } + + /* Get the entry */ + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + /* Get the entry */ + rc = tf_msg_get_tbl_entry(tfp, parms->dir, hcapi_type, + parms->data_sz_in_bytes, parms->data, + parms->idx, clear_on_read, fw_session_id); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s, Get failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + return rc; +} + +int tf_tbl_sram_bulk_get(struct tf *tfp, struct tf_tbl_get_bulk_parms *parms) +{ + struct tf_sram_mgr_is_allocated_parms aparms = { 0 }; + struct tf_tbl_sram_get_info_parms iparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + bool clear_on_read = false; + struct tbl_rm_db *tbl_db; + void *sram_handle = NULL; + void *tbl_db_ptr = NULL; + struct 
tf_dev_info *dev; + struct tf_session *tfs; + bool allocated = false; + u16 hcapi_type; + u16 idx; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get(tfp, &tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TABLE, &tbl_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tbl_db = (struct tbl_rm_db *)tbl_db_ptr; + + rc = tf_session_get_sram_db(tfp, &sram_handle); + if (rc) { + netdev_dbg(tfp->bp->dev, + "Failed to get sram_handle from session, rc:%d\n", + rc); + return rc; + } + + iparms.rm_db = tbl_db->tbl_db[parms->dir]; + iparms.dir = parms->dir; + iparms.tbl_type = parms->type; + + rc = tf_tbl_sram_get_info(&iparms); + if (rc) { + netdev_dbg(tfp->bp->dev, "%s: Failed to get table info:%s\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type)); + return rc; + } + + /* Validate the start offset and the end offset is allocated + * This API is only used for statistics. 
8 Byte entry allocation + * is used to verify + */ + aparms.sram_offset = parms->starting_idx; + aparms.slice_size = iparms.slice_size; + aparms.bank_id = iparms.bank_id; + aparms.dir = parms->dir; + aparms.is_allocated = &allocated; + rc = tf_sram_mgr_is_allocated(sram_handle, &aparms); + if (rc || !allocated) { + netdev_dbg(tfp->bp->dev, + "%s: Entry not allocated:%s start_idx(%d):(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), parms->starting_idx, + rc); + rc = -ENOMEM; + return rc; + } + idx = parms->starting_idx + parms->num_entries - 1; + aparms.sram_offset = idx; + rc = tf_sram_mgr_is_allocated(sram_handle, &aparms); + if (rc || !allocated) { + netdev_dbg(tfp->bp->dev, + "%s: Entry not allocated:%s last_idx(%d):(%d)\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), idx, rc); + rc = -ENOMEM; + return rc; + } + + hparms.rm_db = tbl_db->tbl_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &hcapi_type; + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, Failed type lookup, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + return rc; + } + + if (parms->type == TF_TBL_TYPE_ACT_STATS_64) + clear_on_read = true; + + /* Get the entries */ + rc = tf_msg_bulk_get_tbl_entry(tfp, + parms->dir, + hcapi_type, + parms->starting_idx, + parms->num_entries, + parms->entry_sz_in_bytes, + parms->physical_mem_addr, + clear_on_read); + if (rc) { + netdev_dbg(tfp->bp->dev, + "%s, Bulk get failed, type:%s, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tbl_type_2_str(parms->type), rc); + } + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.h new file mode 100644 index 000000000000..2059498c0b1b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tbl_sram.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 
2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef TF_TBL_SRAM_H_ +#define TF_TBL_SRAM_H_ + +#include "tf_core.h" + +/* The SRAM Table module provides processing of managed SRAM types. */ + +/** + * Initializes the Table module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table configuration parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_bind(struct tf *tfp); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_unbind(struct tf *tfp); + +/** + * Allocates the requested table type from the internal RM DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table allocation parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_alloc(struct tf *tfp, struct tf_tbl_alloc_parms *parms); + +/** + * Free's the requested table type and returns it to the DB. If shadow + * DB is enabled its searched first and if found the element refcount + * is decremented. If refcount goes to 0 then its returned to the + * table type DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table free parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_free(struct tf *tfp, struct tf_tbl_free_parms *parms); + +/** + * Configures the requested element by sending a firmware request which + * then installs it into the device internal structures. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table set parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. 
+ */ +int tf_tbl_sram_set(struct tf *tfp, struct tf_tbl_set_parms *parms); + +/** + * Retrieves the requested element by sending a firmware request to get + * the element. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table get parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_get(struct tf *tfp, struct tf_tbl_get_parms *parms); + +/** + * Retrieves bulk block of elements by sending a firmware request to + * get the elements. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to Table get bulk parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tbl_sram_bulk_get(struct tf *tfp, struct tf_tbl_get_bulk_parms *parms); + +#endif /* TF_TBL_SRAM_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.c new file mode 100644 index 000000000000..29244aea14c5 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.c @@ -0,0 +1,762 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2021 Broadcom + * All rights reserved. 
+ */ + +#include +#include +#include "tf_tcam.h" +#include "tf_util.h" +#include "tf_rm.h" +#include "tf_device.h" +#include "tf_session.h" +#include "tf_msg.h" +#include "tf_tcam_mgr_msg.h" + +struct tf; + +int tf_tcam_bind(struct tf *tfp, struct tf_tcam_cfg_parms *parms) +{ + struct tf_resource_info resv_res[TF_DIR_MAX][TF_TCAM_TBL_TYPE_MAX]; + struct tf_tcam_resources local_tcam_cnt[TF_DIR_MAX]; + struct tf_rm_get_alloc_info_parms ainfo; + struct tf_rm_create_db_parms db_cfg = { 0 }; + struct tf_tcam_resources *tcam_cnt; + struct tf_rm_free_db_parms fparms; + int db_rc[TF_DIR_MAX] = { 0 }; + struct tf_rm_alloc_info info; + struct tcam_rm_db *tcam_db; + struct tf_dev_info *dev; + struct tf_session *tfs; + u16 num_slices = 1; + bool no_req = true; + u32 rx_supported; + u32 tx_supported; + int d, t; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + if (!dev->ops->tf_dev_get_tcam_slice_info) { + rc = -EOPNOTSUPP; + netdev_dbg(tfp->bp->dev, "Operation not supported, rc:%d\n", + rc); + return rc; + } + + tcam_cnt = parms->resources->tcam_cnt; + + for (d = 0; d < TF_DIR_MAX; d++) { + for (t = 0; t < TF_TCAM_TBL_TYPE_MAX; t++) { + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, t, 0, + &num_slices); + if (rc) + return rc; + + if (num_slices == 1) + continue; + + if (tcam_cnt[d].cnt[t] % num_slices) { + netdev_err(NULL, + "%s: Requested num of %s entries has to be multiple of %d\n", + tf_dir_2_str(d), + tf_tcam_tbl_2_str(t), num_slices); + return -EINVAL; + } + } + } + + tcam_db = vzalloc(sizeof(*tcam_db)); + if (!tcam_db) + return -ENOMEM; + + for (d = 0; d < TF_DIR_MAX; d++) + tcam_db->tcam_db[d] = NULL; + + tf_session_set_db(tfp, TF_MODULE_TYPE_TCAM, tcam_db); + + db_cfg.module = TF_MODULE_TYPE_TCAM; + db_cfg.num_elements = parms->num_elements; 
+ db_cfg.cfg = parms->cfg; + + for (d = 0; d < TF_DIR_MAX; d++) { + db_cfg.dir = d; + db_cfg.alloc_cnt = tcam_cnt[d].cnt; + db_cfg.rm_db = (void *)&tcam_db->tcam_db[d]; + db_rc[d] = tf_rm_create_db(tfp, &db_cfg); + + if (db_rc[d]) { + netdev_dbg(tfp->bp->dev, "%s: no TCAM DB required\n", + tf_dir_2_str(d)); + } + } + + /* No db created */ + if (db_rc[TF_DIR_RX] && db_rc[TF_DIR_TX]) { + netdev_dbg(tfp->bp->dev, "No TCAM DB created\n"); + tf_session_set_db(tfp, TF_MODULE_TYPE_TCAM, NULL); + vfree(tcam_db); + return db_rc[TF_DIR_RX]; + } + + /* Collect info on which entries were reserved. */ + for (d = 0; d < TF_DIR_MAX; d++) { + for (t = 0; t < TF_TCAM_TBL_TYPE_MAX; t++) { + memset(&info, 0, sizeof(info)); + if (tcam_cnt[d].cnt[t] == 0) { + resv_res[d][t].start = 0; + resv_res[d][t].stride = 0; + continue; + } + ainfo.rm_db = tcam_db->tcam_db[d]; + ainfo.subtype = t; + ainfo.info = &info; + rc = tf_rm_get_info(&ainfo); + if (rc) + goto error; + + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, t, 0, + &num_slices); + if (rc) + return rc; + + if (num_slices > 1) { + /* check if reserved resource for is multiple of + * num_slices + */ + if (info.entry.start % num_slices != 0 || + info.entry.stride % num_slices != 0) { + netdev_err(tfp->bp->dev, + "%s: %s reserved resource is not multiple of %d\n", + tf_dir_2_str(d), + tf_tcam_tbl_2_str(t), + num_slices); + rc = -EINVAL; + goto error; + } + } + + resv_res[d][t].start = info.entry.start; + resv_res[d][t].stride = info.entry.stride; + } + } + + rc = tf_tcam_mgr_bind_msg(tfp, dev, parms, resv_res); + if (rc) + return rc; + + rc = tf_tcam_mgr_qcaps_msg(tfp, dev, + &rx_supported, &tx_supported); + if (rc) + return rc; + + for (t = 0; t < TF_TCAM_TBL_TYPE_MAX; t++) { + if (rx_supported & 1 << t) + tfs->tcam_mgr_control[TF_DIR_RX][t] = 1; + if (tx_supported & 1 << t) + tfs->tcam_mgr_control[TF_DIR_TX][t] = 1; + } + + /* Make a local copy of tcam_cnt with only resources not managed by + * TCAM Manager requested. 
+ */ + memcpy(&local_tcam_cnt, tcam_cnt, sizeof(local_tcam_cnt)); + tcam_cnt = local_tcam_cnt; + for (d = 0; d < TF_DIR_MAX; d++) { + for (t = 0; t < TF_TCAM_TBL_TYPE_MAX; t++) { + /* If controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[d][t]) + tcam_cnt[d].cnt[t] = 0; + else if (tcam_cnt[d].cnt[t] > 0) + no_req = false; + } + } + + /* If no resources left to request */ + if (no_req) + goto finished; + +finished: + netdev_dbg(tfp->bp->dev, "TCAM - initialized\n"); + return 0; + +error: + for (d = 0; d < TF_DIR_MAX; d++) { + if (tcam_db->tcam_db[d]) { + memset(&fparms, 0, sizeof(fparms)); + fparms.dir = d; + fparms.rm_db = tcam_db->tcam_db[d]; + /* Ignoring return here as we are in the error case */ + (void)tf_rm_free_db(tfp, &fparms); + tcam_db->tcam_db[d] = NULL; + } + + tcam_db->tcam_db[d] = NULL; + tf_session_set_db(tfp, TF_MODULE_TYPE_TCAM, NULL); + } + vfree(tcam_db); + + return rc; +} + +int tf_tcam_unbind(struct tf *tfp) +{ + struct tf_rm_free_db_parms fparms = { 0 }; + struct tcam_rm_db *tcam_db; + void *tcam_db_ptr = NULL; + struct tf_dev_info *dev; + struct tf_session *tfs; + int rc; + int i; + + if (!tfp) + return -EINVAL; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc) { + netdev_dbg(tfp->bp->dev, "Tcam_db is not initialized\n"); + return 0; + } + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + for (i = 0; i < TF_DIR_MAX; i++) { + if (!tcam_db->tcam_db[i]) + continue; + memset(&fparms, 0, sizeof(fparms)); + fparms.dir = i; + fparms.rm_db = tcam_db->tcam_db[i]; + rc = tf_rm_free_db(tfp, &fparms); + if (rc) + return rc; + + tcam_db->tcam_db[i] = NULL; + } + + /* free TCAM database pointer */ + tf_session_set_db(tfp, TF_MODULE_TYPE_TCAM, NULL); + vfree(tcam_db); + + rc = tf_tcam_mgr_unbind_msg(tfp, 
dev); + if (rc) + return rc; + return 0; +} + +int tf_tcam_alloc(struct tf *tfp, struct tf_tcam_alloc_parms *parms) +{ + struct tf_rm_allocate_parms aparms = { 0 }; + struct tcam_rm_db *tcam_db; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session *tfs; + void *tcam_db_ptr = NULL; + u16 num_slices = 1; + int rc, i; + u32 index; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + if (!dev->ops->tf_dev_get_tcam_slice_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Need to retrieve number of slices based on the key_size */ + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, + parms->type, + parms->key_size, + &num_slices); + if (rc) + return rc; + + /* If TCAM controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[parms->dir][parms->type]) + return tf_tcam_mgr_alloc_msg(tfp, dev, parms); + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get tcam_db from session, rc:%d\n", rc); + return rc; + } + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + /* For WC TCAM, number of slices could be 4, 2, 1 based on + * the key_size. 
For other TCAM, it is always 1 + */ + for (i = 0; i < num_slices; i++) { + aparms.rm_db = tcam_db->tcam_db[parms->dir]; + aparms.subtype = parms->type; + aparms.priority = parms->priority; + aparms.index = &index; + rc = tf_rm_allocate(&aparms); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed tcam, type:%d\n", + tf_dir_2_str(parms->dir), parms->type); + return rc; + } + + /* return the start index of each row */ + if (i == 0) + parms->idx = index; + } + + return 0; +} + +int tf_tcam_free(struct tf *tfp, struct tf_tcam_free_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tf_rm_free_parms fparms = { 0 }; + struct tcam_rm_db *tcam_db; + void *tcam_db_ptr = NULL; + struct tf_dev_info *dev; + struct tf_session *tfs; + u16 num_slices = 1; + int allocated = 0; + u8 fw_session_id; + struct bnxt *bp; + int rc; + int i; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + if (!dev->ops->tf_dev_get_tcam_slice_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Need to retrieve row size etc */ + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, + parms->type, + 0, + &num_slices); + if (rc) + return rc; + + /* If TCAM controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[parms->dir][parms->type]) { + /* If a session can have multiple references to an entry, check + * the reference count here before actually freeing the entry. 
+ */ + parms->ref_cnt = 0; + return tf_tcam_mgr_free_msg(tfp, dev, parms); + } + + if (parms->idx % num_slices) { + netdev_dbg(bp->dev, + "%s: TCAM reserved resource not multiple of %d\n", + tf_dir_2_str(parms->dir), num_slices); + return -EINVAL; + } + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; + } + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + /* Check if element is in use */ + aparms.rm_db = tcam_db->tcam_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = parms->idx; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(bp->dev, + "%s: Entry already free, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, parms->idx); + return -EINVAL; + } + + for (i = 0; i < num_slices; i++) { + /* Free requested element */ + fparms.rm_db = tcam_db->tcam_db[parms->dir]; + fparms.subtype = parms->type; + fparms.index = parms->idx + i; + rc = tf_rm_free(&fparms); + if (rc) { + netdev_dbg(bp->dev, + "%s: Free failed, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, + parms->idx); + return rc; + } + } + + /* Convert TF type to HCAPI RM type */ + hparms.rm_db = tcam_db->tcam_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &parms->hcapi_type; + + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) + return rc; + + rc = tf_msg_tcam_entry_free(tfp, parms, fw_session_id); + if (rc) { + /* Log error */ + netdev_dbg(bp->dev, "%s: %s: Entry %d free failed, rc:%d\n", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->type), parms->idx, rc); + return rc; + } + + return 0; +} + +int tf_tcam_alloc_search(struct tf *tfp, + struct tf_tcam_alloc_search_parms *parms) +{ + struct tf_tcam_alloc_parms aparms = { 0 }; + u16 num_slice_per_row = 1; + struct bnxt *bp; + struct tf_dev_info *dev; + struct tf_session 
*tfs; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + if (!dev->ops->tf_dev_get_tcam_slice_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Need to retrieve row size etc */ + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, parms->type, + parms->key_size, + &num_slice_per_row); + if (rc) + return rc; + + /* If TCAM controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[parms->dir][parms->type]) { + /* If a session can have multiple references to an entry, + * search the session's entries first. If the caller + * requested an alloc and a match was found, update the + * ref_cnt before returning. + */ + return -EINVAL; + } + + /* The app didn't request us to alloc the entry, so return now. + * The hit should have been updated in the original search parm. 
+ */ + if (!parms->alloc || parms->search_status != MISS) + return rc; + + /* Caller desires an allocate on miss */ + if (!dev->ops->tf_dev_alloc_tcam) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + aparms.dir = parms->dir; + aparms.type = parms->type; + aparms.key_size = parms->key_size; + aparms.priority = parms->priority; + rc = dev->ops->tf_dev_alloc_tcam(tfp, &aparms); + if (rc) + return rc; + + /* Add the allocated index to output and done */ + parms->idx = aparms.idx; + + return 0; +} + +int tf_tcam_set(struct tf *tfp, struct tf_tcam_set_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tcam_rm_db *tcam_db; + u16 num_slice_per_row = 1; + void *tcam_db_ptr = NULL; + struct tf_dev_info *dev; + struct tf_session *tfs; + u8 fw_session_id; + int allocated = 0; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + if (!dev->ops->tf_dev_get_tcam_slice_info) { + rc = -EOPNOTSUPP; + netdev_dbg(bp->dev, "%s: Operation not supported, rc:%d\n", + tf_dir_2_str(parms->dir), rc); + return rc; + } + + /* Need to retrieve row size etc */ + rc = dev->ops->tf_dev_get_tcam_slice_info(tfp, + parms->type, + parms->key_size, + &num_slice_per_row); + if (rc) + return rc; + + /* If TCAM controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[parms->dir][parms->type]) + return tf_tcam_mgr_set_msg(tfp, dev, parms); + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + rc); + return rc; 
+ } + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + /* Check if element is in use */ + aparms.rm_db = tcam_db->tcam_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = parms->idx; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(bp->dev, + "%s: Entry is not allocated, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, parms->idx); + return -EINVAL; + } + + /* Convert TF type to HCAPI RM type */ + hparms.rm_db = tcam_db->tcam_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &parms->hcapi_type; + + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) + return rc; + + rc = tf_msg_tcam_entry_set(tfp, parms, fw_session_id); + if (rc) { + /* Log error */ + netdev_dbg(bp->dev, "%s: %s: Entry %d set failed, rc:%d", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->type), parms->idx, rc); + return rc; + } + + return 0; +} + +int tf_tcam_get(struct tf *tfp, struct tf_tcam_get_parms *parms) +{ + struct tf_rm_is_allocated_parms aparms = { 0 }; + struct tf_rm_get_hcapi_parms hparms = { 0 }; + struct tcam_rm_db *tcam_db; + void *tcam_db_ptr = NULL; + struct tf_dev_info *dev; + struct tf_session *tfs; + int allocated = 0; + u8 fw_session_id; + struct bnxt *bp; + int rc; + + if (!tfp || !parms) + return -EINVAL; + + bp = tfp->bp; + + /* Retrieve the session information */ + rc = tf_session_get_session_internal(tfp, &tfs); + if (rc) + return rc; + + /* Retrieve the device information */ + rc = tf_session_get_device(tfs, &dev); + if (rc) + return rc; + + rc = tf_session_get_fw_session_id(tfp, &fw_session_id); + if (rc) + return rc; + + /* If TCAM controlled by TCAM Manager */ + if (tfs->tcam_mgr_control[parms->dir][parms->type]) + return tf_tcam_mgr_get_msg(tfp, dev, parms); + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get em_ext_db from session, rc:%d\n", + 
rc); + return rc; + } + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + /* Check if element is in use */ + aparms.rm_db = tcam_db->tcam_db[parms->dir]; + aparms.subtype = parms->type; + aparms.index = parms->idx; + aparms.allocated = &allocated; + rc = tf_rm_is_allocated(&aparms); + if (rc) + return rc; + + if (allocated != TF_RM_ALLOCATED_ENTRY_IN_USE) { + netdev_dbg(bp->dev, + "%s: Entry is not allocated, type:%d, index:%d\n", + tf_dir_2_str(parms->dir), parms->type, parms->idx); + return -EINVAL; + } + + /* Convert TF type to HCAPI RM type */ + hparms.rm_db = tcam_db->tcam_db[parms->dir]; + hparms.subtype = parms->type; + hparms.hcapi_type = &parms->hcapi_type; + + rc = tf_rm_get_hcapi_type(&hparms); + if (rc) + return rc; + + rc = tf_msg_tcam_entry_get(tfp, parms, fw_session_id); + if (rc) { + /* Log error */ + netdev_dbg(bp->dev, "%s: %s: Entry %d set failed, rc:%d", + tf_dir_2_str(parms->dir), + tf_tcam_tbl_2_str(parms->type), parms->idx, rc); + return rc; + } + + return 0; +} + +int tf_tcam_get_resc_info(struct tf *tfp, struct tf_tcam_resource_info *tcam) +{ + struct tf_rm_get_alloc_info_parms ainfo; + struct tf_resource_info *dinfo; + struct tcam_rm_db *tcam_db; + void *tcam_db_ptr = NULL; + int rc; + int d; + + if (!tfp || !tcam) + return -EINVAL; + + rc = tf_session_get_db(tfp, TF_MODULE_TYPE_TCAM, &tcam_db_ptr); + if (rc == -ENOMEM) + return 0; /* db doesn't exist */ + else if (rc) + return rc; /* error getting db */ + + tcam_db = (struct tcam_rm_db *)tcam_db_ptr; + + /* check if reserved resource for WC is multiple of num_slices */ + for (d = 0; d < TF_DIR_MAX; d++) { + ainfo.rm_db = tcam_db->tcam_db[d]; + + if (!ainfo.rm_db) + continue; + + dinfo = tcam[d].info; + + ainfo.info = (struct tf_rm_alloc_info *)dinfo; + ainfo.subtype = 0; + rc = tf_rm_get_all_info(&ainfo, TF_TCAM_TBL_TYPE_MAX); + if (rc && rc != -EOPNOTSUPP) + return rc; + } + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.h 
b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.h new file mode 100644 index 000000000000..0ee151da20db --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_TCAM_H_ +#define _TF_TCAM_H_ + +#include "tf_core.h" + +/* The TCAM module provides processing of Internal TCAM types. */ + +/* Number of slices per row for WC TCAM */ +extern u16 g_wc_num_slices_per_row; + +/** + * TCAM configuration parameters + * + * @num_elements: Number of tcam types in each of the configuration arrays + * @cfg: TCAM configuration array + * @shadow_cfg: Shadow table type configuration array + * @shadow_copy: Boolean controlling the request shadow copy. + * @resources: Session resource allocations + * @wc_num_slices: WC number of slices per row. + */ +struct tf_tcam_cfg_parms { + u16 num_elements; + struct tf_rm_element_cfg *cfg; + struct tf_shadow_tcam_cfg *shadow_cfg; + bool shadow_copy; + struct tf_session_resources *resources; + enum tf_wc_num_slice wc_num_slices; +}; + +/** + * TCAM allocation parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @key_size: key size + * @priority: Priority of entry requested (definition TBD) + * @idx: Idx of allocated entry or found entry (if search_enable) + */ +struct tf_tcam_alloc_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type type; + u16 key_size; + u32 priority; + u16 idx; +}; + +/** + * TCAM free parameters + * + * @dir: Receive or transmit direction + * @type: Type of the allocation + * @hcapi_type: Type of HCAPI + * @idx: Index to free + * @ref_cnt: Reference count after free, only valid if session has been + * created with shadow_copy. 
+ */ +struct tf_tcam_free_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type type; + u16 hcapi_type; + u16 idx; + u16 ref_cnt; +}; + +/** + * TCAM allocate search parameters + * + * @dir: Receive or transmit direction + * @type: TCAM table type + * @hcapi_type: Type of HCAPI + * @key: Key data to match on + * @key_size: Key size in bits + * @mask: Mask data to match on + * @priority: Priority of entry requested (definition TBD) + * @alloc: Allocate on miss. + * @hit: Set if matching entry found + * @search_status: Search result status (hit, miss, reject) + * @ref_cnt: Current refcnt after allocation + * @result: The result data from the search is copied here + * @result_size: result size in bits for the result data + * @idx: Index found + */ +struct tf_tcam_alloc_search_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type type; + u16 hcapi_type; + u8 *key; + u16 key_size; + u8 *mask; + u32 priority; + u8 alloc; + u8 hit; + enum tf_search_status search_status; + u16 ref_cnt; + u8 *result; + u16 result_size; + u16 idx; +}; + +/** + * TCAM set parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to set + * @hcapi_type: Type of HCAPI + * @idx: Entry index to write to + * @key: array containing key + * @mask: array containing mask fields + * @key_size: key size + * @result: array containing result + * @result_size: result size + */ +struct tf_tcam_set_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type type; + u16 hcapi_type; + u32 idx; + u8 *key; + u8 *mask; + u16 key_size; + u8 *result; + u16 result_size; +}; + +/** + * TCAM get parameters + * + * @dir: Receive or transmit direction + * @type: Type of object to get + * @hcapi_type: Type of HCAPI + * @idx: Entry index to read + * @key: array containing key + * @mask: array containing mask fields + * @key_size: key size + * @result: array containing result + * @result_size: result size + */ +struct tf_tcam_get_parms { + enum tf_dir dir; + enum tf_tcam_tbl_type type; + u16 hcapi_type; + u32 idx; 
+ u8 *key; + u8 *mask; + u16 key_size; + u8 *result; + u16 result_size; +}; + +/* TCAM RM database */ +struct tcam_rm_db { + struct rm_db *tcam_db[TF_DIR_MAX]; +}; + +/** + * Initializes the TCAM module with the requested DBs. Must be + * invoked as the first thing before any of the access functions. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_bind(struct tf *tfp, struct tf_tcam_cfg_parms *parms); + +/** + * Cleans up the private DBs and releases all the data. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_unbind(struct tf *tfp); + +/** + * Allocates the requested tcam type from the internal RM DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_alloc(struct tf *tfp, struct tf_tcam_alloc_parms *parms); + +/** + * Free's the requested table type and returns it to the DB. If shadow + * DB is enabled its searched first and if found the element refcount + * is decremented. If refcount goes to 0 then its returned to the + * table type DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_free(struct tf *tfp, struct tf_tcam_free_parms *parms); + +/** + * Supported if Shadow DB is configured. Searches the Shadow DB for + * any matching element. If found the refcount in the shadow DB is + * updated accordingly. If not found a new element is allocated and + * installed into the shadow DB. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. 
+ * - (-EINVAL) on failure. + */ +int tf_tcam_alloc_search(struct tf *tfp, + struct tf_tcam_alloc_search_parms *parms); + +/** + * Configures the requested element by sending a firmware request which + * then installs it into the device internal structures. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_set(struct tf *tfp, struct tf_tcam_set_parms *parms); + +/** + * Retrieves the requested element by sending a firmware request to get + * the element. + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_get(struct tf *tfp, struct tf_tcam_get_parms *parms); + +/** + * Retrieves the allocated resource info + * + * @tfp: Pointer to TF handle, used for HCAPI communication + * @parms: Pointer to parameters + * + * Returns + * - (0) if successful. + * - (-EINVAL) on failure. + */ +int tf_tcam_get_resc_info(struct tf *tfp, struct tf_tcam_resource_info *parms); + +#endif /* _TF_TCAM_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.c new file mode 100644 index 000000000000..68639612b0e1 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2021-2022 Broadcom + * All rights reserved. + */ + +#include +#include +#include "tf_tcam.h" +#include "cfa_tcam_mgr.h" +#include "tf_tcam_mgr_msg.h" +#include "bnxt_compat.h" +#include "bnxt.h" + +/* Table to convert TCAM type to logical TCAM type for applications. + * Index is tf_tcam_tbl_type. 
+ */ +static enum cfa_tcam_mgr_tbl_type tcam_types[TF_TCAM_TBL_TYPE_MAX] = { + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH] = + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_HIGH_APPS, + [TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW] = + CFA_TCAM_MGR_TBL_TYPE_L2_CTXT_TCAM_LOW_APPS, + [TF_TCAM_TBL_TYPE_PROF_TCAM] = + CFA_TCAM_MGR_TBL_TYPE_PROF_TCAM_APPS, + [TF_TCAM_TBL_TYPE_WC_TCAM] = + CFA_TCAM_MGR_TBL_TYPE_WC_TCAM_APPS, + [TF_TCAM_TBL_TYPE_SP_TCAM] = + CFA_TCAM_MGR_TBL_TYPE_SP_TCAM_APPS, + [TF_TCAM_TBL_TYPE_CT_RULE_TCAM] = + CFA_TCAM_MGR_TBL_TYPE_CT_RULE_TCAM_APPS, + [TF_TCAM_TBL_TYPE_VEB_TCAM] = + CFA_TCAM_MGR_TBL_TYPE_VEB_TCAM_APPS, +}; + +static u16 hcapi_type[TF_TCAM_TBL_TYPE_MAX]; + +/* This is the glue between the core tf_tcam and the TCAM manager. It is + * intended to abstract out the location of the TCAM manager so that the core + * code will be the same if the TCAM manager is in the core or in firmware. + * + * If the TCAM manager is in the core, then this file will just translate to + * TCAM manager APIs. If TCAM manager is in firmware, then this file will cause + * messages to be sent (except for bind and unbind). 
+ */ +int tf_tcam_mgr_qcaps_msg(struct tf *tfp, struct tf_dev_info *dev, + u32 *rx_tcam_supported, + u32 *tx_tcam_supported) +{ + struct cfa_tcam_mgr_qcaps_parms mgr_parms; + int rc; + + memset(&mgr_parms, 0, sizeof(mgr_parms)); + rc = cfa_tcam_mgr_qcaps(tfp, &mgr_parms); + if (rc >= 0) { + *rx_tcam_supported = mgr_parms.rx_tcam_supported; + *tx_tcam_supported = mgr_parms.tx_tcam_supported; + } + return rc; +} + +int tf_tcam_mgr_bind_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_cfg_parms *parms, + struct tf_resource_info + resv_res[][TF_TCAM_TBL_TYPE_MAX]) +{ + struct tf_rm_resc_entry + mgr_resv_res[TF_DIR_MAX][CFA_TCAM_MGR_TBL_TYPE_MAX]; + struct cfa_tcam_mgr_cfg_parms mgr_parms; + int dir, rc; + int type; + + if (parms->num_elements != TF_TCAM_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, + "Invalid num elements in tcam mgr bind request\n"); + netdev_dbg(tfp->bp->dev, "expected:%d received:%d\n", + TF_TCAM_TBL_TYPE_MAX, parms->num_elements); + return -EINVAL; + } + + for (type = 0; type < TF_TCAM_TBL_TYPE_MAX; type++) + hcapi_type[type] = parms->cfg[type].hcapi_type; + + memset(&mgr_parms, 0, sizeof(mgr_parms)); + + mgr_parms.num_elements = CFA_TCAM_MGR_TBL_TYPE_MAX; + + /* Convert the data to logical tables */ + for (dir = 0; dir < TF_DIR_MAX; dir++) { + for (type = 0; type < TF_TCAM_TBL_TYPE_MAX; type++) { + mgr_parms.tcam_cnt[dir][tcam_types[type]] = + parms->resources->tcam_cnt[dir].cnt[type]; + mgr_resv_res[dir][tcam_types[type]].start = + resv_res[dir][type].start; + mgr_resv_res[dir][tcam_types[type]].stride = + resv_res[dir][type].stride; + } + } + mgr_parms.resv_res = mgr_resv_res; + + rc = cfa_tcam_mgr_bind(tfp, &mgr_parms); + + return rc; +} + +int tf_tcam_mgr_unbind_msg(struct tf *tfp, struct tf_dev_info *dev) +{ + return cfa_tcam_mgr_unbind(tfp); +} + +int tf_tcam_mgr_alloc_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_alloc_parms *parms) +{ + struct cfa_tcam_mgr_alloc_parms mgr_parms; + int rc; + + if (parms->type >= 
TF_TCAM_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "No such TCAM table %d\n", + parms->type); + return -EINVAL; + } + + mgr_parms.dir = parms->dir; + mgr_parms.type = tcam_types[parms->type]; + mgr_parms.hcapi_type = hcapi_type[parms->type]; + mgr_parms.key_size = parms->key_size; + if (parms->priority >= TF_TCAM_PRIORITY_MAX) + mgr_parms.priority = 0; + else + mgr_parms.priority = TF_TCAM_PRIORITY_MAX - parms->priority - 1; + + rc = cfa_tcam_mgr_alloc(tfp, &mgr_parms); + if (rc) + return rc; + + parms->idx = mgr_parms.id; + return 0; +} + +int tf_tcam_mgr_free_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_free_parms *parms) +{ + struct cfa_tcam_mgr_free_parms mgr_parms; + + if (parms->type >= TF_TCAM_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "No such TCAM table %d\n", + parms->type); + return -EINVAL; + } + + mgr_parms.dir = parms->dir; + mgr_parms.type = tcam_types[parms->type]; + mgr_parms.hcapi_type = hcapi_type[parms->type]; + mgr_parms.id = parms->idx; + + return cfa_tcam_mgr_free(tfp, &mgr_parms); +} + +int tf_tcam_mgr_set_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_set_parms *parms) +{ + struct cfa_tcam_mgr_set_parms mgr_parms; + + if (parms->type >= TF_TCAM_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "No such TCAM table %d\n", + parms->type); + return -EINVAL; + } + + mgr_parms.dir = parms->dir; + mgr_parms.type = tcam_types[parms->type]; + mgr_parms.hcapi_type = hcapi_type[parms->type]; + mgr_parms.id = parms->idx; + mgr_parms.key = parms->key; + mgr_parms.mask = parms->mask; + mgr_parms.key_size = parms->key_size; + mgr_parms.result = parms->result; + mgr_parms.result_size = parms->result_size; + + return cfa_tcam_mgr_set(tfp, &mgr_parms); +} + +int tf_tcam_mgr_get_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_get_parms *parms) +{ + struct cfa_tcam_mgr_get_parms mgr_parms; + int rc; + + if (parms->type >= TF_TCAM_TBL_TYPE_MAX) { + netdev_dbg(tfp->bp->dev, "No such TCAM table %d\n", + parms->type); + 
return -EINVAL; + } + + mgr_parms.dir = parms->dir; + mgr_parms.type = tcam_types[parms->type]; + mgr_parms.hcapi_type = hcapi_type[parms->type]; + mgr_parms.id = parms->idx; + mgr_parms.key = parms->key; + mgr_parms.mask = parms->mask; + mgr_parms.key_size = parms->key_size; + mgr_parms.result = parms->result; + mgr_parms.result_size = parms->result_size; + + rc = cfa_tcam_mgr_get(tfp, &mgr_parms); + if (rc) + return rc; + + parms->key_size = mgr_parms.key_size; + parms->result_size = mgr_parms.result_size; + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.h new file mode 100644 index 000000000000..afaecf75c330 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_tcam_mgr_msg.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_TCAM_MGR_MSG_H_ +#define _TF_TCAM_MGR_MSG_H_ + +#include "tf_tcam.h" +#include "tf_rm.h" + +int tf_tcam_mgr_qcaps_msg(struct tf *tfp, struct tf_dev_info *dev, + u32 *rx_tcam_supported, u32 *tx_tcam_supported); +int tf_tcam_mgr_bind_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_cfg_parms *parms, + struct tf_resource_info + resv_res[][TF_TCAM_TBL_TYPE_MAX]); +int tf_tcam_mgr_unbind_msg(struct tf *tfp, struct tf_dev_info *dev); +int tf_tcam_mgr_alloc_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_alloc_parms *parms); +int tf_tcam_mgr_free_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_free_parms *parms); +int tf_tcam_mgr_set_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_set_parms *parms); +int tf_tcam_mgr_get_msg(struct tf *tfp, struct tf_dev_info *dev, + struct tf_tcam_get_parms *parms); + +#endif /* _TF_TCAM_MGR_MSG_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.c b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.c new file mode 
100644 index 000000000000..72d7511d04c6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2021 Broadcom + * All rights reserved. + */ + +#include +#include "cfa_resource_types.h" +#include "tf_util.h" + +const char *tf_dir_2_str(enum tf_dir dir) +{ + switch (dir) { + case TF_DIR_RX: + return "RX"; + case TF_DIR_TX: + return "TX"; + default: + return "Invalid direction"; + } +} + +const char *tf_ident_2_str(enum tf_identifier_type id_type) +{ + switch (id_type) { + case TF_IDENT_TYPE_L2_CTXT_HIGH: + return "l2_ctxt_remap_high"; + case TF_IDENT_TYPE_L2_CTXT_LOW: + return "l2_ctxt_remap_low"; + case TF_IDENT_TYPE_PROF_FUNC: + return "prof_func"; + case TF_IDENT_TYPE_WC_PROF: + return "wc_prof"; + case TF_IDENT_TYPE_EM_PROF: + return "em_prof"; + case TF_IDENT_TYPE_L2_FUNC: + return "l2_func"; + default: + return "Invalid identifier"; + } +} + +const char *tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type) +{ + switch (tcam_type) { + case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH: + return "l2_ctxt_tcam_high"; + case TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW: + return "l2_ctxt_tcam_low"; + case TF_TCAM_TBL_TYPE_PROF_TCAM: + return "prof_tcam"; + case TF_TCAM_TBL_TYPE_WC_TCAM: + return "wc_tcam"; + case TF_TCAM_TBL_TYPE_VEB_TCAM: + return "veb_tcam"; + case TF_TCAM_TBL_TYPE_SP_TCAM: + return "sp_tcam"; + case TF_TCAM_TBL_TYPE_CT_RULE_TCAM: + return "ct_rule_tcam"; + default: + return "Invalid tcam table type"; + } +} + +const char *tf_tbl_type_2_str(enum tf_tbl_type tbl_type) +{ + switch (tbl_type) { + case TF_TBL_TYPE_FULL_ACT_RECORD: + return "Full Action record"; + case TF_TBL_TYPE_COMPACT_ACT_RECORD: + return "Compact Action record"; + case TF_TBL_TYPE_MCAST_GROUPS: + return "Multicast Groups"; + case TF_TBL_TYPE_ACT_ENCAP_8B: + return "Encap 8B"; + case TF_TBL_TYPE_ACT_ENCAP_16B: + return "Encap 16B"; + case TF_TBL_TYPE_ACT_ENCAP_32B: + return "Encap 32B"; + case 
TF_TBL_TYPE_ACT_ENCAP_64B: + return "Encap 64B"; + case TF_TBL_TYPE_ACT_SP_SMAC: + return "Source Properties SMAC"; + case TF_TBL_TYPE_ACT_SP_SMAC_IPV4: + return "Source Properties SMAC IPv4"; + case TF_TBL_TYPE_ACT_SP_SMAC_IPV6: + return "Source Properties SMAC IPv6"; + case TF_TBL_TYPE_ACT_STATS_64: + return "Stats 64B"; + case TF_TBL_TYPE_ACT_MODIFY_IPV4: + return "Modify IPv4"; + case TF_TBL_TYPE_ACT_MODIFY_8B: + return "Modify 8B"; + case TF_TBL_TYPE_ACT_MODIFY_16B: + return "Modify 16B"; + case TF_TBL_TYPE_ACT_MODIFY_32B: + return "Modify 32B"; + case TF_TBL_TYPE_ACT_MODIFY_64B: + return "Modify 64B"; + case TF_TBL_TYPE_METER_PROF: + return "Meter Profile"; + case TF_TBL_TYPE_METER_INST: + return "Meter"; + case TF_TBL_TYPE_MIRROR_CONFIG: + return "Mirror"; + case TF_TBL_TYPE_UPAR: + return "UPAR"; + case TF_TBL_TYPE_METADATA: + return "Metadata"; + case TF_TBL_TYPE_EM_FKB: + return "EM Flexible Key Builder"; + case TF_TBL_TYPE_WC_FKB: + return "WC Flexible Key Builder"; + case TF_TBL_TYPE_EXT: + return "External"; + case TF_TBL_TYPE_METER_DROP_CNT: + return "Meter drop counter"; + default: + return "Invalid tbl type"; + } +} + +const char *tf_em_tbl_type_2_str(enum tf_em_tbl_type em_type) +{ + switch (em_type) { + case TF_EM_TBL_TYPE_EM_RECORD: + return "EM Record"; + case TF_EM_TBL_TYPE_TBL_SCOPE: + return "Table Scope"; + default: + return "Invalid EM type"; + } +} + +const char *tf_module_subtype_2_str(enum tf_module_type module, u16 subtype) +{ + switch (module) { + case TF_MODULE_TYPE_IDENTIFIER: + return tf_ident_2_str(subtype); + case TF_MODULE_TYPE_TABLE: + return tf_tbl_type_2_str(subtype); + case TF_MODULE_TYPE_TCAM: + return tf_tcam_tbl_2_str(subtype); + case TF_MODULE_TYPE_EM: + return tf_em_tbl_type_2_str(subtype); + default: + return "Invalid Module type"; + } +} + +const char *tf_module_2_str(enum tf_module_type module) +{ + switch (module) { + case TF_MODULE_TYPE_IDENTIFIER: + return "Identifier"; + case TF_MODULE_TYPE_TABLE: + return 
"Table"; + case TF_MODULE_TYPE_TCAM: + return "TCAM"; + case TF_MODULE_TYPE_EM: + return "EM"; + default: + return "Invalid Device Module type"; + } +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.h b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.h new file mode 100644 index 000000000000..757de8275d16 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_core/tf_util.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2021 Broadcom + * All rights reserved. + */ + +#ifndef _TF_UTIL_H_ +#define _TF_UTIL_H_ + +#ifdef HAVE_ALIGN +#include +#else +#include +#endif +#include "tf_core.h" + +#define TF_BITS2BYTES(x) ((ALIGN((x), 8)) >> 3) +#define TF_BITS2BYTES_WORD_ALIGN(x) ((ALIGN((x), 32)) >> 3) +#define TF_BITS2BYTES_64B_WORD_ALIGN(x) ((ALIGN((x), 64)) >> 3) + +/** + * Helper function converting direction to text string + * + * @dir: Receive or transmit direction identifier + * + * Returns: + * Pointer to a char string holding the string for the direction + */ +const char *tf_dir_2_str(enum tf_dir dir); + +/** + * Helper function converting identifier to text string + * + * @id_type: Identifier type + * + * Returns: + * Pointer to a char string holding the string for the identifier + */ +const char *tf_ident_2_str(enum tf_identifier_type id_type); + +/** + * Helper function converting tcam type to text string + * + * @tcam_type: TCAM type + * + * Returns: + * Pointer to a char string holding the string for the tcam + */ +const char *tf_tcam_tbl_2_str(enum tf_tcam_tbl_type tcam_type); + +/** + * Helper function converting tbl type to text string + * + * @tbl_type: Table type + * + * Returns: + * Pointer to a char string holding the string for the table type + */ +const char *tf_tbl_type_2_str(enum tf_tbl_type tbl_type); + +/** + * Helper function converting em tbl type to text string + * + * @em_type: EM type + * + * Returns: + * Pointer to a char string holding the string for the EM type + */ 
+const char *tf_em_tbl_type_2_str(enum tf_em_tbl_type em_type); + +/** + * Helper function converting module and submodule type to + * text string. + * + * @module: Module type + * @submodule: Module specific subtype + * + * Returns: + * Pointer to a char string holding the string for the EM type + */ +const char *tf_module_subtype_2_str(enum tf_module_type module, + u16 subtype); + +/** + * Helper function converting module type to text string + * + * @module: Module type + * + * Returns: + * Pointer to a char string holding the string for the EM type + */ +const char *tf_module_2_str(enum tf_module_type module); + +#endif /* _TF_UTIL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_common.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_common.h new file mode 100644 index 000000000000..90e062fb33f4 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_TF_COMMON_H_ +#define _BNXT_TF_COMMON_H_ + +#include "bnxt_tf_ulp.h" +#include "ulp_template_db_enum.h" + +#define BNXT_ULP_EM_FLOWS 8192 +#define BNXT_ULP_1M_FLOWS 1000000 +#define BNXT_EEM_RX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1) +#define BNXT_EEM_TX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1) +#define BNXT_EEM_HASH_KEY2_USED 0x8000000 +#define BNXT_EEM_RX_HW_HASH_KEY2_BIT BNXT_ULP_1M_FLOWS +#define BNXT_ULP_DFLT_RX_MAX_KEY 512 +#define BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY 256 +#define BNXT_ULP_DFLT_RX_MEM 0 +#define BNXT_ULP_RX_NUM_FLOWS 32 +#define BNXT_ULP_DFLT_TX_MAX_KEY 512 +#define BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY 256 +#define BNXT_ULP_DFLT_TX_MEM 0 +#define BNXT_ULP_TX_NUM_FLOWS 32 + +enum bnxt_tf_rc { + BNXT_TF_RC_PARSE_ERR_NOTSUPP = -3, + BNXT_TF_RC_PARSE_ERR = -2, + BNXT_TF_RC_ERROR = -1, + BNXT_TF_RC_SUCCESS = 0, + BNXT_TF_RC_NORMAL = 1, + BNXT_TF_RC_FID = 2, +}; + +/* eth IPv4 Type */ +enum bnxt_ulp_eth_ip_type { + BNXT_ULP_ETH_IPV4 = 4, + BNXT_ULP_ETH_IPV6 = 5, + BNXT_ULP_MAX_ETH_IP_TYPE = 0 +}; + +/* ulp direction Type */ +enum bnxt_ulp_direction_type { + BNXT_ULP_DIR_INVALID, + BNXT_ULP_DIR_INGRESS, + BNXT_ULP_DIR_EGRESS, +}; + +/* enumeration of the interface types */ +enum bnxt_ulp_intf_type { + BNXT_ULP_INTF_TYPE_INVALID = 0, + BNXT_ULP_INTF_TYPE_PF, + BNXT_ULP_INTF_TYPE_TRUSTED_VF, + BNXT_ULP_INTF_TYPE_VF, + BNXT_ULP_INTF_TYPE_PF_REP, + BNXT_ULP_INTF_TYPE_VF_REP, + BNXT_ULP_INTF_TYPE_PHY_PORT, + BNXT_ULP_INTF_TYPE_LAST +}; + +/* Truflow declarations */ +void bnxt_get_parent_mac_addr(struct bnxt *bp, u8 *mac); +void bnxt_get_iface_mac(struct bnxt *bp, enum bnxt_ulp_intf_type type, + u8 *mac, u8 *parent_mac); +u16 bnxt_get_vnic_id(struct bnxt *bp, enum bnxt_ulp_intf_type type); +u16 bnxt_get_parent_vnic_id(struct bnxt *bp, + enum bnxt_ulp_intf_type type); +u16 bnxt_get_svif(struct bnxt *bp_id, bool func_svif, + enum bnxt_ulp_intf_type type); +u16 bnxt_get_fw_func_id(struct bnxt *bp, enum bnxt_ulp_intf_type type); +u16 
bnxt_get_parif(struct bnxt *bp); +u16 bnxt_get_phy_port_id(struct bnxt *bp); +u16 bnxt_get_vport(struct bnxt *bp); +enum bnxt_ulp_intf_type bnxt_get_interface_type(struct bnxt *bp); +int bnxt_ulp_create_vfr_default_rules(void *vf_rep); +int bnxt_ulp_delete_vfr_default_rules(void *vf_rep); + +#endif /* _BNXT_TF_COMMON_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.c new file mode 100644 index 000000000000..fc6a35831b4e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.c @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_ethtool.h" +#include "bnxt_tf_common.h" +#include "bnxt_tf_tc_shim.h" +#include "ulp_mapper.h" +#include "ulp_udcc.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) +int bnxt_ulp_tf_v6_subnet_add(struct bnxt *bp, u8 *byte_key, u8 *byte_mask, + u8 *byte_data, u16 *subnet_hndl) +{ + u8 v6dst[16] = { 0 }; + u8 v6msk[16] = { 0 }; + u8 dmac[6] = { 0 }; + u8 smac[6] = { 0 }; + u16 src_fid; + + memcpy(&src_fid, byte_key, sizeof(src_fid)); + memcpy(v6dst, byte_key + sizeof(src_fid), sizeof(v6dst)); + memcpy(v6msk, byte_mask + sizeof(src_fid), sizeof(v6msk)); + memcpy(dmac, byte_data, sizeof(dmac)); + memcpy(smac, byte_data + sizeof(dmac), sizeof(smac)); + + return bnxt_ulp_udcc_v6_subnet_add(bp, + &src_fid, v6dst, v6msk, + dmac, smac, + subnet_hndl); +} + +int bnxt_ulp_tf_v6_subnet_del(struct bnxt *bp, u16 subnet_hndl) +{ + return bnxt_ulp_udcc_v6_subnet_del(bp, subnet_hndl); +} +#endif /* #if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +static int bnxt_get_vnic_info_idx(struct bnxt *bp) +{ + int idx; + + for (idx = 0; idx < bp->nr_vnics; 
idx++) { + if (bp->vnic_info[idx].fw_vnic_id == INVALID_HW_RING_ID) + return idx; + } + + return -EINVAL; +} + +static void bnxt_clear_queue_vnic(struct bnxt *bp, u16 vnic_id) +{ + int i, nr_ctxs; + + if (!bp->vnic_info) + return; + + if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS)) + return; + + /* before free the vnic, undo the vnic tpa settings */ + if (bp->flags & BNXT_FLAG_TPA) + bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[vnic_id], 0); + + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[vnic_id]); + + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; i++) { + if (bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) { + bnxt_hwrm_vnic_ctx_free_one(bp, &bp->vnic_info[vnic_id], i); + bp->rsscos_nr_ctxs--; + } + } +} + +static int bnxt_vnic_queue_action_free(struct bnxt *bp, u16 vnic_id) +{ + struct bnxt_vnic_info *vnic_info; + u16 vnic_idx = vnic_id; + int rc = -EINVAL; + + /* validate the given vnic idx */ + if (vnic_idx >= bp->nr_vnics) { + netdev_err(bp->dev, "invalid vnic idx %d\n", vnic_idx); + return rc; + } + + /* validate the vnic info */ + vnic_info = &bp->vnic_info[vnic_idx]; + if (vnic_info && !vnic_info->ref_cnt) { + netdev_err(bp->dev, "Invalid vnic idx, no queues being used\n"); + return rc; + } + + if (vnic_info->ref_cnt) { + vnic_info->ref_cnt--; + if (!vnic_info->ref_cnt) + bnxt_clear_queue_vnic(bp, vnic_idx); + } + return 0; +} + +static int +bnxt_setup_queue_vnic(struct bnxt *bp, u16 vnic_id, u16 q_index) +{ + int rc, nr_ctxs, i; + + /* It's a queue action, so only one queue */ + rc = bnxt_hwrm_vnic_alloc(bp, &bp->vnic_info[vnic_id], q_index, 1); + if (rc) { + rc = -EINVAL; + goto cleanup; + } + + rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[vnic_id], q_index); + if (rc) { + rc = -EINVAL; + goto vnic_free; + } + + rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[vnic_id], bp->flags & BNXT_FLAG_TPA); + if (rc) { + rc = -EINVAL; + goto vnic_free; + } + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + rc = 
bnxt_hwrm_vnic_set_hds(bp, &bp->vnic_info[vnic_id]); + if (rc) { + netdev_info(bp->dev, "hwrm vnic %d set hds failure rc: %x\n", + vnic_id, rc); + goto clear_tpa; + } + } + + /* Even though this vnic is going to have only one queue, RSS is still + * enabled as the RX completion handler expects a valid RSS hash in the + * rx completion. + */ + bp->vnic_info[vnic_id].flags |= + BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG | BNXT_VNIC_UCAST_FLAG; + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); + for (i = 0; i < nr_ctxs; i++) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, &bp->vnic_info[vnic_id], i); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n", + vnic_id, i, rc); + break; + } + bp->rsscos_nr_ctxs++; + } + if (i < nr_ctxs) + goto ctx_free; + + rc = bnxt_hwrm_vnic_set_rss_p5(bp, &bp->vnic_info[vnic_id], true); + if (rc) { + netdev_info(bp->dev, "Failed to enable RSS on vnic_id %d\n", vnic_id); + goto ctx_free; + } + + return 0; + +ctx_free: + for (i = 0; i < nr_ctxs; i++) { + if (bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, &bp->vnic_info[vnic_id], i); + } +clear_tpa: + bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[vnic_id], 0); +vnic_free: + bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[vnic_id]); +cleanup: + return rc; +} + +static int bnxt_vnic_queue_action_alloc(struct bnxt *bp, u16 q_index, u16 *vnic_idx, u16 *vnicid) +{ + struct vnic_info_meta *vnic_meta; + int rc = -EINVAL; + int idx; + + if (!bp->vnic_meta) { + netdev_err(bp->dev, + "Queue action is invalid while ntuple-filter is on\n"); + return rc; + } + + /* validate the given queue id */ + if (q_index >= bp->rx_nr_rings) { + netdev_err(bp->dev, "invalid queue id should be less than %d\n", + bp->rx_nr_rings); + return rc; + } + + vnic_meta = &bp->vnic_meta[q_index]; + /* Scenario 1: Queue is under use by non-truflow entity */ + if (vnic_meta && !vnic_meta->meta_valid && + vnic_meta->fw_vnic_id != INVALID_HW_RING_ID) + return 
-EINVAL; + /* Scenario 2: Queue is under by truflow entity, just increase the reference count */ + if (vnic_meta && vnic_meta->meta_valid) { + idx = vnic_meta->vnic_idx; + goto done; + } + /* Scenario 3: New vnic must be allocated*/ + /* Get new vnic */ + idx = bnxt_get_vnic_info_idx(bp); + if (idx < 0) + return -EINVAL; + + bp->vnic_info[idx].q_index = q_index; + rc = bnxt_setup_queue_vnic(bp, idx, q_index); + if (rc) { + bp->vnic_info[idx].q_index = INVALID_HW_RING_ID; + return rc; + } + + /* Populate all important data only when everything in this routine is successful */ + vnic_meta->meta_valid = true; + vnic_meta->vnic_idx = idx; + bp->vnic_info[idx].vnic_meta = vnic_meta; + +done: + bp->vnic_info[idx].ref_cnt++; + *vnic_idx = (u16)idx; + *vnicid = bp->vnic_info[idx].fw_vnic_id; + + return 0; +} + +int bnxt_queue_action_create(struct bnxt_ulp_mapper_parms *parms, + u16 *vnic_idx, u16 *vnic_id) +{ + struct ulp_tc_act_prop *ap = parms->act_prop; + struct bnxt *bp = parms->ulp_ctx->bp; + u16 q_index; + + memcpy(&q_index, &ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX], + BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX); + + return bnxt_vnic_queue_action_alloc(bp, q_index, vnic_idx, vnic_id); +} + +int bnxt_queue_action_delete(struct tf *tfp, u16 vnic_idx) +{ + struct bnxt *bp = NULL; + + bp = tfp->bp; + if (!bp) { + netdev_err(NULL, "Invalid bp\n"); + return -EINVAL; + } + return bnxt_vnic_queue_action_free(bp, vnic_idx); +} + +int bnxt_bd_act_set(struct bnxt *bp, u16 port_id, u32 act) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_vf_rep *vfr; + u32 ulp_flags = 0; + int rc = 0; + + if (!ulp_ctx) { + netdev_dbg(bp->dev, + "ULP context is not initialized. 
Failed to create dflt flow.\n"); + rc = -EINVAL; + goto err1; + } + + /* update the vf rep flag */ + if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(ulp_ctx, &ulp_flags)) { + netdev_dbg(bp->dev, "Error in getting ULP context flags\n"); + rc = -EINVAL; + goto err1; + } + + if (ULP_VF_REP_IS_ENABLED(ulp_flags)) { + vfr = netdev_priv(bp->dev); + if (!vfr) + return rc; + vfr->tx_cfa_action = act; + } else { + bp->tx_cfa_action = act; + } +err1: + return rc; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.h new file mode 100644 index 000000000000..f6ce496df4af --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_tc_shim.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2021-2023 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_TF_TC_SHIM_H_ +#define _BNXT_TF_TC_SHIM_H_ + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "ulp_mapper.h" + +/* Internal Tunnel type, */ +enum bnxt_global_register_tunnel_type { + BNXT_GLOBAL_REGISTER_TUNNEL_UNUSED = 0, + BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN, + BNXT_GLOBAL_REGISTER_TUNNEL_ECPRI, + BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE, + BNXT_GLOBAL_REGISTER_TUNNEL_VXLAN_GPE_V6, + BNXT_GLOBAL_REGISTER_TUNNEL_MAX +}; + +#define BNXT_VNIC_MAX_QUEUE_SIZE 256 +#define BNXT_VNIC_MAX_QUEUE_SZ_IN_8BITS (BNXT_VNIC_MAX_QUEUE_SIZE / 8) +#define BNXT_VNIC_MAX_QUEUE_SZ_IN_64BITS (BNXT_VNIC_MAX_QUEUE_SIZE / 64) + +int bnxt_queue_action_create(struct bnxt_ulp_mapper_parms *parms, + u16 *vnic_idx, u16 *vnic_id); +int bnxt_queue_action_delete(struct tf *tfp, u16 vnic_idx); +int bnxt_bd_act_set(struct bnxt *bp, u16 port_id, u32 act); +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) +int 
bnxt_ulp_tf_v6_subnet_add(struct bnxt *bp, + u8 *byte_key, u8 *byte_mask, + u8 *byte_data, u16 *subnet_hndl); + +int bnxt_ulp_tf_v6_subnet_del(struct bnxt *bp, u16 subnet_hndl); +#endif /* #if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.c new file mode 100644 index 000000000000..f9d8d120e303 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.c @@ -0,0 +1,1576 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_ulp_flow.h" +#include "bnxt_tf_common.h" +#include "tf_core.h" +#include "tfc.h" +#include "tf_ext_flow_handle.h" + +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_mark_mgr.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_matcher.h" +#include "ulp_port_db.h" +#include "bnxt_tfc.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Linked list of all TF sessions. */ +HLIST_HEAD(bnxt_ulp_session_list); +/* Mutex to synchronize bnxt_ulp_session_list operations. */ +DEFINE_MUTEX(bnxt_ulp_global_mutex); + +/* Spin lock to protect context global list */ +u32 bnxt_ulp_ctxt_lock_created; +DEFINE_MUTEX(bnxt_ulp_ctxt_lock); +static HLIST_HEAD(ulp_cntx_list); + +/* Allow the deletion of context only for the bnxt device that + * created the session. 
+ */ +bool +ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return false; + + if (!ulp_ctx->cfg_data->ref_cnt) { + netdev_dbg(ulp_ctx->bp->dev, "ulp ctx shall initiate deinit\n"); + return true; + } + + return false; +} + +int +bnxt_ulp_devid_get(struct bnxt *bp, + enum bnxt_ulp_device_id *ulp_dev_id) +{ + if (BNXT_CHIP_P7(bp)) + *ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR2; + else if (BNXT_CHIP_P5(bp)) + *ulp_dev_id = BNXT_ULP_DEVICE_ID_THOR; + else if (BNXT_CHIP_P4(bp)) + *ulp_dev_id = BNXT_ULP_DEVICE_ID_WH_PLUS; + else + return -ENODEV; + + return 0; +} + +struct bnxt_ulp_app_capabilities_info * +bnxt_ulp_app_cap_list_get(u32 *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_APP_CAP_TBL_MAX_SZ; + return ulp_app_cap_info_list; +} + +struct bnxt_ulp_resource_resv_info * +bnxt_ulp_resource_resv_list_get(u32 *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ; + return ulp_resource_resv_list; +} + +struct bnxt_ulp_resource_resv_info * +bnxt_ulp_app_resource_resv_list_get(u32 *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ; + return ulp_app_resource_resv_list; +} + +struct bnxt_ulp_glb_resource_info * +bnxt_ulp_app_glb_resource_info_list_get(u32 *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ; + return ulp_app_glb_resource_tbl; +} + +/* Function to set the number for vxlan_ip (custom vxlan) port into the context */ +int +bnxt_ulp_cntxt_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 ecpri_udp_port) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->ecpri_udp_port = ecpri_udp_port; + + return 0; +} + +/* Function to retrieve the vxlan_ip (custom vxlan) port from the context. 
*/ +unsigned int +bnxt_ulp_cntxt_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return (unsigned int)ulp_ctx->cfg_data->ecpri_udp_port; +} + +/* Function to set the number for vxlan_ip (custom vxlan) port into the context */ +int +bnxt_ulp_cntxt_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_ip_port) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->vxlan_ip_port = vxlan_ip_port; + + return 0; +} + +/* Function to retrieve the vxlan_ip (custom vxlan) port from the context. */ +unsigned int +bnxt_ulp_cntxt_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return (unsigned int)ulp_ctx->cfg_data->vxlan_ip_port; +} + +/* Function to set the number for vxlan_gpe next_proto into the context */ +u32 +bnxt_ulp_vxlan_gpe_next_proto_set(struct bnxt_ulp_context *ulp_ctx, + u8 tunnel_next_proto) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->tunnel_next_proto = tunnel_next_proto; + + return 0; +} + +/* Function to retrieve the vxlan_gpe next_proto from the context. */ +uint8_t +bnxt_ulp_vxlan_gpe_next_proto_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return ulp_ctx->cfg_data->tunnel_next_proto; +} + +/* Function to set the number for vxlan port into the context */ +int +bnxt_ulp_cntxt_vxlan_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_port) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->vxlan_port = vxlan_port; + + return 0; +} + +/* Function to retrieve the vxlan port from the context. 
*/ +unsigned int +bnxt_ulp_cntxt_vxlan_port_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return (unsigned int)ulp_ctx->cfg_data->vxlan_port; +} + +int +bnxt_ulp_default_app_priority_set(struct bnxt_ulp_context *ulp_ctx, + u32 prio) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->default_priority = prio; + return 0; +} + +/* Function to retrieve the default app priority from the context. */ +unsigned int +bnxt_ulp_default_app_priority_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return (unsigned int)ulp_ctx->cfg_data->default_priority; +} + +int +bnxt_ulp_max_def_priority_set(struct bnxt_ulp_context *ulp_ctx, + u32 prio) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->max_def_priority = prio; + return 0; +} + +unsigned int +bnxt_ulp_max_def_priority_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return (unsigned int)ulp_ctx->cfg_data->max_def_priority; +} + +int +bnxt_ulp_min_flow_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->min_flow_priority = prio; + return 0; +} + +unsigned int +bnxt_ulp_min_flow_priority_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return ulp_ctx->cfg_data->min_flow_priority; +} + +int +bnxt_ulp_max_flow_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->max_flow_priority = prio; + return 0; +} + +unsigned int +bnxt_ulp_max_flow_priority_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + + return ulp_ctx->cfg_data->max_flow_priority; +} + +/* The function to initialize bp flags with truflow features */ +static int +ulp_dparms_dev_port_intf_update(struct bnxt *bp, + 
struct bnxt_ulp_context *ulp_ctx) +{ + enum bnxt_ulp_flow_mem_type mtype; + + if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype)) + return -EINVAL; + /* Update the bp flag with gfid flag */ + if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT) + bp->tf_flags |= BNXT_TF_FLAG_GFID_ENABLE; + + return 0; +} + +/* Initialize the state of an ULP session. + * If the state of an ULP session is not initialized, set it's state to + * initialized. If the state is already initialized, do nothing. + */ +static void +ulp_context_initialized(struct bnxt_ulp_session_state *session, bool *init) +{ + mutex_lock(&session->bnxt_ulp_mutex); + if (!session->bnxt_ulp_init) { + session->bnxt_ulp_init = true; + *init = false; + } else { + *init = true; + } + mutex_unlock(&session->bnxt_ulp_mutex); +} + +/* Check if an ULP session is already allocated for a specific PCI + * domain & bus. If it is already allocated simply return the session + * pointer, otherwise allocate a new session. + */ +static struct bnxt_ulp_session_state * +ulp_get_session(struct bnxt *bp) +{ + struct bnxt_ulp_session_state *session; + struct hlist_node *node; + + hlist_for_each_entry_safe(session, node, &bnxt_ulp_session_list, next) { + if (!memcmp(session->dsn, bp->dsn, sizeof(bp->dsn))) + return session; + } + return NULL; +} + +/* Allocate and Initialize an ULP session and set it's state to INITIALIZED. + * If it's already initialized simply return the already existing session. 
+ */ +static struct bnxt_ulp_session_state * +ulp_session_init(struct bnxt *bp, + bool *init) +{ + struct bnxt_ulp_session_state *session; + + mutex_lock(&bnxt_ulp_global_mutex); + session = ulp_get_session(bp); + if (!session) { + /* Not Found the session Allocate a new one */ + session = vzalloc(sizeof(*session)); + if (!session) { + mutex_unlock(&bnxt_ulp_global_mutex); + return NULL; + + } else { + /* Add it to the queue */ + memcpy(session->dsn, bp->dsn, sizeof(bp->dsn)); + mutex_init(&session->bnxt_ulp_mutex); + hlist_add_head(&session->next, &bnxt_ulp_session_list); + } + } + ulp_context_initialized(session, init); + mutex_unlock(&bnxt_ulp_global_mutex); + return session; +} + +/* When a device is closed, remove it's associated session from the global + * session list. + */ +static void +ulp_session_deinit(struct bnxt_ulp_session_state *session) +{ + if (!session) + return; + + if (!session->cfg_data) { + mutex_lock(&bnxt_ulp_global_mutex); + hlist_del(&session->next); + mutex_destroy(&session->bnxt_ulp_mutex); + vfree(session); + mutex_unlock(&bnxt_ulp_global_mutex); + } +} + +/* Internal function to delete all the flows belonging to the given port */ +static void +bnxt_ulp_flush_port_flows(struct bnxt *bp) +{ + u16 func_id; + + /* it is assumed that port is either TVF or PF */ + if (ulp_port_db_port_func_id_get(bp->ulp_ctx, + bp->pf.fw_fid, + &func_id)) { + netdev_dbg(bp->dev, "Invalid argument\n"); + return; + } + (void)ulp_flow_db_function_flow_flush(bp->ulp_ctx, func_id); +} + +static const struct bnxt_ulp_core_ops * +bnxt_ulp_port_func_ops_get(struct bnxt *bp) +{ + const struct bnxt_ulp_core_ops *func_ops; + enum bnxt_ulp_device_id dev_id; + int rc; + + rc = bnxt_ulp_devid_get(bp, &dev_id); + if (rc) + return NULL; + + switch (dev_id) { + case BNXT_ULP_DEVICE_ID_THOR2: + func_ops = &bnxt_ulp_tfc_core_ops; + break; + case BNXT_ULP_DEVICE_ID_THOR: + case BNXT_ULP_DEVICE_ID_WH_PLUS: + func_ops = &bnxt_ulp_tf_core_ops; + break; + default: + func_ops = 
NULL; + break; + } + return func_ops; +} + +/* Entry point for Truflow feature initialization. + */ +int +bnxt_ulp_port_init(struct bnxt *bp) +{ + struct bnxt_ulp_session_state *session; + enum bnxt_ulp_device_id dev_id; + bool initialized; + u32 ulp_flags; + enum cfa_app_type app_type = CFA_APP_TYPE_TF; + int rc = 0; + + if (!BNXT_TRUFLOW_EN(bp)) { + netdev_dbg(bp->dev, + "Skip ULP init for port:%d, truflow is not enabled\n", + bp->pf.fw_fid); + return -EINVAL; + } + + if (!(bp->flags & BNXT_FLAG_DSN_VALID)) { + netdev_dbg(bp->dev, "Invalid DSN, don't create ULP session\n"); + return -EINVAL; + } + + rc = bnxt_ulp_devid_get(bp, &dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unsupported device %x\n", rc); + return rc; + } + + if (bp->ulp_ctx) { + netdev_dbg(bp->dev, "ulp ctx already allocated\n"); + return rc; + } + + rc = bnxt_hwrm_port_mac_qcfg(bp); + if (rc) + return rc; + + if (BNXT_TF_RX_NIC_FLOW_CAP(bp)) + app_type = CFA_APP_TYPE_AFM; + + bp->ulp_ctx = vzalloc(sizeof(struct bnxt_ulp_context)); + if (!bp->ulp_ctx) + return -ENOMEM; + + rc = bnxt_ulp_cntxt_bp_set(bp->ulp_ctx, bp); + if (rc) { + netdev_dbg(bp->dev, "Failed to set bp in ulp_ctx\n"); + vfree(bp->ulp_ctx); + return -EIO; + } + + /* This shouldn't fail, unless we have a unknown device */ + ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops = bnxt_ulp_port_func_ops_get(bp); + if (!((struct bnxt_ulp_context *)bp->ulp_ctx)->ops) { + netdev_dbg(bp->dev, "Failed to get ulp ops\n"); + vfree(bp->ulp_ctx); + bp->ulp_ctx = NULL; + return -EIO; + } + + if (!BNXT_CHIP_P7(bp)) { + /* Thor needs to initialize tfp structure during ulp init only. + * Thor2 has done this at bnxt open due to requirements regarding + * table scopes which is shared by truflow and cfa. + */ + bp->tfp = vzalloc(sizeof(*bp->tfp) * BNXT_SESSION_TYPE_LAST); + if (!bp->tfp) { + vfree(bp->ulp_ctx); + return -ENOMEM; + } + } + + /* Multiple uplink ports can be associated with a single vswitch. 
+ * Make sure only the port that is started first will initialize + * the TF session. + */ + session = ulp_session_init(bp, &initialized); + if (!session) { + netdev_dbg(bp->dev, "Failed to initialize the tf session\n"); + rc = -EIO; + goto jump_to_error; + } + + if (initialized) { + /* If ULP is already initialized for a specific domain then + * simply assign the ulp context to this netdev as well. + */ + rc = ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops->ulp_ctx_attach(bp, session, + app_type); + if (rc) { + netdev_dbg(bp->dev, "Failed to attach the ulp context\n"); + goto jump_to_error; + } + } else { + rc = ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops->ulp_init(bp, session, + app_type); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize the ulp init\n"); + goto jump_to_error; + } + } + + /* Update bnxt driver flags */ + rc = ulp_dparms_dev_port_intf_update(bp, bp->ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to update driver flags\n"); + goto jump_to_error; + } + + /* update the port database for the given interface */ + rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp, NULL); + if (rc) { + netdev_dbg(bp->dev, "Failed to update port database\n"); + goto jump_to_error; + } + + /* create the default rules */ + rc = bnxt_ulp_create_df_rules(bp); + if (rc) { + netdev_dbg(bp->dev, "Failed to create default flow\n"); + goto jump_to_error; + } + + /* set the unicast mode */ + if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(bp->ulp_ctx, &ulp_flags)) { + netdev_dbg(bp->dev, "Error in getting ULP context flags\n"); + goto jump_to_error; + } + /* NIC flow doesn't need VNIC Metadata update */ + if (app_type == CFA_APP_TYPE_AFM) + return rc; + + if (BNXT_CHIP_P7(bp)) { + struct bnxt_vnic_info *vnic = bp->vnic_info; + + vnic->metadata_format = VNIC_UPDATE_REQ_METADATA_FORMAT_TYPE_3; + rc = bnxt_hwrm_vnic_update(bp, + vnic, + VNIC_UPDATE_REQ_ENABLES_METADATA_FORMAT_TYPE_VALID); + if (rc) { + netdev_dbg(bp->dev, "Failed to set metadata format\n"); + goto 
jump_to_error; + } + } + + return rc; + +jump_to_error: + bnxt_ulp_port_deinit(bp); + return rc; +} + +/* When a port is de-initialized. This functions clears up + * the port specific details. + */ +void +bnxt_ulp_port_deinit(struct bnxt *bp) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_session_state *session; + + if (!BNXT_TRUFLOW_EN(bp)) { + netdev_dbg(bp->dev, + "Skip ULP deinit for port:%d, truflow is not enabled\n", + bp->pf.fw_fid); + return; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, + "Skip ULP deinit port:%d, not a TVF or PF\n", + bp->pf.fw_fid); + return; + } + + if (!bp->ulp_ctx) { + netdev_dbg(bp->dev, "ulp ctx already de-allocated\n"); + return; + } + + netdev_dbg(bp->dev, "BNXT Port:%d ULP port deinit\n", + bp->pf.fw_fid); + + /* Get the session details */ + mutex_lock(&bnxt_ulp_global_mutex); + session = ulp_get_session(bp); + mutex_unlock(&bnxt_ulp_global_mutex); + /* session not found then just exit */ + if (!session) { + /* Free the ulp context */ + vfree(bp->ulp_ctx); + vfree(bp->tfp); + + bp->ulp_ctx = NULL; + bp->tfp = NULL; + return; + } + + /* Check the reference count to deinit or deattach */ + if (ulp_ctx->cfg_data && ulp_ctx->cfg_data->ref_cnt) { + ulp_ctx->cfg_data->ref_cnt--; + if (ulp_ctx->cfg_data->ref_cnt) { + /* free the port details */ + /* Free the default flow rule associated to this port */ + bnxt_ulp_destroy_df_rules(bp, false); + + /* free flows associated with this port */ + bnxt_ulp_flush_port_flows(bp); + + /* close the session associated with this port */ + ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops->ulp_ctx_detach(bp, session); + } else { + /* Free the default flow rule associated to this port */ + bnxt_ulp_destroy_df_rules(bp, true); + + /* free flows associated with this port */ + bnxt_ulp_flush_port_flows(bp); + + /* Perform ulp ctx deinit */ + ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops->ulp_deinit(bp, session); + } + } + + /* Free the ulp context in the 
context entry list */ + bnxt_ulp_cntxt_list_del(bp->ulp_ctx); + + /* clean up the session */ + ulp_session_deinit(session); + + /* Free the ulp context */ + vfree(bp->ulp_ctx); + if (!BNXT_CHIP_P7(bp)) { + /* Only free resources for Thor. Thor2 remains + * available for table scope operations. + */ + vfree(bp->tfp); + bp->tfp = NULL; + } + bp->ulp_ctx = NULL; +} + +/* Below are the access functions to access internal data of ulp context. */ +/* Function to set the Mark DB into the context */ +int +bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mark_tbl *mark_tbl) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->mark_tbl = mark_tbl; + + return 0; +} + +/* Function to retrieve the Mark DB from the context. */ +struct bnxt_ulp_mark_tbl * +bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->mark_tbl; +} + +bool bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx) +{ + return ULP_SHARED_SESSION_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags); +} + +bool +bnxt_ulp_cntxt_multi_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx) +{ + return ULP_MULTI_SHARED_IS_SUPPORTED(ulp_ctx); +} + +int +bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, u8 app_id) +{ + if (!ulp_ctx) + return -EINVAL; + ulp_ctx->cfg_data->app_id = app_id; + netdev_dbg(ulp_ctx->bp->dev, "%s: Truflow APP ID is %d\n", __func__, + app_id & ~BNXT_ULP_APP_ID_SET_CONFIGURED); + return 0; +} + +int +bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, u8 *app_id) +{ + /* Default APP id is zero */ + if (!ulp_ctx || !app_id) + return -EINVAL; + *app_id = ulp_ctx->cfg_data->app_id & ~BNXT_ULP_APP_ID_SET_CONFIGURED; + netdev_dbg(ulp_ctx->bp->dev, "%s: Truflow APP ID is %d\n", __func__, + ulp_ctx->cfg_data->app_id & ~BNXT_ULP_APP_ID_SET_CONFIGURED); + return 0; +} + +/* Function to set the device id of the hardware. 
*/ +int +bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx, + u32 dev_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + ulp_ctx->cfg_data->dev_id = dev_id; + return 0; + } + + return -EINVAL; +} + +/* Function to get the device id of the hardware. */ +int +bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx, + u32 *dev_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + *dev_id = ulp_ctx->cfg_data->dev_id; + return 0; + } + *dev_id = BNXT_ULP_DEVICE_ID_LAST; + return -EINVAL; +} + +int +bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_flow_mem_type mem_type) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + ulp_ctx->cfg_data->mem_type = mem_type; + return 0; + } + + return -EINVAL; +} + +int +bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_flow_mem_type *mem_type) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + *mem_type = ulp_ctx->cfg_data->mem_type; + return 0; + } + + *mem_type = BNXT_ULP_FLOW_MEM_TYPE_LAST; + return -EINVAL; +} + +/* Function to get the table scope id of the EEM table. */ +int +bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx, + u32 *tbl_scope_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + *tbl_scope_id = ulp_ctx->cfg_data->tbl_scope_id; + return 0; + } + + return -EINVAL; +} + +/* Function to set the table scope id of the EEM table. 
*/ +int +bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx, + u32 tbl_scope_id) +{ + if (ulp_ctx && ulp_ctx->cfg_data) { + ulp_ctx->cfg_data->tbl_scope_id = tbl_scope_id; + return 0; + } + + return -EINVAL; +} + +/* Function to set the v3 table scope id, only works for tfc objects */ +int +bnxt_ulp_cntxt_tsid_set(struct bnxt_ulp_context *ulp_ctx, uint8_t tsid) +{ + if (ulp_ctx && ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7) { + ulp_ctx->tsid = tsid; + ULP_BITMAP_SET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_TSID_FLAG); + return 0; + } + return -EINVAL; +} + +/* Function to reset the v3 table scope id, only works for tfc objects */ +void +bnxt_ulp_cntxt_tsid_reset(struct bnxt_ulp_context *ulp_ctx) +{ + if (ulp_ctx && ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7) + ULP_BITMAP_RESET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_TSID_FLAG); +} + +/* Function to set the v3 table scope id, only works for tfc objects */ +int +bnxt_ulp_cntxt_tsid_get(struct bnxt_ulp_context *ulp_ctx, uint8_t *tsid) +{ + if (ulp_ctx && tsid && + ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7 && + ULP_BITMAP_ISSET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_TSID_FLAG)) { + *tsid = ulp_ctx->tsid; + return 0; + } + return -EINVAL; +} + +/* Function to set the v3 session id, only works for tfc objects */ +int +bnxt_ulp_cntxt_sid_set(struct bnxt_ulp_context *ulp_ctx, + uint16_t sid) +{ + if (ulp_ctx && ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7) { + ulp_ctx->sid = sid; + ULP_BITMAP_SET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_SID_FLAG); + return 0; + } + return -EINVAL; +} + +/* Function to reset the v3 session id, only works for tfc objects + * There isn't a known invalid value for sid, so this is necessary + */ +void +bnxt_ulp_cntxt_sid_reset(struct bnxt_ulp_context *ulp_ctx) +{ + if (ulp_ctx && ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7) + ULP_BITMAP_RESET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_SID_FLAG); +} + +/* Function to get the v3 session id, only works for tfc objects */ +int +bnxt_ulp_cntxt_sid_get(struct bnxt_ulp_context 
*ulp_ctx, + uint16_t *sid) +{ + if (ulp_ctx && sid && + ulp_ctx->tfo_type == BNXT_ULP_TFO_TYPE_P7 && + ULP_BITMAP_ISSET(ulp_ctx->tfo_flags, BNXT_ULP_TFO_SID_FLAG)) { + *sid = ulp_ctx->sid; + return 0; + } + return -EINVAL; +} + +/* Function to set the number of shared clients */ +int +bnxt_ulp_cntxt_num_shared_clients_set(struct bnxt_ulp_context *ulp, bool incr) +{ + if (!ulp || !ulp->cfg_data) + return 0; + + if (incr) + ulp->cfg_data->num_shared_clients++; + else if (ulp->cfg_data->num_shared_clients) + ulp->cfg_data->num_shared_clients--; + + netdev_dbg(ulp->bp->dev, "%d:clients(%d)\n", incr, + ulp->cfg_data->num_shared_clients); + + return 0; +} + +int +bnxt_ulp_cntxt_bp_set(struct bnxt_ulp_context *ulp, struct bnxt *bp) +{ + if (!ulp) { + netdev_dbg(bp->dev, "Invalid arguments\n"); + return -EINVAL; + } + ulp->bp = bp; + return 0; +} + +struct bnxt* +bnxt_ulp_cntxt_bp_get(struct bnxt_ulp_context *ulp) +{ + if (!ulp) { + netdev_dbg(NULL, "Invalid arguments\n"); + return NULL; + } + return ulp->bp; +} + +int +bnxt_ulp_cntxt_fid_get(struct bnxt_ulp_context *ulp, uint16_t *fid) +{ + if (!ulp || !fid) + return -EINVAL; + + *fid = ulp->bp->pf.fw_fid; + return 0; +} + +void +bnxt_ulp_cntxt_ptr2_default_class_bits_set(struct bnxt_ulp_context *ulp_ctx, + u64 bits) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return; + ulp_ctx->cfg_data->default_class_bits = bits; +} + +u64 +bnxt_ulp_cntxt_ptr2_default_class_bits_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->default_class_bits; +} + +void +bnxt_ulp_cntxt_ptr2_default_act_bits_set(struct bnxt_ulp_context *ulp_ctx, + u64 bits) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return; + ulp_ctx->cfg_data->default_act_bits = bits; +} + +u64 +bnxt_ulp_cntxt_ptr2_default_act_bits_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->default_act_bits; +} + +/** + * Get the device table entry based on 
the device id. + * + * @dev_id: The device id of the hardware + * + * Returns the pointer to the device parameters. + */ +struct bnxt_ulp_device_params * +bnxt_ulp_device_params_get(u32 dev_id) +{ + if (dev_id < BNXT_ULP_MAX_NUM_DEVICES) + return &ulp_device_params[dev_id]; + return NULL; +} + +/* Function to set the flow database to the ulp context. */ +int +bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_flow_db *flow_db) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->flow_db = flow_db; + return 0; +} + +/* Function to get the flow database from the ulp context. */ +struct bnxt_ulp_flow_db * +bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->flow_db; +} + +/* Function to get the ulp context from eth device. */ +struct bnxt_ulp_context * +bnxt_ulp_bp_ptr2_cntxt_get(struct bnxt *bp) +{ + if (!bp) + return NULL; + + return bp->ulp_ctx; +} + +int +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->mapper_data = mapper_data; + return 0; +} + +void * +bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->mapper_data; +} + +int +bnxt_ulp_cntxt_ptr2_matcher_data_set(struct bnxt_ulp_context *ulp_ctx, + void *matcher_data) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->matcher_data = matcher_data; + return 0; +} + +void * +bnxt_ulp_cntxt_ptr2_matcher_data_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->matcher_data; +} + +/* Function to set the port database to the ulp context. 
*/ +int +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->port_db = port_db; + return 0; +} + +/* Function to get the port database from the ulp context. */ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->port_db; +} + +/* Function to set the flow counter info into the context */ +int +bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_fc_info *ulp_fc_info) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->fc_info = ulp_fc_info; + + return 0; +} + +/* Function to retrieve the flow counter info from the context. */ +struct bnxt_ulp_fc_info * +bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return NULL; + + return ulp_ctx->cfg_data->fc_info; +} + +/* Function to get the ulp flags from the ulp context. */ +int +bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx, + u32 *flags) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -1; + + *flags = ulp_ctx->cfg_data->ulp_flags; + return 0; +} + +/* Function to get the ulp vfr info from the ulp context. 
*/ +struct bnxt_ulp_vfr_rule_info* +bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx, + u32 port_id) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data || port_id >= BNXT_TC_MAX_PORTS) + return NULL; + + return &ulp_ctx->cfg_data->vfr_rule_info[port_id]; +} + +int +bnxt_ulp_cntxt_list_init(void) +{ + /* Create the cntxt spin lock only once*/ + if (!bnxt_ulp_ctxt_lock_created) + mutex_init(&bnxt_ulp_ctxt_lock); + bnxt_ulp_ctxt_lock_created = 1; + return 0; +} + +int +bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx) +{ + struct ulp_context_list_entry *entry; + + entry = vzalloc(sizeof(*entry)); + if (!entry) + return -ENOMEM; + + mutex_lock(&bnxt_ulp_ctxt_lock); + entry->ulp_ctx = ulp_ctx; + hlist_add_head(&entry->next, &ulp_cntx_list); + mutex_unlock(&bnxt_ulp_ctxt_lock); + + return 0; +} + +void +bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx) +{ + struct ulp_context_list_entry *entry; + struct hlist_node *node; + + mutex_lock(&bnxt_ulp_ctxt_lock); + hlist_for_each_entry_safe(entry, node, &ulp_cntx_list, next) { + if (entry && entry->ulp_ctx == ulp_ctx) { + hlist_del(&entry->next); + vfree(entry); + break; + } + } + mutex_unlock(&bnxt_ulp_ctxt_lock); +} + +struct bnxt_ulp_context * +bnxt_ulp_cntxt_entry_lookup(void *cfg_data) +{ + struct ulp_context_list_entry *entry; + struct hlist_node *node; + + /* take a lock and get the first ulp context available */ + hlist_for_each_entry_safe(entry, node, &ulp_cntx_list, next) { + if (entry && entry->ulp_ctx && + entry->ulp_ctx->cfg_data == cfg_data) + return entry->ulp_ctx; + } + + return NULL; +} + +void +bnxt_ulp_cntxt_lock_acquire(void) +{ + mutex_lock(&bnxt_ulp_ctxt_lock); +} + +void +bnxt_ulp_cntxt_lock_release(void) +{ + mutex_unlock(&bnxt_ulp_ctxt_lock); +} + +/* Function to convert ulp dev id to regular dev id. 
*/ +u32 +bnxt_ulp_cntxt_convert_dev_id(struct bnxt *bp, u32 ulp_dev_id) +{ + enum tf_device_type type = 0; + + switch (ulp_dev_id) { + case BNXT_ULP_DEVICE_ID_WH_PLUS: + type = TF_DEVICE_TYPE_P4; + break; + case BNXT_ULP_DEVICE_ID_THOR: + type = TF_DEVICE_TYPE_P5; + break; + default: + netdev_dbg(bp->dev, "Invalid device id\n"); + break; + } + return type; +} + +/* CFA code retrieval for THOR2 + * This process differs from THOR in that the code is kept in the + * metadata field instead of the errors_v2 field. + */ +int +bnxt_ulp_get_mark_from_cfacode_p7(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, u32 *mark_id) +{ + bool gfid = false; + u32 vfr_flag; + u32 cfa_code; + u32 meta_fmt; + u32 flags2; + u32 meta; + int rc; + + if (rxcmp1) { + cfa_code = RX_CMP_CFA_V3_CODE(rxcmp1); + flags2 = le32_to_cpu(rxcmp1->rx_cmp_flags2); + meta = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + } else { + cfa_code = le16_to_cpu(tpa_info->cfa_code); + flags2 = le32_to_cpu(tpa_info->flags2); + meta = le32_to_cpu(tpa_info->metadata); + } + + /* The flags field holds extra bits of info from [6:4] + * which indicate if the flow is in TCAM or EM or EEM + */ + meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >> + BNXT_CFA_META_FMT_SHFT; + switch (meta_fmt) { + case 0: + if (BNXT_GFID_ENABLED(bp)) + /* Not an LFID or GFID, a flush cmd. */ + goto skip_mark; + break; + case 4: + fallthrough; + case 5: + /* EM/TCAM case + * Assume that EM doesn't support Mark due to GFID + * collisions with EEM. Simply return without setting the mark + * in the mbuf. + */ + /* If it is not EM then it is a TCAM entry, so it is an LFID. + * The TCAM IDX and Mode can also be determined + * by decoding the meta_data. We are not + * using these for now. 
+ */ + if (BNXT_CFA_META_EM_TEST(meta)) { + /*This is EM hit {EM(1), GFID[27:16], 19'd0 or vtag } */ + gfid = true; + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT; + } + break; + case 6: + fallthrough; + case 7: + /* EEM Case, only using gfid in EEM for now. */ + gfid = true; + + /* For EEM flows, The first part of cfa_code is 16 bits. + * The second part is embedded in the + * metadata field from bit 19 onwards. The driver needs to + * ignore the first 19 bits of metadata and use the next 12 + * bits as higher 12 bits of cfa_code. + */ + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT; + break; + default: + /* For other values, the cfa_code is assumed to be an LFID. */ + break; + } + + rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, + cfa_code, &vfr_flag, mark_id); + if (!rc) { + /* mark_id is the fw_fid of the endpoint vf's and + * it is used to identify the VFR. + */ + if (vfr_flag) + return 0; + } + +skip_mark: + return -EINVAL; +} + +int +bnxt_ulp_get_mark_from_cfacode(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, u32 *mark_id) +{ + bool gfid = false; + u32 vfr_flag; + u32 cfa_code; + u32 meta_fmt; + u32 flags2; + u32 meta; + int rc; + + if (rxcmp1) { + cfa_code = RX_CMP_CFA_CODE(rxcmp1); + flags2 = le32_to_cpu(rxcmp1->rx_cmp_flags2); + meta = le32_to_cpu(rxcmp1->rx_cmp_meta_data); + } else { + cfa_code = le16_to_cpu(tpa_info->cfa_code); + flags2 = le32_to_cpu(tpa_info->flags2); + meta = le32_to_cpu(tpa_info->metadata); + } + + /* The flags field holds extra bits of info from [6:4] + * which indicate if the flow is in TCAM or EM or EEM + */ + meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >> + BNXT_CFA_META_FMT_SHFT; + switch (meta_fmt) { + case 0: + if (BNXT_GFID_ENABLED(bp)) + /* Not an LFID or GFID, a flush cmd. 
*/ + goto skip_mark; + break; + case 4: + fallthrough; + case 5: + /* EM/TCAM case + * Assume that EM doesn't support Mark due to GFID + * collisions with EEM. Simply return without setting the mark + * in the mbuf. + */ + /* If it is not EM then it is a TCAM entry, so it is an LFID. + * The TCAM IDX and Mode can also be determined + * by decoding the meta_data. We are not + * using these for now. + */ + if (BNXT_CFA_META_EM_TEST(meta)) { + /*This is EM hit {EM(1), GFID[27:16], 19'd0 or vtag } */ + gfid = true; + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT; + } + break; + case 6: + fallthrough; + case 7: + /* EEM Case, only using gfid in EEM for now. */ + gfid = true; + + /* For EEM flows, The first part of cfa_code is 16 bits. + * The second part is embedded in the + * metadata field from bit 19 onwards. The driver needs to + * ignore the first 19 bits of metadata and use the next 12 + * bits as higher 12 bits of cfa_code. + */ + meta >>= BNXT_RX_META_CFA_CODE_SHIFT; + cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT; + break; + default: + /* For other values, the cfa_code is assumed to be an LFID. */ + break; + } + + rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid, + cfa_code, &vfr_flag, mark_id); + if (!rc) { + /* mark_id is the fw_fid of the endpoint vf's and + * it is used to identify the VFR. 
+ */ + if (vfr_flag) + return 0; + } + +skip_mark: + return -EINVAL; +} + +int bnxt_ulp_alloc_vf_rep(struct bnxt *bp, void *vfr) +{ + int rc; + + rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp, vfr); + if (rc) { + netdev_dbg(bp->dev, "Failed to update port database\n"); + return -EINVAL; + } + + rc = bnxt_hwrm_cfa_pair_exists(bp, vfr); + if (!rc) + bnxt_hwrm_cfa_pair_free(bp, vfr); + + rc = bnxt_ulp_create_vfr_default_rules(vfr); + if (rc) { + netdev_dbg(bp->dev, "Failed to create VFR default rules\n"); + return rc; + } + + rc = bnxt_hwrm_cfa_pair_alloc(bp, vfr); + if (rc) { + netdev_dbg(bp->dev, "CFA_PAIR_ALLOC hwrm command failed\n"); + return rc; + } + + return 0; +} + +int bnxt_ulp_alloc_vf_rep_p7(struct bnxt *bp, void *vfr) +{ + struct bnxt_vf_rep *vf_rep = vfr; + u16 vfr_fid; + int rc; + + rc = ulp_port_db_dev_port_intf_update(bp->ulp_ctx, bp, vf_rep); + if (rc) { + netdev_dbg(bp->dev, "Failed to update port database\n"); + return -EINVAL; + } + + vfr_fid = bnxt_vfr_get_fw_func_id(vf_rep); + rc = bnxt_hwrm_release_afm_func(bp, + vfr_fid, + bp->pf.fw_fid, + CFA_RELEASE_AFM_FUNC_REQ_TYPE_EFID, + 0); + + if (rc) { + netdev_dbg(bp->dev, + "Failed to release EFID:%d from RFID:%d rc=%d\n", + vfr_fid, bp->pf.fw_fid, rc); + goto error_del_rules; + } + netdev_dbg(bp->dev, "Released EFID:%d from RFID:%d\n", vfr_fid, bp->pf.fw_fid); + + /* This will add the vfr endpoint to the session. */ + rc = bnxt_ulp_vfr_session_fid_add(bp->ulp_ctx, vfr_fid); + if (rc) + goto error_del_rules; + else + netdev_dbg(bp->dev, "VFR EFID %d created and initialized\n", vfr_fid); + + /* Create the VFR default rules once we've initialized the VF rep. 
*/ + rc = bnxt_ulp_create_vfr_default_rules(vf_rep); + if (rc) { + netdev_dbg(bp->dev, "Failed to create VFR default rules\n"); + return rc; + } + + /* bnxt vfrep cfact update */ + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); + if (!vf_rep->dst) + return -ENOMEM; + + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + + /* disable TLS on the VFR */ + vf_rep->dev->hw_features &= ~(NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX); + vf_rep->dev->features &= ~(NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX); + + return rc; + +error_del_rules: + (void)bnxt_ulp_delete_vfr_default_rules(vf_rep); + return rc; +} + +void bnxt_ulp_free_vf_rep(struct bnxt *bp, void *vfr) +{ + int rc; + + rc = bnxt_ulp_delete_vfr_default_rules(vfr); + if (rc) + netdev_dbg(bp->dev, "Failed to delete VFR default rules\n"); + + bnxt_hwrm_cfa_pair_free(bp, vfr); +} + +void bnxt_ulp_free_vf_rep_p7(struct bnxt *bp, void *vfr) +{ + u16 vfr_fid = bnxt_vfr_get_fw_func_id(vfr); + int rc; + + rc = bnxt_ulp_delete_vfr_default_rules(vfr); + if (rc) + netdev_dbg(bp->dev, "Failed to delete VFR default rules\n"); + + rc = bnxt_ulp_vfr_session_fid_rem(bp->ulp_ctx, vfr_fid); + if (rc) + netdev_dbg(bp->dev, + "Failed to remove VFR EFID %d from session\n", vfr_fid); +} + +/* Function to check if allowing multicast and broadcast flow offload. 
*/ +bool +bnxt_ulp_validate_bcast_mcast(struct bnxt *bp) +{ + struct bnxt_ulp_context *ulp_ctx; + u8 app_id; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "%s: ULP context is not initialized\n", + __func__); + return false; + } + + if (bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id)) { + netdev_dbg(bp->dev, "%s: Failed to get the app id\n", __func__); + return false; + } + + /* app_id=0 supports mc/bc flow offload */ + if (app_id != 0) + return false; + + return true; +} + +/* This function sets the number of key recipes supported + * Generally, this should be set to the number of flexible keys + * supported + */ +void +bnxt_ulp_num_key_recipes_set(struct bnxt_ulp_context *ulp_ctx, + u16 num_recipes) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return; + ulp_ctx->cfg_data->num_key_recipes_per_dir = num_recipes; +} + +/* This function gets the number of key recipes supported */ +int +bnxt_ulp_num_key_recipes_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->num_key_recipes_per_dir; +} + +/* This function gets the feature bits */ +u64 +bnxt_ulp_feature_bits_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->feature_bits; +} + +/* Add the VF Rep endpoint to the session */ +int +bnxt_ulp_vfr_session_fid_add(struct bnxt_ulp_context *ulp_ctx, + u16 vfr_fid) +{ + int rc = 0; + + if (!ulp_ctx || !ulp_ctx->ops) + return -EINVAL; + if (ulp_ctx->ops->ulp_vfr_session_fid_add) + rc = ulp_ctx->ops->ulp_vfr_session_fid_add(ulp_ctx, vfr_fid); + + return rc; +} + +/* Remove the VF Rep endpoint from the session */ +int +bnxt_ulp_vfr_session_fid_rem(struct bnxt_ulp_context *ulp_ctx, + u16 vfr_fid) +{ + int rc = 0; + + if (!ulp_ctx || !ulp_ctx->ops) + return -EINVAL; + if (ulp_ctx->ops->ulp_vfr_session_fid_rem) + rc = ulp_ctx->ops->ulp_vfr_session_fid_rem(ulp_ctx, vfr_fid); + return rc; +} +#endif /* 
CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.h new file mode 100644 index 000000000000..28edebddbb4d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp.h @@ -0,0 +1,619 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_ULP_H_ +#define _BNXT_ULP_H_ + +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt.h" + +#include "tf_core.h" +#include "cfa_types.h" +#include "ulp_template_db_enum.h" +#include "bnxt_tf_common.h" + +/* NAT defines to reuse existing inner L2 SMAC and DMAC */ +#define BNXT_ULP_NAT_INNER_L2_HEADER_SMAC 0x2000 +#define BNXT_ULP_NAT_OUTER_MOST_L2_HDR_SMAC 0x6000 +#define BNXT_ULP_NAT_OUTER_MOST_L2_VLAN_TAGS 0xc00 +#define BNXT_ULP_NAT_INNER_L2_HEADER_DMAC 0x100 +#define BNXT_ULP_NAT_OUTER_MOST_L2_HDR_DMAC 0x300 +#define BNXT_ULP_NAT_OUTER_MOST_FLAGS (BNXT_ULP_NAT_OUTER_MOST_L2_HDR_SMAC |\ + BNXT_ULP_NAT_OUTER_MOST_L2_VLAN_TAGS |\ + BNXT_ULP_NAT_OUTER_MOST_L2_HDR_DMAC) + +/* defines for the ulp_flags */ +#define BNXT_ULP_VF_REP_ENABLED 0x1 +#define BNXT_ULP_SHARED_SESSION_ENABLED 0x2 +#define BNXT_ULP_APP_DEV_UNSUPPORTED 0x4 +#define BNXT_ULP_HIGH_AVAIL_ENABLED 0x8 +#define BNXT_ULP_APP_UNICAST_ONLY 0x10 +#define BNXT_ULP_APP_SOCKET_DIRECT 0x20 +#define BNXT_ULP_APP_TOS_PROTO_SUPPORT 0x40 +#define BNXT_ULP_APP_BC_MC_SUPPORT 0x80 +#define BNXT_ULP_CUST_VXLAN_SUPPORT 0x100 +#define BNXT_ULP_MULTI_SHARED_SUPPORT 0x200 +#define BNXT_ULP_APP_HA_DYNAMIC 0x400 +#define BNXT_ULP_APP_SRV6 0x800 +#define BNXT_ULP_APP_L2_ETYPE 0x1000 +#define BNXT_ULP_SHARED_TBL_SCOPE_ENABLED 0x2000 +#define BNXT_ULP_APP_DSCP_REMAP_ENABLED 0x4000 + +#define ULP_VF_REP_IS_ENABLED(flag) ((flag) & BNXT_ULP_VF_REP_ENABLED) +#define ULP_SHARED_SESSION_IS_ENABLED(flag) ((flag) &\ + BNXT_ULP_SHARED_SESSION_ENABLED) +#define 
ULP_APP_DEV_UNSUPPORTED_ENABLED(flag) ((flag) &\ + BNXT_ULP_APP_DEV_UNSUPPORTED) +#define ULP_HIGH_AVAIL_IS_ENABLED(flag) ((flag) & BNXT_ULP_HIGH_AVAIL_ENABLED) +#define ULP_DSCP_REMAP_IS_ENABLED(flag) ((flag) & BNXT_ULP_APP_DSCP_REMAP_ENABLED) +#define ULP_SOCKET_DIRECT_IS_ENABLED(flag) ((flag) & BNXT_ULP_APP_SOCKET_DIRECT) +#define ULP_APP_TOS_PROTO_SUPPORT(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_APP_TOS_PROTO_SUPPORT) +#define ULP_APP_BC_MC_SUPPORT(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_APP_BC_MC_SUPPORT) +#define ULP_MULTI_SHARED_IS_SUPPORTED(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_MULTI_SHARED_SUPPORT) +#define ULP_APP_HA_IS_DYNAMIC(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_APP_HA_DYNAMIC) + +#define ULP_APP_CUST_VXLAN_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_port != 0) +#define ULP_APP_VXLAN_GPE_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_gpe_port != 0) +#define ULP_APP_CUST_VXLAN_IP_SUPPORT(ctx) ((ctx)->cfg_data->vxlan_ip_port != 0) + +#define ULP_APP_SRV6_SUPPORT(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_APP_SRV6) +#define ULP_APP_L2_ETYPE_SUPPORT(ctx) ((ctx)->cfg_data->ulp_flags &\ + BNXT_ULP_APP_L2_ETYPE) + +enum bnxt_ulp_flow_mem_type { + BNXT_ULP_FLOW_MEM_TYPE_INT = 0, + BNXT_ULP_FLOW_MEM_TYPE_EXT = 1, + BNXT_ULP_FLOW_MEM_TYPE_BOTH = 2, + BNXT_ULP_FLOW_MEM_TYPE_LAST = 3 +}; + +enum bnxt_tc_flow_item_type { + BNXT_TC_FLOW_ITEM_TYPE_END = (u32)INT_MIN, + BNXT_TC_FLOW_ITEM_TYPE_VXLAN_DECAP, + BNXT_TC_FLOW_ITEM_TYPE_LAST +}; + +enum bnxt_tc_flow_action_type { + BNXT_TC_FLOW_ACTION_TYPE_END = S32_MIN, + BNXT_TC_FLOW_ACTION_TYPE_VXLAN_DECAP, + BNXT_TC_FLOW_ACTION_TYPE_LAST +}; + +enum bnxt_session_type { + BNXT_SESSION_TYPE_REGULAR = 0, + BNXT_SESSION_TYPE_SHARED_COMMON, + BNXT_SESSION_TYPE_SHARED_WC, + BNXT_SESSION_TYPE_LAST +}; + +struct bnxt_ulp_df_rule_info { + u32 def_port_flow_id; + u8 valid; +}; + +struct bnxt_ulp_vfr_rule_info { + u32 vfr_flow_id; + u16 parent_port_id; + u8 valid; +}; + +struct bnxt_ulp_data { + u32 tbl_scope_id; 
+ struct bnxt_ulp_mark_tbl *mark_tbl; + u32 dev_id; + u32 ref_cnt; + struct bnxt_ulp_flow_db *flow_db; + /* Serialize flow db operations */ + struct mutex flow_db_lock; /* flow db lock */ + void *mapper_data; + void *matcher_data; + struct bnxt_ulp_port_db *port_db; + struct bnxt_ulp_fc_info *fc_info; + u32 ulp_flags; +#define BNXT_TC_MAX_PORTS 1024 + struct bnxt_ulp_df_rule_info df_rule_info[BNXT_TC_MAX_PORTS]; + struct bnxt_ulp_vfr_rule_info vfr_rule_info[BNXT_TC_MAX_PORTS]; + enum bnxt_ulp_flow_mem_type mem_type; +#define BNXT_ULP_TUN_ENTRY_INVALID -1 +#define BNXT_ULP_MAX_TUN_CACHE_ENTRIES 16 + u8 app_id; + u8 num_shared_clients; + u32 default_priority; + u32 max_def_priority; + u32 min_flow_priority; + u32 max_flow_priority; + u32 vxlan_port; + u32 vxlan_gpe_port; + u32 vxlan_ip_port; + u32 ecpri_udp_port; + u32 hu_session_type; + u32 max_pools; + u32 num_rx_flows; + u32 num_tx_flows; + u16 act_rx_max_sz; + u16 act_tx_max_sz; + u16 em_rx_key_max_sz; + u16 em_tx_key_max_sz; + u32 page_sz; + u8 hu_reg_state; + u8 hu_reg_cnt; + u8 ha_pool_id; + u8 tunnel_next_proto; + u8 em_multiplier; + enum bnxt_ulp_session_type def_session_type; + u16 num_key_recipes_per_dir; + struct delayed_work fc_work; + u64 feature_bits; + u64 default_class_bits; + u64 default_act_bits; + bool meter_initialized; + /* Below three members are protected by flow_db_lock */ + bool dscp_remap_initialized; + __be32 dscp_remap_val; + u32 dscp_remap_ref; +}; + +enum bnxt_ulp_tfo_type { + BNXT_ULP_TFO_TYPE_INVALID = 0, + BNXT_ULP_TFO_TYPE_P5, + BNXT_ULP_TFO_TYPE_P7 +}; + +#define BNXT_ULP_SESSION_MAX 3 +#define BNXT_ULP_TFO_SID_FLAG (1) +#define BNXT_ULP_TFO_TSID_FLAG (2) + +struct bnxt_ulp_context { + struct bnxt_ulp_data *cfg_data; + struct bnxt *bp; + enum bnxt_ulp_tfo_type tfo_type; + union { + void *g_tfp[BNXT_ULP_SESSION_MAX]; + struct { + u32 tfo_flags; + void *tfcp; + u16 sid; + u8 tsid; + }; + }; + const struct bnxt_ulp_core_ops *ops; +}; + +struct bnxt_ulp_pci_info { + u32 domain; + u8 
bus; +}; + +#define BNXT_ULP_DEVICE_SERIAL_NUM_SIZE 8 +struct bnxt_ulp_session_state { + struct hlist_node next; + bool bnxt_ulp_init; + /* Serialize session operations */ + struct mutex bnxt_ulp_mutex; /* ulp lock */ + struct bnxt_ulp_pci_info pci_info; + u8 dsn[BNXT_ULP_DEVICE_SERIAL_NUM_SIZE]; + struct bnxt_ulp_data *cfg_data; + struct tf *g_tfp[BNXT_ULP_SESSION_MAX]; + u32 session_opened[BNXT_ULP_SESSION_MAX]; + /* Need to revisit a union for the tf related data */ + u16 session_id; +}; + +/* ULP flow id structure */ +struct tc_tf_flow { + u32 flow_id; +}; + +struct ulp_tlv_param { + enum bnxt_ulp_df_param_type type; + u32 length; + u8 value[16]; +}; + +struct ulp_context_list_entry { + struct hlist_node next; + struct bnxt_ulp_context *ulp_ctx; +}; + +struct bnxt_ulp_core_ops { + int + (*ulp_init)(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type); + void + (*ulp_deinit)(struct bnxt *bp, + struct bnxt_ulp_session_state *session); + int + (*ulp_ctx_attach)(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type); + void + (*ulp_ctx_detach)(struct bnxt *bp, + struct bnxt_ulp_session_state *session); + void * + (*ulp_tfp_get)(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_session_type s_type); + int + (*ulp_vfr_session_fid_add)(struct bnxt_ulp_context *ulp_ctx, + u16 rep_fid); + int + (*ulp_vfr_session_fid_rem)(struct bnxt_ulp_context *ulp_ctx, + u16 rep_fid); +}; + +extern const struct bnxt_ulp_core_ops bnxt_ulp_tf_core_ops; +extern const struct bnxt_ulp_core_ops bnxt_ulp_tfc_core_ops; + +int +bnxt_ulp_devid_get(struct bnxt *bp, enum bnxt_ulp_device_id *ulp_dev_id); + +/* Allow the deletion of context only for the bnxt device that + * created the session + */ +bool +ulp_ctx_deinit_allowed(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the device id of the hardware. 
*/ +int +bnxt_ulp_cntxt_dev_id_set(struct bnxt_ulp_context *ulp_ctx, u32 dev_id); + +/* Function to get the device id of the hardware. */ +int +bnxt_ulp_cntxt_dev_id_get(struct bnxt_ulp_context *ulp_ctx, u32 *dev_id); + +/* Function to get whether or not ext mem is used for EM */ +int +bnxt_ulp_cntxt_mem_type_get(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_flow_mem_type *mem_type); + +/* Function to set whether or not ext mem is used for EM */ +int +bnxt_ulp_cntxt_mem_type_set(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_flow_mem_type mem_type); + +/* Function to set the table scope id of the EEM table. */ +int +bnxt_ulp_cntxt_tbl_scope_id_set(struct bnxt_ulp_context *ulp_ctx, + u32 tbl_scope_id); + +/* Function to get the table scope id of the EEM table. */ +int +bnxt_ulp_cntxt_tbl_scope_id_get(struct bnxt_ulp_context *ulp_ctx, + u32 *tbl_scope_id); + +int +bnxt_ulp_cntxt_bp_set(struct bnxt_ulp_context *ulp, struct bnxt *bp); + +/* Function to get the bp associated with the ulp_ctx */ +struct bnxt * +bnxt_ulp_cntxt_bp_get(struct bnxt_ulp_context *ulp); + +/* Function to set the v3 table scope id, only works for tfc objects */ +int +bnxt_ulp_cntxt_tsid_set(struct bnxt_ulp_context *ulp_ctx, u8 tsid); + +/* + * Function to set the v3 table scope id, only works for tfc objects + * There isn't a known invalid value for tsid, so this is necessary in order to + * know that the tsid is not set. + */ +void +bnxt_ulp_cntxt_tsid_reset(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the v3 table scope id, only works for tfc objects */ +int +bnxt_ulp_cntxt_tsid_get(struct bnxt_ulp_context *ulp_ctx, u8 *tsid); + +/* Function to set the v3 session id, only works for tfc objects */ +int +bnxt_ulp_cntxt_sid_set(struct bnxt_ulp_context *ulp_ctx, u16 session_id); + +/* + * Function to reset the v3 session id, only works for tfc objects + * There isn't a known invalid value for sid, so this is necessary in order to + * know that the sid is not set. 
+ */ +void +bnxt_ulp_cntxt_sid_reset(struct bnxt_ulp_context *ulp_ctx); + +/* Function to get the v3 session id, only works for tfc objects */ +int +bnxt_ulp_cntxt_sid_get(struct bnxt_ulp_context *ulp_ctx, u16 *sid); + +int +bnxt_ulp_cntxt_fid_get(struct bnxt_ulp_context *ulp, u16 *fid); + +struct tf * +bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type); + +/* Get the device table entry based on the device id. */ +struct bnxt_ulp_device_params * +bnxt_ulp_device_params_get(u32 dev_id); + +int +bnxt_ulp_cntxt_ptr2_mark_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mark_tbl *mark_tbl); + +struct bnxt_ulp_mark_tbl * +bnxt_ulp_cntxt_ptr2_mark_db_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the flow database to the ulp context. */ +int +bnxt_ulp_cntxt_ptr2_flow_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_flow_db *flow_db); + +/* Function to get the flow database from the ulp context. */ +struct bnxt_ulp_flow_db * +bnxt_ulp_cntxt_ptr2_flow_db_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to get the tunnel cache table info from the ulp context. */ +struct bnxt_tun_cache_entry * +bnxt_ulp_cntxt_ptr2_tun_tbl_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to get the ulp context from eth device. 
*/ +struct bnxt_ulp_context * +bnxt_ulp_bp_ptr2_cntxt_get(struct bnxt *bp); + +/* Function to add the ulp mapper data to the ulp context */ +int +bnxt_ulp_cntxt_ptr2_mapper_data_set(struct bnxt_ulp_context *ulp_ctx, + void *mapper_data); + +/* Function to get the ulp mapper data from the ulp context */ +void * +bnxt_ulp_cntxt_ptr2_mapper_data_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to add the ulp matcher data to the ulp context */ +int +bnxt_ulp_cntxt_ptr2_matcher_data_set(struct bnxt_ulp_context *ulp_ctx, + void *matcher_data); + +/* Function to get the ulp matcher data from the ulp context */ +void * +bnxt_ulp_cntxt_ptr2_matcher_data_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to set the port database to the ulp context. */ +int +bnxt_ulp_cntxt_ptr2_port_db_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db); + +/* Function to get the port database from the ulp context. */ +struct bnxt_ulp_port_db * +bnxt_ulp_cntxt_ptr2_port_db_get(struct bnxt_ulp_context *ulp_ctx); + +/* Function to create default flows. */ +int +ulp_default_flow_create(struct bnxt *bp, + struct ulp_tlv_param *param_list, + u32 ulp_class_tid, + u16 port_id, + u32 *flow_id); + +/* Function to destroy default flows. 
*/ +int +ulp_default_flow_destroy(struct bnxt *bp, + u32 flow_id); + +int +bnxt_ulp_cntxt_ptr2_fc_info_set(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_fc_info *ulp_fc_info); + +struct bnxt_ulp_fc_info * +bnxt_ulp_cntxt_ptr2_fc_info_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_ptr2_ulp_flags_get(struct bnxt_ulp_context *ulp_ctx, + u32 *flags); + +int +bnxt_ulp_get_df_rule_info(u8 port_id, struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_df_rule_info *info); + +struct bnxt_ulp_vfr_rule_info* +bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(struct bnxt_ulp_context *ulp_ctx, + u32 port_id); + +int +bnxt_ulp_cntxt_acquire_fdb_lock(struct bnxt_ulp_context *ulp_ctx); + +void +bnxt_ulp_cntxt_release_fdb_lock(struct bnxt_ulp_context *ulp_ctx); + +struct bnxt_ulp_shared_act_info * +bnxt_ulp_shared_act_info_get(u32 *num_entries); + +struct bnxt_ulp_glb_resource_info * +bnxt_ulp_app_glb_resource_info_list_get(u32 *num_entries); + +int +bnxt_ulp_cntxt_app_id_set(struct bnxt_ulp_context *ulp_ctx, u8 app_id); + +int +bnxt_ulp_cntxt_app_id_get(struct bnxt_ulp_context *ulp_ctx, u8 *app_id); + +bool +bnxt_ulp_cntxt_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx); + +bool +bnxt_ulp_cntxt_multi_shared_session_enabled(struct bnxt_ulp_context *ulp_ctx); + +struct bnxt_ulp_app_capabilities_info * +bnxt_ulp_app_cap_list_get(u32 *num_entries); + +struct bnxt_ulp_resource_resv_info * +bnxt_ulp_app_resource_resv_list_get(u32 *num_entries); + +struct bnxt_ulp_resource_resv_info * +bnxt_ulp_resource_resv_list_get(u32 *num_entries); + +bool +bnxt_ulp_cntxt_ha_enabled(struct bnxt_ulp_context *ulp_ctx); + +struct bnxt_ulp_context * +bnxt_ulp_cntxt_entry_lookup(void *cfg_data); + +void +bnxt_ulp_cntxt_lock_acquire(void); + +void +bnxt_ulp_cntxt_lock_release(void); + +int +bnxt_ulp_cntxt_num_shared_clients_set(struct bnxt_ulp_context *ulp_ctx, + bool incr); + +struct bnxt_flow_app_tun_ent * +bnxt_ulp_cntxt_ptr2_app_tun_list_get(struct bnxt_ulp_context *ulp); + +/* Function 
to get the truflow app id. This defined in the build file */ +u32 +bnxt_ulp_default_app_id_get(void); + +int +bnxt_ulp_vxlan_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_port); +unsigned int +bnxt_ulp_vxlan_port_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_ip_port); +unsigned int +bnxt_ulp_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 ecpri_udp_port); + +unsigned int +bnxt_ulp_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx); + +u32 +bnxt_ulp_vxlan_gpe_next_proto_set(struct bnxt_ulp_context *ulp_ctx, + u8 tunnel_next_proto); + +u8 +bnxt_ulp_vxlan_gpe_next_proto_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_vxlan_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_port); +unsigned int +bnxt_ulp_cntxt_vxlan_port_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_default_app_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio); + +unsigned int +bnxt_ulp_default_app_priority_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_max_def_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio); + +unsigned int +bnxt_ulp_max_def_priority_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_min_flow_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio); + +unsigned int +bnxt_ulp_min_flow_priority_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_max_flow_priority_set(struct bnxt_ulp_context *ulp_ctx, u32 prio); + +unsigned int +bnxt_ulp_max_flow_priority_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_vxlan_ip_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 vxlan_ip_port); +unsigned int +bnxt_ulp_cntxt_vxlan_ip_port_get(struct bnxt_ulp_context *ulp_ctx); +int +bnxt_ulp_cntxt_ecpri_udp_port_set(struct bnxt_ulp_context *ulp_ctx, + u32 ecpri_udp_port); +unsigned int +bnxt_ulp_cntxt_ecpri_udp_port_get(struct bnxt_ulp_context *ulp_ctx); + +int 
+bnxt_flow_meter_init(struct bnxt *bp); + +u32 +bnxt_ulp_cntxt_convert_dev_id(struct bnxt *bp, u32 ulp_dev_id); + +struct tf * +bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type); + +int +bnxt_ulp_cntxt_ha_reg_set(struct bnxt_ulp_context *ulp_ctx, + u8 state, u8 cnt); + +u32 +bnxt_ulp_cntxt_ha_reg_state_get(struct bnxt_ulp_context *ulp_ctx); + +u32 +bnxt_ulp_cntxt_ha_reg_cnt_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_list_init(void); + +int +bnxt_ulp_cntxt_list_add(struct bnxt_ulp_context *ulp_ctx); + +void +bnxt_ulp_cntxt_list_del(struct bnxt_ulp_context *ulp_ctx); + +void +bnxt_ulp_num_key_recipes_set(struct bnxt_ulp_context *ulp_ctx, + u16 recipes); + +int +bnxt_ulp_num_key_recipes_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_create_df_rules(struct bnxt *bp); + +void +bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global); + +/* Function to check if allowing multicast and broadcast flow offload. */ +bool +bnxt_ulp_validate_bcast_mcast(struct bnxt *bp); + +int bnxt_flow_meter_init(struct bnxt *bp); + +u64 +bnxt_ulp_feature_bits_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_vfr_session_fid_add(struct bnxt_ulp_context *ulp_ctx, + u16 vfr_fid); +int +bnxt_ulp_vfr_session_fid_rem(struct bnxt_ulp_context *ulp_ctx, + u16 vfr_fid); +void +bnxt_ulp_cntxt_ptr2_default_class_bits_set(struct bnxt_ulp_context *ulp_ctx, + u64 bits); + +u64 +bnxt_ulp_cntxt_ptr2_default_class_bits_get(struct bnxt_ulp_context *ulp_ctx); + +void +bnxt_ulp_cntxt_ptr2_default_act_bits_set(struct bnxt_ulp_context *ulp_ctx, + u64 bits); +u64 +bnxt_ulp_cntxt_ptr2_default_act_bits_get(struct bnxt_ulp_context *ulp_ctx); +#endif /* _BNXT_ULP_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.c new file mode 100644 index 000000000000..0a05c833ffd7 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.c @@ -0,0 +1,1474 @@ +// 
SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p5.h" +#include "bnxt_ulp_flow.h" +#include "bnxt_tf_common.h" +#include "tf_core.h" +#include "tf_ext_flow_handle.h" + +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_mark_mgr.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_matcher.h" +#include "ulp_port_db.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +/* Function to set the tfp session details from the ulp context. */ +static int +bnxt_tf_ulp_cntxt_tfp_set(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_session_type s_type, + struct tf *tfp) +{ + u32 idx = 0; + enum bnxt_ulp_tfo_type tfo_type = BNXT_ULP_TFO_TYPE_P5; + + if (!ulp) + return -EINVAL; + + if (ULP_MULTI_SHARED_IS_SUPPORTED(ulp)) { + if (s_type & BNXT_ULP_SESSION_TYPE_SHARED) + idx = 1; + else if (s_type & BNXT_ULP_SESSION_TYPE_SHARED_WC) + idx = 2; + + } else { + if ((s_type & BNXT_ULP_SESSION_TYPE_SHARED) || + (s_type & BNXT_ULP_SESSION_TYPE_SHARED_WC)) + idx = 1; + } + + ulp->g_tfp[idx] = tfp; + + if (!tfp) { + u32 i = 0; + + while (i < BNXT_ULP_SESSION_MAX && !ulp->g_tfp[i]) + i++; + if (i == BNXT_ULP_SESSION_MAX) + ulp->tfo_type = BNXT_ULP_TFO_TYPE_INVALID; + } else { + ulp->tfo_type = tfo_type; + } + netdev_dbg(ulp->bp->dev, "%s Setting tfo_type %d session tpye %d\n", + __func__, tfo_type, s_type); + return 0; +} + +/* Function to get the tfp session details from the ulp context. 
*/ +void * +bnxt_tf_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_session_type s_type) +{ + u32 idx = 0; + + if (!ulp) + return NULL; + + if (ulp->tfo_type != BNXT_ULP_TFO_TYPE_P5) { + netdev_dbg(ulp->bp->dev, "Wrong tf type %d != %d\n", + ulp->tfo_type, BNXT_ULP_TFO_TYPE_P5); + return NULL; + } + + if (ULP_MULTI_SHARED_IS_SUPPORTED(ulp)) { + if (s_type & BNXT_ULP_SESSION_TYPE_SHARED) + idx = 1; + else if (s_type & BNXT_ULP_SESSION_TYPE_SHARED_WC) + idx = 2; + } else { + if ((s_type & BNXT_ULP_SESSION_TYPE_SHARED) || + (s_type & BNXT_ULP_SESSION_TYPE_SHARED_WC)) + idx = 1; + } + return (struct tf *)ulp->g_tfp[idx]; +} + +struct tf *bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type) +{ + struct tf *tfp = bp->tfp; + + return (type >= BNXT_SESSION_TYPE_LAST) ? + &tfp[BNXT_SESSION_TYPE_REGULAR] : &tfp[type]; +} + +struct tf * +bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type) +{ + enum bnxt_session_type btype; + + if (type & BNXT_ULP_SESSION_TYPE_SHARED) + btype = BNXT_SESSION_TYPE_SHARED_COMMON; + else if (type & BNXT_ULP_SESSION_TYPE_SHARED_WC) + btype = BNXT_SESSION_TYPE_SHARED_WC; + else + btype = BNXT_SESSION_TYPE_REGULAR; + + return bnxt_get_tfp_session(bp, btype); +} + +static int +ulp_tf_named_resources_calc(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_glb_resource_info *info, + u32 num, + enum bnxt_ulp_session_type stype, + struct tf_session_resources *res) +{ + u32 dev_id = BNXT_ULP_DEVICE_ID_LAST, res_type, i; + enum tf_dir dir; + int rc = 0; + u8 app_id; + + if (!ulp_ctx || !info || !res || !num) + return -EINVAL; + + rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the app id from ulp.\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the dev id from ulp.\n"); + return -EINVAL; + } + + for (i = 0; i < num; i++) { + if (dev_id != info[i].device_id || 
app_id != info[i].app_id) + continue; + /* check to see if the session type matches only then include */ + if ((stype || info[i].session_type) && + !(info[i].session_type & stype)) + continue; + + dir = info[i].direction; + res_type = info[i].resource_type; + + switch (info[i].resource_func) { + case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + res->ident_cnt[dir].cnt[res_type]++; + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + res->tbl_cnt[dir].cnt[res_type]++; + break; + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + res->tcam_cnt[dir].cnt[res_type]++; + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + res->em_cnt[dir].cnt[res_type]++; + break; + default: + netdev_dbg(ulp_ctx->bp->dev, "Unknown resource func (0x%x)\n,", + info[i].resource_func); + continue; + } + } + + return 0; +} + +static int +ulp_tf_unnamed_resources_calc(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_resource_resv_info *info, + u32 num, + enum bnxt_ulp_session_type stype, + struct tf_session_resources *res) +{ + u32 dev_id, res_type, i; + enum tf_dir dir; + int rc = 0; + u8 app_id; + + if (!ulp_ctx || !res || !info || num == 0) + return -EINVAL; + + rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the app id from ulp.\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the dev id from ulp.\n"); + return -EINVAL; + } + + for (i = 0; i < num; i++) { + if (app_id != info[i].app_id || dev_id != info[i].device_id) + continue; + + /* check to see if the session type matches only then include */ + if ((stype || info[i].session_type) && + !(info[i].session_type & stype)) + continue; + + dir = info[i].direction; + res_type = info[i].resource_type; + + switch (info[i].resource_func) { + case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + res->ident_cnt[dir].cnt[res_type] = info[i].count; + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + 
res->tbl_cnt[dir].cnt[res_type] = info[i].count; + break; + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + res->tcam_cnt[dir].cnt[res_type] = info[i].count; + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + res->em_cnt[dir].cnt[res_type] = info[i].count; + break; + default: + netdev_dbg(ulp_ctx->bp->dev, "Unsupported resource\n"); + return -EINVAL; + } + } + return 0; +} + +static int +ulp_tf_resources_get(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_session_type stype, + struct tf_session_resources *res) +{ + struct bnxt_ulp_resource_resv_info *unnamed = NULL; + int rc = 0; + u32 unum; + + if (!ulp_ctx || !res) + return -EINVAL; + + /* use DEFAULT_NON_HA instead of DEFAULT resources if HA is disabled */ + if (ULP_APP_HA_IS_DYNAMIC(ulp_ctx)) + stype = ulp_ctx->cfg_data->def_session_type; + + unnamed = bnxt_ulp_resource_resv_list_get(&unum); + if (!unnamed) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get resource resv list.\n"); + return -EINVAL; + } + + rc = ulp_tf_unnamed_resources_calc(ulp_ctx, unnamed, unum, stype, res); + if (rc) + netdev_dbg(ulp_ctx->bp->dev, "Unable to calc resources for session.\n"); + + return rc; +} + +static int +ulp_tf_shared_session_resources_get(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_session_type stype, + struct tf_session_resources *res) +{ + struct bnxt_ulp_resource_resv_info *unnamed; + struct bnxt_ulp_glb_resource_info *named; + u32 unum = 0, nnum = 0; + int rc; + + if (!ulp_ctx || !res) + return -EINVAL; + + /* Make sure the resources are zero before accumulating. */ + memset(res, 0, sizeof(struct tf_session_resources)); + + /* Shared resources are comprised of both named and unnamed resources. + * First get the unnamed counts, and then add the named to the result. 
+ */ + /* Get the baseline counts */ + unnamed = bnxt_ulp_app_resource_resv_list_get(&unum); + if (unum) { + rc = ulp_tf_unnamed_resources_calc(ulp_ctx, unnamed, + unum, stype, res); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Unable to calc resources for shared session.\n"); + return -EINVAL; + } + } + + /* Get the named list and add the totals */ + named = bnxt_ulp_app_glb_resource_info_list_get(&nnum); + /* No need to calc resources, none to calculate */ + if (!nnum) + return 0; + + rc = ulp_tf_named_resources_calc(ulp_ctx, named, nnum, stype, res); + if (rc) + netdev_dbg(ulp_ctx->bp->dev, "Unable to calc named resources\n"); + + return rc; +} + +/* Function to set the hot upgrade support into the context */ +static int +ulp_tf_multi_shared_session_support_set(struct bnxt *bp, + enum bnxt_ulp_device_id devid, + u32 fw_hu_update) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct tf_get_version_parms v_params = { 0 }; + int32_t new_fw = 0; + struct tf *tfp; + int32_t rc = 0; + + v_params.device_type = bnxt_ulp_cntxt_convert_dev_id(bp, devid); + v_params.bp = bp; + + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + rc = tf_get_version(tfp, &v_params); + if (rc) { + netdev_dbg(bp->dev, "Unable to get tf version.\n"); + return rc; + } + + if (v_params.major == 1 && v_params.minor == 0 && + v_params.update == 1) { + new_fw = 1; + } + /* if the version update is greater than 0 then set support for + * multiple version + */ + if (new_fw) { + ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_MULTI_SHARED_SUPPORT; + ulp_ctx->cfg_data->hu_session_type = + BNXT_ULP_SESSION_TYPE_SHARED; + } + if (!new_fw && fw_hu_update) { + ulp_ctx->cfg_data->ulp_flags &= ~BNXT_ULP_HIGH_AVAIL_ENABLED; + ulp_ctx->cfg_data->hu_session_type = + BNXT_ULP_SESSION_TYPE_SHARED | + BNXT_ULP_SESSION_TYPE_SHARED_OWC; + } + + if (!new_fw && !fw_hu_update) { + ulp_ctx->cfg_data->hu_session_type = + BNXT_ULP_SESSION_TYPE_SHARED | + BNXT_ULP_SESSION_TYPE_SHARED_OWC; + } + + return rc; 
+} + +static int +ulp_tf_cntxt_app_caps_init(struct bnxt *bp, + u8 app_id, u32 dev_id) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_app_capabilities_info *info; + uint32_t num = 0, fw = 0; + bool found = false; + uint16_t i; + + if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) { + netdev_dbg(bp->dev, "APP ID %d, Device ID: 0x%x not supported.\n", + app_id, dev_id); + return -EINVAL; + } + + info = bnxt_ulp_app_cap_list_get(&num); + if (!info || !num) { + netdev_dbg(bp->dev, "Failed to get app capabilities.\n"); + return -EINVAL; + } + + for (i = 0; i < num; i++) { + if (info[i].app_id != app_id || info[i].device_id != dev_id) + continue; + found = true; + if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_SHARED_SESSION_ENABLED; + if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_HIGH_AVAIL_ENABLED; + if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_UNICAST_ONLY; + if (info[i].flags & BNXT_ULP_APP_CAP_IP_TOS_PROTO_SUPPORT) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_TOS_PROTO_SUPPORT; + if (info[i].flags & BNXT_ULP_APP_CAP_BC_MC_SUPPORT) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_BC_MC_SUPPORT; + if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) { + /* Enable socket direction only if MR is enabled in fw*/ + if (BNXT_MR(bp)) { + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_SOCKET_DIRECT; + netdev_dbg(bp->dev, + "Socket Direct feature is enabled\n"); + } + } + if (info[i].flags & BNXT_ULP_APP_CAP_SRV6) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_SRV6; + + if (info[i].flags & BNXT_ULP_APP_CAP_L2_ETYPE) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_L2_ETYPE; + + if (info[i].flags & BNXT_ULP_APP_CAP_DSCP_REMAP) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_DSCP_REMAP_ENABLED; + + bnxt_ulp_cntxt_vxlan_ip_port_set(ulp_ctx, info[i].vxlan_ip_port); + 
bnxt_ulp_cntxt_vxlan_port_set(ulp_ctx, info[i].vxlan_port); + bnxt_ulp_cntxt_ecpri_udp_port_set(ulp_ctx, info[i].ecpri_udp_port); + bnxt_ulp_vxlan_gpe_next_proto_set(ulp_ctx, info[i].tunnel_next_proto); + bnxt_ulp_num_key_recipes_set(ulp_ctx, + info[i].num_key_recipes_per_dir); + + /* set the shared session support from firmware */ + fw = info[i].upgrade_fw_update; + if (ULP_HIGH_AVAIL_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags) && + ulp_tf_multi_shared_session_support_set(bp, dev_id, fw)) { + netdev_dbg(bp->dev, + "Unable to get shared session support\n"); + return -EINVAL; + } + ulp_ctx->cfg_data->ha_pool_id = info[i].ha_pool_id; + bnxt_ulp_default_app_priority_set(ulp_ctx, + info[i].default_priority); + bnxt_ulp_max_def_priority_set(ulp_ctx, + info[i].max_def_priority); + bnxt_ulp_min_flow_priority_set(ulp_ctx, + info[i].min_flow_priority); + bnxt_ulp_max_flow_priority_set(ulp_ctx, + info[i].max_flow_priority); + ulp_ctx->cfg_data->feature_bits = info[i].feature_bits; + bnxt_ulp_cntxt_ptr2_default_class_bits_set(ulp_ctx, + info[i].default_class_bits); + bnxt_ulp_cntxt_ptr2_default_act_bits_set(ulp_ctx, + info[i].default_act_bits); + } + if (!found) { + netdev_dbg(bp->dev, "APP ID %d, Device ID: 0x%x not supported.\n", + app_id, dev_id); + ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED; + return -EINVAL; + } + + return 0; +} + +static inline u32 +ulp_tf_session_idx_get(enum bnxt_ulp_session_type session_type) { + if (session_type & BNXT_ULP_SESSION_TYPE_SHARED) + return 1; + else if (session_type & BNXT_ULP_SESSION_TYPE_SHARED_WC) + return 2; + return 0; +} + +/* Function to set the tfp session details in session */ +static int +ulp_tf_session_tfp_set(struct bnxt_ulp_session_state *session, + enum bnxt_ulp_session_type session_type, + struct tf *tfp) +{ + u32 idx = ulp_tf_session_idx_get(session_type); + struct tf *local_tfp; + int rc = 0; + + if (!session->session_opened[idx]) { + local_tfp = vzalloc(sizeof(*tfp)); + if (!local_tfp) + return -ENOMEM; 
+ local_tfp->session = tfp->session; + session->g_tfp[idx] = local_tfp; + session->session_opened[idx] = 1; + } + return rc; +} + +/* Function to get the tfp session details in session */ +static struct tf_session_info * +ulp_tf_session_tfp_get(struct bnxt_ulp_session_state *session, + enum bnxt_ulp_session_type session_type) +{ + u32 idx = ulp_tf_session_idx_get(session_type); + struct tf *local_tfp = session->g_tfp[idx]; + + if (session->session_opened[idx]) + return local_tfp->session; + return NULL; +} + +static u32 +ulp_tf_session_is_open(struct bnxt_ulp_session_state *session, + enum bnxt_ulp_session_type session_type) +{ + u32 idx = ulp_tf_session_idx_get(session_type); + + return session->session_opened[idx]; +} + +/* Function to reset the tfp session details in session */ +static void +ulp_tf_session_tfp_reset(struct bnxt_ulp_session_state *session, + enum bnxt_ulp_session_type session_type) +{ + u32 idx = ulp_tf_session_idx_get(session_type); + + if (session->session_opened[idx]) { + session->session_opened[idx] = 0; + vfree(session->g_tfp[idx]); + session->g_tfp[idx] = NULL; + } +} + +static void +ulp_tf_ctx_shared_session_close(struct bnxt *bp, + enum bnxt_ulp_session_type session_type, + struct bnxt_ulp_session_state *session) +{ + struct tf *tfp; + int rc; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(bp->ulp_ctx, session_type); + if (!tfp) { + /* Log it under debug since this is likely a case of the + * shared session not being created. For example, a failed + * initialization. 
+ */ + netdev_dbg(bp->dev, "Failed to get shared tfp on close\n"); + return; + } + rc = tf_close_session(tfp); + if (rc) + netdev_dbg(bp->dev, "Failed to close the shared session rc=%d\n", rc); + + bnxt_tf_ulp_cntxt_tfp_set(bp->ulp_ctx, session_type, NULL); + ulp_tf_session_tfp_reset(session, session_type); +} + +static void +ulp_tf_get_ctrl_chan_name(struct bnxt *bp, + struct tf_open_session_parms *params) +{ + struct net_device *dev = bp->dev; + + memset(params->ctrl_chan_name, '\0', TF_SESSION_NAME_MAX); + + if ((strlen(dev_name(dev->dev.parent)) >= + strlen(params->ctrl_chan_name)) || + (strlen(dev_name(dev->dev.parent)) >= + sizeof(params->ctrl_chan_name))) { + strncpy(params->ctrl_chan_name, dev_name(dev->dev.parent), + TF_SESSION_NAME_MAX - 1); + /* Make sure the string is terminated */ + params->ctrl_chan_name[TF_SESSION_NAME_MAX - 1] = '\0'; + return; + } + + strcpy(params->ctrl_chan_name, dev_name(dev->dev.parent)); +} + +static int +ulp_tf_ctx_shared_session_open(struct bnxt *bp, + enum bnxt_ulp_session_type session_type, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + uint ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST; + struct tf_session_resources *resources; + struct tf_open_session_parms parms; + struct tf *tfp; + u8 pool_id; + int rc = 0; + size_t nb; + u8 app_id; + + memset(&parms, 0, sizeof(parms)); + ulp_tf_get_ctrl_chan_name(bp, &parms); + + resources = &parms.resources; + + /* Need to account for size of ctrl_chan_name and 1 extra for Null + * terminator + */ + nb = sizeof(parms.ctrl_chan_name) - strlen(parms.ctrl_chan_name) - 1; + + /* Build the ctrl_chan_name with shared token. 
+ */ + pool_id = ulp_ctx->cfg_data->ha_pool_id; + if (!bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) { + strncat(parms.ctrl_chan_name, "-tf_shared", nb); + } else if (bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) { + if (session_type == BNXT_ULP_SESSION_TYPE_SHARED) { + strncat(parms.ctrl_chan_name, "-tf_shared", nb); + } else if (session_type == BNXT_ULP_SESSION_TYPE_SHARED_WC) { + char session_pool_name[64]; + + sprintf(session_pool_name, "-tf_shared-pool%d", + pool_id); + + if (nb >= strlen(session_pool_name)) { + strncat(parms.ctrl_chan_name, session_pool_name, nb); + } else { + netdev_dbg(bp->dev, "No space left for session_name\n"); + return -EINVAL; + } + } + } + + rc = ulp_tf_shared_session_resources_get(bp->ulp_ctx, session_type, + resources); + if (rc) { + netdev_dbg(bp->dev, + "Failed to get shared session resources: %d\n", rc); + return rc; + } + + rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the app id from ulp\n"); + return rc; + } + + rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + + tfp = bnxt_ulp_bp_tfp_get(bp, session_type); + parms.device_type = bnxt_ulp_cntxt_convert_dev_id(bp, ulp_dev_id); + parms.bp = bp; + + /* Open the session here, but the collect the resources during the + * mapper initialization. 
+ */ + rc = tf_open_session(tfp, &parms); + if (rc) + return rc; + + if (parms.shared_session_creator) + netdev_dbg(bp->dev, "Shared session creator\n"); + else + netdev_dbg(bp->dev, "Shared session attached\n"); + + /* Save the shared session in global data */ + rc = ulp_tf_session_tfp_set(session, session_type, tfp); + if (rc) { + netdev_dbg(bp->dev, "Failed to add shared tfp to session\n"); + return rc; + } + + rc = bnxt_tf_ulp_cntxt_tfp_set(bp->ulp_ctx, session_type, tfp); + if (rc) { + netdev_dbg(bp->dev, + "Failed to add shared tfp to ulp: %d\n", rc); + return rc; + } + + return rc; +} + +static int +ulp_tf_ctx_shared_session_attach(struct bnxt *bp, + struct bnxt_ulp_session_state *ses) +{ + enum bnxt_ulp_session_type type; + struct tf *tfp; + int rc = 0; + + /* Simply return success if shared session not enabled */ + if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) { + type = BNXT_ULP_SESSION_TYPE_SHARED; + tfp = bnxt_ulp_bp_tfp_get(bp, type); + tfp->session = ulp_tf_session_tfp_get(ses, type); + rc = ulp_tf_ctx_shared_session_open(bp, type, ses); + } + + if (bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) { + type = BNXT_ULP_SESSION_TYPE_SHARED_WC; + tfp = bnxt_ulp_bp_tfp_get(bp, type); + tfp->session = ulp_tf_session_tfp_get(ses, type); + rc = ulp_tf_ctx_shared_session_open(bp, type, ses); + } + + if (!rc) + bnxt_ulp_cntxt_num_shared_clients_set(bp->ulp_ctx, true); + + return rc; +} + +static void +ulp_tf_ctx_shared_session_detach(struct bnxt *bp) +{ + struct tf *tfp; + + if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) { + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_SHARED); + if (tfp->session) { + tf_close_session(tfp); + tfp->session = NULL; + } + } + if (bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) { + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_SHARED_WC); + if (tfp->session) { + tf_close_session(tfp); + tfp->session = NULL; + } + } + bnxt_ulp_cntxt_num_shared_clients_set(bp->ulp_ctx, false); +} + 
+/* Initialize an ULP session. + * An ULP session will contain all the resources needed to support flow + * offloads. A session is initialized as part of switchdev mode transition. + * A single vswitch instance can have multiple uplinks which means + * switchdev mode transition will be called for each of these devices. + * ULP session manager will make sure that a single ULP session is only + * initialized once. Apart from this, it also initializes MARK database, + * EEM table & flow database. ULP session manager also manages a list of + * all opened ULP sessions. + */ +static int +ulp_tf_ctx_session_open(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + u32 ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST; + struct tf_session_resources *resources; + struct tf_open_session_parms params = {{ 0 }}; + struct net_device *dev = bp->dev; + struct tf *tfp; + int rc = 0; + u8 app_id; + + memset(&params, 0, sizeof(params)); + memset(params.ctrl_chan_name, '\0', TF_SESSION_NAME_MAX); + if ((strlen(dev_name(dev->dev.parent)) >= strlen(params.ctrl_chan_name)) || + (strlen(dev_name(dev->dev.parent)) >= sizeof(params.ctrl_chan_name))) { + strncpy(params.ctrl_chan_name, dev_name(dev->dev.parent), TF_SESSION_NAME_MAX - 1); + /* Make sure the string is terminated */ + params.ctrl_chan_name[TF_SESSION_NAME_MAX - 1] = '\0'; + } else { + strcpy(params.ctrl_chan_name, dev_name(dev->dev.parent)); + } + + rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the app id from ulp.\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + + params.device_type = bnxt_ulp_cntxt_convert_dev_id(bp, ulp_dev_id); + resources = &params.resources; + rc = ulp_tf_resources_get(bp->ulp_ctx, + BNXT_ULP_SESSION_TYPE_DEFAULT, + resources); + if (rc) + return rc; + + params.bp = bp; + + tfp = bnxt_ulp_bp_tfp_get(bp, 
BNXT_ULP_SESSION_TYPE_DEFAULT); + rc = tf_open_session(tfp, &params); + if (rc) { + netdev_dbg(bp->dev, "Failed to open TF session - %s, rc = %d\n", + params.ctrl_chan_name, rc); + return -EINVAL; + } + rc = ulp_tf_session_tfp_set(session, BNXT_ULP_SESSION_TYPE_DEFAULT, tfp); + if (rc) { + netdev_dbg(bp->dev, "Failed to set TF session - %s, rc = %d\n", + params.ctrl_chan_name, rc); + return -EINVAL; + } + return rc; +} + +/* Close the ULP session. + * It takes the ulp context pointer. + */ +static void ulp_tf_ctx_session_close(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct tf *tfp; + + /* close the session in the hardware */ + if (ulp_tf_session_is_open(session, BNXT_ULP_SESSION_TYPE_DEFAULT)) { + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + tf_close_session(tfp); + } + ulp_tf_session_tfp_reset(session, BNXT_ULP_SESSION_TYPE_DEFAULT); +} + +static void +ulp_tf_init_tbl_scope_parms(struct bnxt *bp, + struct tf_alloc_tbl_scope_parms *params) +{ + struct bnxt_ulp_device_params *dparms; + u32 dev_id; + int rc = 0; + + rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id); + if (rc) + /* TBD: For now, just use default. */ + dparms = 0; + else + dparms = bnxt_ulp_device_params_get(dev_id); + + /* Set the flush timer for EEM entries. The value is in 100ms intervals, + * so 100 is 10s. 
+ */ + params->hw_flow_cache_flush_timer = 100; + + if (!dparms) { + params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY; + params->rx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY; + params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM; + params->rx_num_flows_in_k = BNXT_ULP_RX_NUM_FLOWS; + + params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY; + params->tx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY; + params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM; + params->tx_num_flows_in_k = BNXT_ULP_TX_NUM_FLOWS; + } else { + params->rx_max_key_sz_in_bits = BNXT_ULP_DFLT_RX_MAX_KEY; + params->rx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_RX_MAX_ACTN_ENTRY; + params->rx_mem_size_in_mb = BNXT_ULP_DFLT_RX_MEM; + params->rx_num_flows_in_k = + dparms->ext_flow_db_num_entries / 1024; + + params->tx_max_key_sz_in_bits = BNXT_ULP_DFLT_TX_MAX_KEY; + params->tx_max_action_entry_sz_in_bits = + BNXT_ULP_DFLT_TX_MAX_ACTN_ENTRY; + params->tx_mem_size_in_mb = BNXT_ULP_DFLT_TX_MEM; + params->tx_num_flows_in_k = + dparms->ext_flow_db_num_entries / 1024; + } + netdev_dbg(bp->dev, "Table Scope initialized with %uK flows.\n", + params->rx_num_flows_in_k); +} + +/* Initialize Extended Exact Match host memory. */ +static int +ulp_tf_eem_tbl_scope_init(struct bnxt *bp) +{ + struct tf_alloc_tbl_scope_parms params = {0}; + struct bnxt_ulp_device_params *dparms; + enum bnxt_ulp_flow_mem_type mtype; + struct tf *tfp = NULL; + u32 dev_id; + int rc = 0; + + /* Get the dev specific number of flows that needed to be supported. 
*/ + if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) { + netdev_dbg(bp->dev, "Invalid device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(bp->dev, "could not fetch the device params\n"); + return -ENODEV; + } + + if (bnxt_ulp_cntxt_mem_type_get(bp->ulp_ctx, &mtype)) + return -EINVAL; + if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) { + netdev_dbg(bp->dev, "Table Scope alloc is not required\n"); + return 0; + } + + ulp_tf_init_tbl_scope_parms(bp, &params); + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + rc = tf_alloc_tbl_scope(tfp, &params); + if (rc) { + netdev_dbg(bp->dev, "Unable to allocate eem table scope rc = %d\n", rc); + return rc; + } + + netdev_dbg(bp->dev, "TableScope=0x%0x %d\n", + params.tbl_scope_id, + params.tbl_scope_id); + + rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to set table scope id\n"); + return rc; + } + + return 0; +} + +/* Free Extended Exact Match host memory */ +static int +ulp_tf_eem_tbl_scope_deinit(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx) +{ + struct tf_free_tbl_scope_parms params = { 0 }; + struct bnxt_ulp_device_params *dparms; + enum bnxt_ulp_flow_mem_type mtype; + struct tf *tfp; + u32 dev_id; + int rc = 0; + + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfp) { + netdev_dbg(bp->dev, "Failed to get the truflow pointer\n"); + return -EINVAL; + } + + /* Get the dev specific number of flows that needed to be supported. 
*/ + if (bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &dev_id)) { + netdev_dbg(bp->dev, "Unable to get the dev id from ulp.\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(bp->dev, "could not fetch the device params\n"); + return -ENODEV; + } + + if (bnxt_ulp_cntxt_mem_type_get(ulp_ctx, &mtype)) + return -EINVAL; + if (mtype != BNXT_ULP_FLOW_MEM_TYPE_EXT) { + netdev_dbg(bp->dev, "Table Scope free is not required\n"); + return 0; + } + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp_ctx, &params.tbl_scope_id); + if (rc) { + netdev_dbg(bp->dev, "Failed to get the table scope id\n"); + return -EINVAL; + } + + rc = tf_free_tbl_scope(tfp, &params); + if (rc) { + netdev_dbg(bp->dev, "Unable to free table scope\n"); + return -EINVAL; + } + return rc; +} + +/* The function to free and deinit the ulp context data. */ +static int +ulp_tf_ctx_deinit(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + + /* close the tf session */ + ulp_tf_ctx_session_close(bp, session); + + /* The shared session must be closed last. */ + if (bnxt_ulp_cntxt_shared_session_enabled(bp->ulp_ctx)) + ulp_tf_ctx_shared_session_close(bp, BNXT_ULP_SESSION_TYPE_SHARED, + session); + + if (bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) + ulp_tf_ctx_shared_session_close(bp, + BNXT_ULP_SESSION_TYPE_SHARED_WC, + session); + + bnxt_ulp_cntxt_num_shared_clients_set(bp->ulp_ctx, false); + + /* Free the contents */ + vfree(session->cfg_data); + ulp_ctx->cfg_data = NULL; + session->cfg_data = NULL; + return 0; +} + +/* The function to allocate and initialize the ulp context data. 
*/ +static int +ulp_tf_ctx_init(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + enum bnxt_ulp_session_type stype; + struct bnxt_ulp_data *ulp_data; + enum bnxt_ulp_device_id devid; + struct tf *tfp; + u8 app_id = 0; + int rc = 0; + + /* Initialize the context entries list */ + bnxt_ulp_cntxt_list_init(); + + /* Allocate memory to hold ulp context data. */ + ulp_data = vzalloc(sizeof(*ulp_data)); + if (!ulp_data) + goto error_deinit; + + /* Increment the ulp context data reference count usage. */ + ulp_ctx->cfg_data = ulp_data; + session->cfg_data = ulp_data; + ulp_data->ref_cnt++; + ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED; + + /* Add the context to the context entries list */ + rc = bnxt_ulp_cntxt_list_add(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to add the context list entry\n"); + goto error_deinit; + } + + rc = bnxt_ulp_devid_get(bp, &devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the dev id from ulp.\n"); + goto error_deinit; + } + + rc = bnxt_ulp_cntxt_dev_id_set(bp->ulp_ctx, devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to set device for ULP init.\n"); + goto error_deinit; + } + + if (!(bp->app_id & BNXT_ULP_APP_ID_SET_CONFIGURED)) { + bp->app_id = BNXT_ULP_APP_ID_CONFIG; + bp->app_id |= BNXT_ULP_APP_ID_SET_CONFIGURED; + } + app_id = bp->app_id & ~BNXT_ULP_APP_ID_SET_CONFIGURED; + + rc = bnxt_ulp_cntxt_app_id_set(ulp_ctx, app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to set app_id for ULP init.\n"); + goto error_deinit; + } + + rc = ulp_tf_cntxt_app_caps_init(bp, app_id, devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to set caps for app(%x)/dev(%x)\n", + app_id, devid); + goto error_deinit; + } + + /* Shared session must be created before regular + * session but after the ulp_ctx is valid. 
+ */ + if (bnxt_ulp_cntxt_shared_session_enabled(ulp_ctx)) { + rc = ulp_tf_ctx_shared_session_open(bp, BNXT_ULP_SESSION_TYPE_SHARED, session); + if (rc) { + netdev_dbg(bp->dev, + "Unable to open shared session: %d\n", rc); + goto error_deinit; + } + } + + /* Multiple session support */ + if (bnxt_ulp_cntxt_multi_shared_session_enabled(bp->ulp_ctx)) { + stype = BNXT_ULP_SESSION_TYPE_SHARED_WC; + rc = ulp_tf_ctx_shared_session_open(bp, stype, session); + if (rc) { + netdev_dbg(bp->dev, + "Unable to open shared wc session (%d)\n", + rc); + goto error_deinit; + } + } + bnxt_ulp_cntxt_num_shared_clients_set(ulp_ctx, true); + + /* Open the ulp session. */ + rc = ulp_tf_ctx_session_open(bp, session); + if (rc) + goto error_deinit; + + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + bnxt_tf_ulp_cntxt_tfp_set(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT, tfp); + + return rc; + +error_deinit: + session->session_opened[BNXT_ULP_SESSION_TYPE_DEFAULT] = 1; + (void)ulp_tf_ctx_deinit(bp, session); + return rc; +} + +/* The function to initialize ulp dparms with devargs */ +static int +ulp_tf_dparms_init(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_device_params *dparms; + u32 dev_id = BNXT_ULP_DEVICE_ID_LAST; + + if (!bp->max_num_kflows) { + /* Defaults to Internal */ + bnxt_ulp_cntxt_mem_type_set(ulp_ctx, + BNXT_ULP_FLOW_MEM_TYPE_INT); + return 0; + } + + /* The max_num_kflows were set, so move to external */ + if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT)) + return -EINVAL; + + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) { + netdev_dbg(bp->dev, "Failed to get device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(bp->dev, "Failed to get device parms\n"); + return -EINVAL; + } + + /* num_flows = max_num_kflows * 1024 */ + dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024; + /* GFID = 2 * num_flows */ + dparms->mark_db_gfid_entries = 
dparms->ext_flow_db_num_entries * 2; + netdev_dbg(bp->dev, "Set the number of flows = %lld\n", dparms->ext_flow_db_num_entries); + + return 0; +} + +static int +ulp_tf_ctx_attach(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + u32 flags, dev_id = BNXT_ULP_DEVICE_ID_LAST; + struct tf *tfp; + int rc = 0; + u8 app_id; + + /* Increment the ulp context data reference count usage. */ + ulp_ctx->cfg_data = session->cfg_data; + ulp_ctx->cfg_data->ref_cnt++; + + /* update the session details in bnxt tfp */ + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfp) { + netdev_dbg(bp->dev, "Failed to get tfp entry\n"); + return -EINVAL; + } + tfp->session = ulp_tf_session_tfp_get(session, BNXT_ULP_SESSION_TYPE_DEFAULT); + + /* Add the context to the context entries list */ + rc = bnxt_ulp_cntxt_list_add(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to add the context list entry\n"); + return -EINVAL; + } + + /* The supported flag will be set during the init. Use it now to + * know if we should go through the attach. 
+ */ + rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the app id from ulp.\n"); + return -EINVAL; + } + + rc = bnxt_ulp_devid_get(bp, &dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the dev id from ulp.\n"); + return -EINVAL; + } + + flags = ulp_ctx->cfg_data->ulp_flags; + if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) { + netdev_dbg(bp->dev, + "%s: APP ID %d, Device ID: 0x%x not supported.\n", + __func__, app_id, dev_id); + return -EINVAL; + } + + /* Create a TF Client */ + rc = ulp_tf_ctx_session_open(bp, session); + if (rc) { + netdev_dbg(bp->dev, "Failed to open ctxt session, rc:%d\n", rc); + tfp->session = NULL; + return rc; + } + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + bnxt_tf_ulp_cntxt_tfp_set(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT, tfp); + + /* Attach to the shared session, must be called after the + * ulp_ctx_attach in order to ensure that ulp data is available + * for attaching. + */ + rc = ulp_tf_ctx_shared_session_attach(bp, session); + if (rc) { + netdev_dbg(bp->dev, + "Failed attach to shared session: %d\n", + rc); + } + return rc; +} + +static void +ulp_tf_ctx_detach(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct tf *tfp; + + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (tfp && tfp->session) { + tf_close_session(tfp); + tfp->session = NULL; + } + + /* always detach/close shared after the session. */ + ulp_tf_ctx_shared_session_detach(bp); +} + +/* Internal api to enable NAT feature. + * Set set_flag to 1 to set the value or zero to reset the value. + * returns 0 on success. 
+ */ +static int +ulp_tf_global_cfg_update(struct bnxt *bp, + enum tf_dir dir, + enum tf_global_config_type type, + u32 offset, + u32 value, + u32 set_flag) +{ + struct tf_global_cfg_parms parms = { 0 }; + u32 global_cfg = 0; + int rc; + + /* Initialize the params */ + parms.dir = dir, + parms.type = type, + parms.offset = offset, + parms.config = (u8 *)&global_cfg, + parms.config_sz_in_bytes = sizeof(global_cfg); + + rc = tf_get_global_cfg(bp->tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to get global cfg 0x%x rc:%d\n", + type, rc); + return rc; + } + + if (set_flag) + global_cfg |= value; + else + global_cfg &= ~value; + + /* SET the register RE_CFA_REG_ACT_TECT */ + rc = tf_set_global_cfg(bp->tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to set global cfg 0x%x rc:%d\n", + type, rc); + return rc; + } + return rc; +} + +/* + * When a port is deinit'ed by dpdk. This function is called + * and this function clears the ULP context and rest of the + * infrastructure associated with it. 
+ */ +static void +ulp_tf_deinit(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + + if (!bp->ulp_ctx || !ulp_ctx->cfg_data) + return; + + /* cleanup the eem table scope */ + ulp_tf_eem_tbl_scope_deinit(bp, ulp_ctx); + + /* cleanup the flow database */ + ulp_flow_db_deinit(ulp_ctx); + + /* Delete the Mark database */ + ulp_mark_db_deinit(ulp_ctx); + + /* cleanup the ulp mapper */ + ulp_mapper_deinit(ulp_ctx); + + /* cleanup the ulp matcher */ + ulp_matcher_deinit(ulp_ctx); + + /* Delete the Flow Counter Manager */ + ulp_fc_mgr_deinit(ulp_ctx); + + /* Delete the Port database */ + ulp_port_db_deinit(ulp_ctx); + + /* Disable NAT feature */ + (void)ulp_tf_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP, + TF_TUNNEL_ENCAP_NAT, + BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0); + + (void)ulp_tf_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP, + TF_TUNNEL_ENCAP_NAT, + BNXT_ULP_NAT_OUTER_MOST_FLAGS, 0); + + /* free the flow db lock */ + mutex_destroy(&ulp_ctx->cfg_data->flow_db_lock); + + /* Delete the ulp context and tf session and free the ulp context */ + ulp_tf_ctx_deinit(bp, session); + netdev_dbg(bp->dev, "ulp ctx has been deinitialized\n"); +} + +/* + * When a port is initialized by dpdk. This functions is called + * and this function initializes the ULP context and rest of the + * infrastructure associated with it. + */ +static int32_t +ulp_tf_init(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + u32 ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST; + int rc; + + if (!bp->tfp) + return -ENOMEM; + + /* Allocate and Initialize the ulp context. 
*/ + rc = ulp_tf_ctx_init(bp, session); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the ulp context\n"); + goto jump_to_error; + } + + mutex_init(&ulp_ctx->cfg_data->flow_db_lock); + + /* Defaults to Internal */ + rc = bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_INT); + if (rc) { + netdev_dbg(bp->dev, "Failed to write mem_type in ulp ctxt\n"); + goto jump_to_error; + } + + /* Initialize ulp dparms with values devargs passed */ + rc = ulp_tf_dparms_init(bp, bp->ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize the dparms\n"); + goto jump_to_error; + } + + /* create the port database */ + rc = ulp_port_db_init(ulp_ctx, bp->port_count); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the port database\n"); + goto jump_to_error; + } + + /* Create the Mark database. */ + rc = ulp_mark_db_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the mark database\n"); + goto jump_to_error; + } + + /* Create the flow database. */ + rc = ulp_flow_db_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the flow database\n"); + goto jump_to_error; + } + + /* Create the eem table scope. */ + rc = ulp_tf_eem_tbl_scope_init(bp); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the eem scope table\n"); + goto jump_to_error; + } + + rc = ulp_matcher_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp matcher\n"); + goto jump_to_error; + } + + rc = ulp_mapper_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp mapper\n"); + goto jump_to_error; + } + + rc = ulp_fc_mgr_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp flow counter mgr\n"); + goto jump_to_error; + } + + /* Enable NAT feature. 
Set the global configuration register + * Tunnel encap to enable NAT with the reuse of existing inner + * L2 header smac and dmac + */ + rc = ulp_tf_global_cfg_update(bp, TF_DIR_RX, TF_TUNNEL_ENCAP, + TF_TUNNEL_ENCAP_NAT, + BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set rx global configuration\n"); + goto jump_to_error; + } + + rc = ulp_tf_global_cfg_update(bp, TF_DIR_TX, TF_TUNNEL_ENCAP, + TF_TUNNEL_ENCAP_NAT, + BNXT_ULP_NAT_OUTER_MOST_FLAGS, 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set tx global configuration\n"); + goto jump_to_error; + } + + rc = bnxt_ulp_cntxt_dev_id_get(bp->ulp_ctx, &ulp_dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + + rc = bnxt_flow_meter_init(bp); + if (rc) { + if (rc != -EOPNOTSUPP) { + netdev_err(bp->dev, "Failed to config meter\n"); + goto jump_to_error; + } + rc = 0; + } + + netdev_dbg(bp->dev, "ulp ctx has been initialized\n"); + return rc; + +jump_to_error: + ((struct bnxt_ulp_context *)bp->ulp_ctx)->ops->ulp_deinit(bp, session); + return rc; +} + +const struct bnxt_ulp_core_ops bnxt_ulp_tf_core_ops = { + .ulp_ctx_attach = ulp_tf_ctx_attach, + .ulp_ctx_detach = ulp_tf_ctx_detach, + .ulp_deinit = ulp_tf_deinit, + .ulp_init = ulp_tf_init, + .ulp_tfp_get = bnxt_tf_ulp_cntxt_tfp_get, + .ulp_vfr_session_fid_add = NULL, + .ulp_vfr_session_fid_rem = NULL, +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.h new file mode 100644 index 000000000000..dbcb1281cfae --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p5.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_ULP_TF_H_ +#define _BNXT_ULP_TF_H_ + +#include +#include "bnxt.h" + +#include "tf_core.h" +#include "ulp_template_db_enum.h" +#include "bnxt_tf_common.h" + +struct tf * +bnxt_ulp_bp_tfp_get(struct bnxt *bp, enum bnxt_ulp_session_type type); + +struct tf * +bnxt_get_tfp_session(struct bnxt *bp, enum bnxt_session_type type); + +/* Function to get the tfp session details from ulp context. */ +void * +bnxt_tf_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp, enum bnxt_ulp_session_type s_type); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.c new file mode 100644 index 000000000000..409d95da1c41 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.c @@ -0,0 +1,1149 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p7.h" +#include "bnxt_ulp_flow.h" +#include "bnxt_tf_common.h" +#include "bnxt_debugfs.h" +#include "tf_core.h" +#include "tfc.h" +#include "tf_ext_flow_handle.h" + +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_mark_mgr.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_matcher.h" +#include "ulp_port_db.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +bool +bnxt_ulp_cntxt_shared_tbl_scope_enabled(struct bnxt_ulp_context *ulp_ctx) +{ + u32 flags = 0; + int rc; + + rc = bnxt_ulp_cntxt_ptr2_ulp_flags_get(ulp_ctx, &flags); + if (rc) + return false; + return !!(flags & BNXT_ULP_SHARED_TBL_SCOPE_ENABLED); +} + +int +bnxt_ulp_cntxt_tfcp_set(struct bnxt_ulp_context *ulp, struct tfc *tfcp) +{ + enum bnxt_ulp_tfo_type tfo_type = BNXT_ULP_TFO_TYPE_P7; + + if (!ulp) + return -EINVAL; + + /* If NULL, this is 
invalidating an entry */ + if (!tfcp) + tfo_type = BNXT_ULP_TFO_TYPE_INVALID; + ulp->tfo_type = tfo_type; + ulp->tfcp = tfcp; + + return 0; +} + +void * +bnxt_ulp_cntxt_tfcp_get(struct bnxt_ulp_context *ulp, enum bnxt_ulp_session_type s_type) +{ + if (!ulp) + return NULL; + + if (ulp->tfo_type != BNXT_ULP_TFO_TYPE_P7) { + netdev_dbg(ulp->bp->dev, "Wrong tf type %d != %d\n", + ulp->tfo_type, BNXT_ULP_TFO_TYPE_P7); + return NULL; + } + + return (struct tfc *)ulp->tfcp; +} + +u32 +bnxt_ulp_cntxt_tbl_scope_max_pools_get(struct bnxt_ulp_context *ulp_ctx) +{ + /* Max pools can be 1 or greater, always return workable value */ + if (ulp_ctx && + ulp_ctx->cfg_data && + ulp_ctx->cfg_data->max_pools) + return ulp_ctx->cfg_data->max_pools; + return 1; +} + +int +bnxt_ulp_cntxt_tbl_scope_max_pools_set(struct bnxt_ulp_context *ulp_ctx, + u32 max) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + /* make sure that max is at least 1 */ + if (!max) + max = 1; + + ulp_ctx->cfg_data->max_pools = max; + return 0; +} + +enum tfc_tbl_scope_bucket_factor +bnxt_ulp_cntxt_em_mulitplier_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return TFC_TBL_SCOPE_BUCKET_FACTOR_1; + + return ulp_ctx->cfg_data->em_multiplier; +} + +int +bnxt_ulp_cntxt_em_mulitplier_set(struct bnxt_ulp_context *ulp_ctx, + enum tfc_tbl_scope_bucket_factor factor) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + ulp_ctx->cfg_data->em_multiplier = factor; + return 0; +} + +u32 +bnxt_ulp_cntxt_num_rx_flows_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->num_rx_flows; +} + +int +bnxt_ulp_cntxt_num_rx_flows_set(struct bnxt_ulp_context *ulp_ctx, u32 num) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + ulp_ctx->cfg_data->num_rx_flows = num; + return 0; +} + +u32 +bnxt_ulp_cntxt_num_tx_flows_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + 
return ulp_ctx->cfg_data->num_tx_flows; +} + +int +bnxt_ulp_cntxt_num_tx_flows_set(struct bnxt_ulp_context *ulp_ctx, u32 num) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + ulp_ctx->cfg_data->num_tx_flows = num; + return 0; +} + +u16 +bnxt_ulp_cntxt_em_rx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->em_rx_key_max_sz; +} + +int +bnxt_ulp_cntxt_em_rx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + u16 max) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->em_rx_key_max_sz = max; + return 0; +} + +u16 +bnxt_ulp_cntxt_em_tx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->em_tx_key_max_sz; +} + +int +bnxt_ulp_cntxt_em_tx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + u16 max) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->em_tx_key_max_sz = max; + return 0; +} + +u16 +bnxt_ulp_cntxt_act_rec_rx_max_sz_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->act_rx_max_sz; +} + +int +bnxt_ulp_cntxt_act_rec_rx_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + int16_t max) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->act_rx_max_sz = max; + return 0; +} + +u16 +bnxt_ulp_cntxt_act_rec_tx_max_sz_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return 0; + return ulp_ctx->cfg_data->act_tx_max_sz; +} + +int +bnxt_ulp_cntxt_act_rec_tx_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + int16_t max) +{ + if (!ulp_ctx || !ulp_ctx->cfg_data) + return -EINVAL; + + ulp_ctx->cfg_data->act_tx_max_sz = max; + return 0; +} + +u32 +bnxt_ulp_cntxt_page_sz_get(struct bnxt_ulp_context *ulp_ctx) +{ + if (!ulp_ctx) + return 0; + + return ulp_ctx->cfg_data->page_sz; +} + +int +bnxt_ulp_cntxt_page_sz_set(struct 
bnxt_ulp_context *ulp_ctx, + u32 page_sz) +{ + if (!ulp_ctx) + return -EINVAL; + ulp_ctx->cfg_data->page_sz = page_sz; + return 0; +} + +static int +ulp_tfc_dparms_init(struct bnxt *bp, + struct bnxt_ulp_context *ulp_ctx, + u32 dev_id) +{ + u32 num_flows = 0, num_rx_flows = 0, num_tx_flows = 0; + struct bnxt_ulp_device_params *dparms; + + /* The max_num_kflows were set, so move to external */ + if (bnxt_ulp_cntxt_mem_type_set(ulp_ctx, BNXT_ULP_FLOW_MEM_TYPE_EXT)) { + netdev_dbg(bp->dev, "%s: ulp_cntxt_mem_type_set failed\n", __func__); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(bp->dev, "Failed to get device parms\n"); + return -EINVAL; + } + + if (bp->max_num_kflows) { + num_flows = bp->max_num_kflows * 1024; + dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024; + } else { + num_rx_flows = bnxt_ulp_cntxt_num_rx_flows_get(ulp_ctx); + num_tx_flows = bnxt_ulp_cntxt_num_tx_flows_get(ulp_ctx); + num_flows = num_rx_flows + num_tx_flows; + } + + dparms->ext_flow_db_num_entries = num_flows; + + /* GFID = 2 * num_flows */ + dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2; + netdev_dbg(bp->dev, "Set the number of flows = %llu\n", + dparms->ext_flow_db_num_entries); + + return 0; +} + +static void +ulp_tfc_tbl_scope_deinit(struct bnxt *bp) +{ + u16 fid = 0, fid_cnt = 0; + struct tfc *tfcp; + u8 tsid = 0; + int rc; + + tfcp = bnxt_ulp_cntxt_tfcp_get(bp->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) + return; + + rc = bnxt_ulp_cntxt_tsid_get(bp->ulp_ctx, &tsid); + if (rc) + return; + + rc = bnxt_ulp_cntxt_fid_get(bp->ulp_ctx, &fid); + if (rc) + return; + + rc = tfc_tbl_scope_cpm_free(tfcp, tsid); + if (rc) + netdev_dbg(bp->dev, "Failed Freeing CPM TSID:%d FID:%d\n", + tsid, fid); + else + netdev_dbg(bp->dev, "Freed CPM TSID:%d FID:%d\n", tsid, fid); + + rc = tfc_tbl_scope_mem_free(tfcp, fid, tsid); + if (rc) + netdev_dbg(bp->dev, "Failed freeing tscope mem TSID:%d FID:%d\n", + tsid, 
fid); + else + netdev_dbg(bp->dev, "Freed tscope mem TSID:%d FID:%d\n", + tsid, fid); + + rc = tfc_tbl_scope_fid_rem(tfcp, fid, tsid, &fid_cnt); + if (rc) + netdev_dbg(bp->dev, "Failed removing FID from TSID:%d FID:%d\n", + tsid, fid); + else + netdev_dbg(bp->dev, "Removed FID from TSID:%d FID:%d\n", + tsid, fid); +} + +static int +ulp_tfc_tbl_scope_query(struct bnxt *bp, struct tfc *tfcp, u16 fid, u16 max_pools, + bool shared, struct tfc_tbl_scope_size_query_parms *qparms) +{ + u16 max_lkup_sz[CFA_DIR_MAX], max_act_sz[CFA_DIR_MAX]; + int rc; + + max_lkup_sz[CFA_DIR_RX] = + bnxt_ulp_cntxt_em_rx_key_max_sz_get(bp->ulp_ctx); + max_lkup_sz[CFA_DIR_TX] = + bnxt_ulp_cntxt_em_tx_key_max_sz_get(bp->ulp_ctx); + max_act_sz[CFA_DIR_RX] = + bnxt_ulp_cntxt_act_rec_rx_max_sz_get(bp->ulp_ctx); + max_act_sz[CFA_DIR_TX] = + bnxt_ulp_cntxt_act_rec_tx_max_sz_get(bp->ulp_ctx); + + /* Calculate the sizes for setting up memory */ + qparms->shared = shared; + qparms->max_pools = max_pools; + qparms->factor = bnxt_ulp_cntxt_em_mulitplier_get(bp->ulp_ctx); + qparms->flow_cnt[CFA_DIR_RX] = bnxt_ulp_cntxt_num_rx_flows_get(bp->ulp_ctx); + qparms->flow_cnt[CFA_DIR_TX] = bnxt_ulp_cntxt_num_tx_flows_get(bp->ulp_ctx); + qparms->key_sz_in_bytes[CFA_DIR_RX] = max_lkup_sz[CFA_DIR_RX]; + qparms->key_sz_in_bytes[CFA_DIR_TX] = max_lkup_sz[CFA_DIR_TX]; + qparms->act_rec_sz_in_bytes[CFA_DIR_RX] = max_act_sz[CFA_DIR_RX]; + qparms->act_rec_sz_in_bytes[CFA_DIR_TX] = max_act_sz[CFA_DIR_TX]; + rc = tfc_tbl_scope_size_query(tfcp, qparms); + if (rc) + return rc; + + return 0; +} + +#define ULP_SHARED_TSID_WAIT_TIMEOUT 5000 +#define ULP_SHARED_TSID_WAIT_TIME 50 +static int +ulp_tfc_tbl_scope_configure(struct bnxt *bp, struct tfc *tfcp, bool shared, bool first, u8 tsid) +{ + u32 timeout = ULP_SHARED_TSID_WAIT_TIMEOUT; + u32 timeout_max = timeout * 2; + u32 timeout_min = timeout; + bool configured; + int rc; + + /* If we are shared and not the first table scope creator + */ + if (shared && !first) { + do { + 
usleep_range(timeout_min, timeout_max); + rc = tfc_tbl_scope_config_state_get(tfcp, tsid, &configured); + if (rc) { + netdev_dbg(bp->dev, "Failed get tsid(%d) config state\n", rc); + return rc; + } + timeout -= ULP_SHARED_TSID_WAIT_TIME; + netdev_dbg(bp->dev, "Waiting %d ms for shared tsid(%d)\n", timeout, tsid); + } while (!configured && timeout > 0); + if (timeout <= 0) { + netdev_dbg(bp->dev, "Timed out on shared tsid(%d)\n", tsid); + return -ETIMEDOUT; + } + } + return 0; +} + +static int +ulp_tfc_tbl_scope_mem_alloc(struct bnxt *bp, struct tfc *tfcp, bool first, u8 tsid, u16 max_pools, + struct tfc_tbl_scope_size_query_parms *qparms) +{ + u16 max_lkup_sz[CFA_DIR_MAX], max_act_sz[CFA_DIR_MAX]; + struct tfc_tbl_scope_mem_alloc_parms mem_parms; + struct tfc_tbl_scope_cpm_alloc_parms cparms; + u16 fid = bp->pf.fw_fid; + int rc = 0; + + mem_parms.first = first; + mem_parms.static_bucket_cnt_exp[CFA_DIR_RX] = qparms->static_bucket_cnt_exp[CFA_DIR_RX]; + mem_parms.static_bucket_cnt_exp[CFA_DIR_TX] = qparms->static_bucket_cnt_exp[CFA_DIR_TX]; + mem_parms.lkup_rec_cnt[CFA_DIR_RX] = qparms->lkup_rec_cnt[CFA_DIR_RX]; + mem_parms.lkup_rec_cnt[CFA_DIR_TX] = qparms->lkup_rec_cnt[CFA_DIR_TX]; + mem_parms.act_rec_cnt[CFA_DIR_RX] = qparms->act_rec_cnt[CFA_DIR_RX]; + mem_parms.act_rec_cnt[CFA_DIR_TX] = qparms->act_rec_cnt[CFA_DIR_TX]; + mem_parms.pbl_page_sz_in_bytes = bnxt_ulp_cntxt_page_sz_get(bp->ulp_ctx); + mem_parms.max_pools = max_pools; + + mem_parms.lkup_pool_sz_exp[CFA_DIR_RX] = qparms->lkup_pool_sz_exp[CFA_DIR_RX]; + mem_parms.lkup_pool_sz_exp[CFA_DIR_TX] = qparms->lkup_pool_sz_exp[CFA_DIR_TX]; + + mem_parms.act_pool_sz_exp[CFA_DIR_RX] = qparms->act_pool_sz_exp[CFA_DIR_RX]; + mem_parms.act_pool_sz_exp[CFA_DIR_TX] = qparms->act_pool_sz_exp[CFA_DIR_TX]; + mem_parms.local = true; + rc = tfc_tbl_scope_mem_alloc(tfcp, fid, tsid, &mem_parms); + if (rc) { + netdev_dbg(bp->dev, + "Failed to allocate tscope mem TSID:%d on FID:%d\n", tsid, fid); + return rc; + } + 
netdev_dbg(bp->dev, "Allocated tscope mem TSID:%d on FID:%d\n", tsid, fid); + + max_lkup_sz[CFA_DIR_RX] = + bnxt_ulp_cntxt_em_rx_key_max_sz_get(bp->ulp_ctx); + max_lkup_sz[CFA_DIR_TX] = + bnxt_ulp_cntxt_em_tx_key_max_sz_get(bp->ulp_ctx); + max_act_sz[CFA_DIR_RX] = + bnxt_ulp_cntxt_act_rec_rx_max_sz_get(bp->ulp_ctx); + max_act_sz[CFA_DIR_TX] = + bnxt_ulp_cntxt_act_rec_tx_max_sz_get(bp->ulp_ctx); + + /* The max contiguous is in 32 Bytes records, so convert Bytes to 32 + * Byte records. + */ + cparms.lkup_max_contig_rec[CFA_DIR_RX] = (max_lkup_sz[CFA_DIR_RX] + 31) / 32; + cparms.lkup_max_contig_rec[CFA_DIR_TX] = (max_lkup_sz[CFA_DIR_TX] + 31) / 32; + cparms.act_max_contig_rec[CFA_DIR_RX] = (max_act_sz[CFA_DIR_RX] + 31) / 32; + cparms.act_max_contig_rec[CFA_DIR_TX] = (max_act_sz[CFA_DIR_TX] + 31) / 32; + cparms.max_pools = max_pools; + + rc = tfc_tbl_scope_cpm_alloc(tfcp, tsid, &cparms); + if (rc) + netdev_dbg(bp->dev, "Failed to allocate CPM TSID:%d FID:%d\n", tsid, fid); + else + netdev_dbg(bp->dev, "Allocated CPM TSID:%d FID:%d\n", tsid, fid); + + return rc; +} + +static int +ulp_tfc_tbl_scope_init(struct bnxt *bp, enum cfa_app_type app_type) +{ + struct tfc_tbl_scope_size_query_parms qparms = { 0 }; + bool first = true, shared = false; + u16 fid = bp->pf.fw_fid; + struct tfc *tfcp; + u16 max_pools; + u8 tsid = 0; + int rc = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(bp->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) + return -EINVAL; + + max_pools = bnxt_ulp_cntxt_tbl_scope_max_pools_get(bp->ulp_ctx); + + rc = ulp_tfc_tbl_scope_query(bp, tfcp, fid, max_pools, shared, &qparms); + if (rc) { + netdev_dbg(bp->dev, "%s:Failed to query tbl scope size during init, rc %d\n", + __func__, rc); + return rc; + } + + rc = tfc_tbl_scope_id_alloc(tfcp, shared, app_type, &tsid, &first); + if (rc) { + netdev_dbg(bp->dev, "Failed to allocate tscope\n"); + return rc; + } + + rc = bnxt_ulp_cntxt_tsid_set(bp->ulp_ctx, tsid); + if (rc) + return rc; + + netdev_dbg(bp->dev, "Allocated 
tscope TSID:%d type:%s\n", tsid, + app_type == CFA_APP_TYPE_AFM ? "NIC FLOW" : "TRUFLOW"); + + rc = ulp_tfc_tbl_scope_configure(bp, tfcp, shared, first, tsid); + if (rc) { + netdev_dbg(bp->dev, "Could not configure tscope state, rc = %d\n", rc); + return rc; + } + + rc = ulp_tfc_tbl_scope_mem_alloc(bp, tfcp, first, tsid, max_pools, &qparms); + if (rc) { + netdev_dbg(bp->dev, "Failed to allocate tbl scope resources, rc = %d\n", rc); + return rc; + } + + return 0; +} + +static int +ulp_tfc_cntxt_app_caps_init(struct bnxt *bp, u8 app_id, u32 dev_id) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_app_capabilities_info *info; + bool found = false; + u32 num = 0, rc; + u16 i; + + if (ULP_APP_DEV_UNSUPPORTED_ENABLED(ulp_ctx->cfg_data->ulp_flags)) { + netdev_dbg(bp->dev, "APP ID %d, Device ID: 0x%x not supported.\n", + app_id, dev_id); + return -EINVAL; + } + + info = bnxt_ulp_app_cap_list_get(&num); + if (!info || !num) { + netdev_dbg(bp->dev, "Failed to get app capabilities.\n"); + return -EINVAL; + } + + for (i = 0; i < num && !found; i++) { + if (info[i].app_id != app_id || info[i].device_id != dev_id) + continue; + found = true; + if (info[i].flags & BNXT_ULP_APP_CAP_SHARED_EN) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_SHARED_SESSION_ENABLED; + if (info[i].flags & BNXT_ULP_APP_CAP_HOT_UPGRADE_EN) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_HIGH_AVAIL_ENABLED; + if (info[i].flags & BNXT_ULP_APP_CAP_UNICAST_ONLY) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_UNICAST_ONLY; + if (info[i].flags & BNXT_ULP_APP_CAP_IP_TOS_PROTO_SUPPORT) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_TOS_PROTO_SUPPORT; + if (info[i].flags & BNXT_ULP_APP_CAP_BC_MC_SUPPORT) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_BC_MC_SUPPORT; + if (info[i].flags & BNXT_ULP_APP_CAP_SOCKET_DIRECT) { + /* Enable socket direction only if MR is enabled in fw*/ + if (BNXT_MR(bp)) { + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_SOCKET_DIRECT; + netdev_dbg(bp->dev, + 
"Socket Direct feature is enabled\n"); + } + } + bnxt_ulp_default_app_priority_set(ulp_ctx, + info[i].default_priority); + bnxt_ulp_max_def_priority_set(ulp_ctx, + info[i].max_def_priority); + bnxt_ulp_min_flow_priority_set(ulp_ctx, + info[i].min_flow_priority); + bnxt_ulp_max_flow_priority_set(ulp_ctx, + info[i].max_flow_priority); + ulp_ctx->cfg_data->feature_bits = info[i].feature_bits; + bnxt_ulp_cntxt_ptr2_default_class_bits_set(ulp_ctx, + info[i].default_class_bits); + bnxt_ulp_cntxt_ptr2_default_act_bits_set(ulp_ctx, + info[i].default_act_bits); + if (info[i].flags & BNXT_ULP_APP_CAP_DSCP_REMAP) + ulp_ctx->cfg_data->ulp_flags |= + BNXT_ULP_APP_DSCP_REMAP_ENABLED; + + rc = bnxt_ulp_cntxt_tbl_scope_max_pools_set(ulp_ctx, + info[i].max_pools); + if (rc) + return rc; + rc = bnxt_ulp_cntxt_em_mulitplier_set(ulp_ctx, + info[i].em_multiplier); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_num_rx_flows_set(ulp_ctx, + info[i].num_rx_flows); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_num_tx_flows_set(ulp_ctx, + info[i].num_tx_flows); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_em_rx_key_max_sz_set(ulp_ctx, + info[i].em_rx_key_max_sz); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_em_tx_key_max_sz_set(ulp_ctx, + info[i].em_tx_key_max_sz); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_act_rec_rx_max_sz_set(ulp_ctx, + info[i].act_rx_max_sz); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_act_rec_tx_max_sz_set(ulp_ctx, + info[i].act_tx_max_sz); + if (rc) + return rc; + + rc = bnxt_ulp_cntxt_page_sz_set(ulp_ctx, + info[i].pbl_page_sz_in_bytes); + if (rc) + return rc; + bnxt_ulp_num_key_recipes_set(ulp_ctx, + info[i].num_key_recipes_per_dir); + } + if (!found) { + netdev_dbg(bp->dev, "APP ID %d, Device ID: 0x%x not supported.\n", + app_id, dev_id); + ulp_ctx->cfg_data->ulp_flags |= BNXT_ULP_APP_DEV_UNSUPPORTED; + return -EINVAL; + } + + return 0; +} + +/* The function to free and deinit the ulp context data. 
*/ +static int +ulp_tfc_ctx_deinit(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + /* Free the contents */ + vfree(session->cfg_data); + ulp_ctx->cfg_data = NULL; + session->cfg_data = NULL; + return 0; +} + +/* The function to allocate and initialize the ulp context data. */ +static int +ulp_tfc_ctx_init(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_data *ulp_data; + enum bnxt_ulp_device_id devid; + u8 app_id = 0; + int rc = 0; + + /* Initialize the context entries list */ + bnxt_ulp_cntxt_list_init(); + + /* Allocate memory to hold ulp context data. */ + ulp_data = vzalloc(sizeof(*ulp_data)); + if (!ulp_data) + return -ENOMEM; + + /* Increment the ulp context data reference count usage. */ + ulp_ctx->cfg_data = ulp_data; + session->cfg_data = ulp_data; + ulp_data->ref_cnt++; + + if (app_type == CFA_APP_TYPE_TF) + ulp_data->ulp_flags |= BNXT_ULP_VF_REP_ENABLED; + + /* Add the context to the context entries list */ + rc = bnxt_ulp_cntxt_list_add(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to add the context list entry\n"); + goto error_deinit; + } + + rc = bnxt_ulp_devid_get(bp, &devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to determine device for ULP init.\n"); + goto error_deinit; + } + + rc = bnxt_ulp_cntxt_dev_id_set(ulp_ctx, devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to set device for ULP init.\n"); + goto error_deinit; + } + + if (!(bp->app_id & BNXT_ULP_APP_ID_SET_CONFIGURED)) { + bp->app_id = BNXT_ULP_APP_ID_CONFIG; + bp->app_id |= BNXT_ULP_APP_ID_SET_CONFIGURED; + } + app_id = bp->app_id & ~BNXT_ULP_APP_ID_SET_CONFIGURED; + + rc = bnxt_ulp_cntxt_app_id_set(ulp_ctx, app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to set app_id for ULP init.\n"); + goto error_deinit; + } + netdev_dbg(bp->dev, "Ulp initialized with app id %d\n", app_id); + + rc = 
ulp_tfc_dparms_init(bp, ulp_ctx, devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to init dparms for app(%x)/dev(%x)\n", + app_id, devid); + goto error_deinit; + } + + rc = ulp_tfc_cntxt_app_caps_init(bp, app_id, devid); + if (rc) { + netdev_dbg(bp->dev, "Unable to set caps for app(%x)/dev(%x)\n", + app_id, devid); + goto error_deinit; + } + + return rc; + +error_deinit: + session->session_opened[BNXT_ULP_SESSION_TYPE_DEFAULT] = 1; + (void)ulp_tfc_ctx_deinit(bp, session); + return rc; +} + +static int +ulp_tfc_vfr_session_fid_add(struct bnxt_ulp_context *ulp_ctx, u16 rep_fid) +{ + u16 fid_cnt = 0, sid = 0; + struct tfc *tfcp = NULL; + int rc; + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get tfcp from ulp_ctx\n"); + return -EINVAL; + } + + /* Get the session id */ + rc = bnxt_ulp_cntxt_sid_get(ulp_ctx, &sid); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get SID for VFR FID=%d\n", rep_fid); + return rc; + } + + rc = tfc_session_fid_add(tfcp, rep_fid, sid, &fid_cnt); + if (!rc) + netdev_dbg(ulp_ctx->bp->dev, + "EFID=%d added to SID=%d, %d total.\n", + rep_fid, sid, fid_cnt); + else + netdev_dbg(ulp_ctx->bp->dev, + "Failed to add EFID=%d to SID=%d\n", + rep_fid, sid); + return rc; +} + +static int +ulp_tfc_vfr_session_fid_rem(struct bnxt_ulp_context *ulp_ctx, u16 rep_fid) +{ + u16 fid_cnt = 0, sid = 0; + struct tfc *tfcp = NULL; + int rc; + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp_ctx->bp->dev, "Unable tfcp from ulp_ctx\n"); + return -EINVAL; + } + + /* Get the session id */ + rc = bnxt_ulp_cntxt_sid_get(ulp_ctx, &sid); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get SID for VFR FID=%d\n", rep_fid); + return rc; + } + + rc = tfc_session_fid_rem(tfcp, rep_fid, &fid_cnt); + if (!rc) + netdev_dbg(ulp_ctx->bp->dev, + "Removed EFID=%d from SID=%d, %d remain.\n", + rep_fid, sid, fid_cnt); + else + 
netdev_dbg(ulp_ctx->bp->dev, "Failed to remove EFID=%d from SID=%d\n", + rep_fid, sid); + + return rc; +} + +/* Entry point for Truflow tfo allocation. + */ +int +bnxt_ulp_tfo_init(struct bnxt *bp) +{ + struct tfc *tfp = NULL; + int rc; + + tfp = vzalloc(sizeof(*tfp)); + if (!tfp) + return -ENOMEM; + + bp->tfp = tfp; + tfp->bp = bp; + rc = tfc_open(tfp); + if (rc) { + netdev_dbg(bp->dev, "tfc_open() failed: %d\n", rc); + vfree(bp->tfp); + bp->tfp = NULL; + } + + return rc; +} + +/* When a port is de-initialized. This functions clears up + * the tfo region. + */ +void +bnxt_ulp_tfo_deinit(struct bnxt *bp) +{ + /* Free TFC here until Nic Flow support enabled in ULP */ + tfc_close(bp->tfp); + vfree(bp->tfp); + bp->tfp = NULL; +} + +static int +ulp_tfc_ctx_attach(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + u32 flags, dev_id = BNXT_ULP_DEVICE_ID_LAST; + struct tfc *tfcp = bp->tfp; + u16 fid_cnt = 0; + int rc = 0; + u8 app_id; + + rc = bnxt_ulp_cntxt_tfcp_set(bp->ulp_ctx, tfcp); + if (rc) { + netdev_dbg(bp->dev, "Failed to add tfcp to ulp ctxt\n"); + return rc; + } + + rc = bnxt_ulp_devid_get(bp, &dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + + /* Increment the ulp context data reference count usage. 
*/ + ulp_ctx->cfg_data = session->cfg_data; + ulp_ctx->cfg_data->ref_cnt++; + + if (app_type != CFA_APP_TYPE_AFM) { + rc = tfc_session_fid_add(tfcp, bp->pf.fw_fid, session->session_id, &fid_cnt); + if (rc) { + netdev_dbg(bp->dev, "Failed to add RFID:%d to SID:%d.\n", + bp->pf.fw_fid, session->session_id); + return rc; + } + netdev_dbg(bp->dev, "SID:%d added RFID:%d\n", session->session_id, bp->pf.fw_fid); + } + + rc = bnxt_ulp_cntxt_sid_set(bp->ulp_ctx, session->session_id); + if (rc) { + netdev_dbg(bp->dev, "Failed to add fid to session.\n"); + return rc; + } + + /* Add the context to the context entries list */ + rc = bnxt_ulp_cntxt_list_add(bp->ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to add the context list entry\n"); + return -EINVAL; + } + + /* + * The supported flag will be set during the init. Use it now to + * know if we should go through the attach. + */ + rc = bnxt_ulp_cntxt_app_id_get(bp->ulp_ctx, &app_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get the app id from ulp.\n"); + return -EINVAL; + } + + flags = ulp_ctx->cfg_data->ulp_flags; + if (ULP_APP_DEV_UNSUPPORTED_ENABLED(flags)) { + netdev_dbg(bp->dev, "APP ID %d, Device ID: 0x%x not supported.\n", + app_id, dev_id); + return -EINVAL; + } + + rc = ulp_tfc_tbl_scope_init(bp, app_type); + + rc = bnxt_debug_tf_create(bp, ulp_ctx->tsid); + if (rc) { + netdev_dbg(bp->dev, "%s port(%d_ tsid(%d) Failed to create debugfs entry\n", + __func__, bp->pf.port_id, ulp_ctx->tsid); + rc = 0; + } + return rc; +} + +static void +ulp_tfc_ctx_detach(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct tfc *tfcp = bp->tfp; + u16 fid_cnt = 0; + u16 sid = 0; + int rc; + + /* Get the session id */ + rc = bnxt_ulp_cntxt_sid_get(bp->ulp_ctx, &sid); + if (rc) { + netdev_err(bp->dev, "Unable to get SID for FID=%d\n", bp->pf.fw_fid); + return; + } + + if (sid) { + rc = tfc_session_fid_rem(tfcp, bp->pf.fw_fid, &fid_cnt); + if (rc) + netdev_dbg(bp->dev, "Failed to remove RFID:%d from SID:%d\n", 
+ bp->pf.fw_fid, session->session_id); + else + netdev_dbg(bp->dev, "SID:%d removed RFID:%d CNT:%d\n", + session->session_id, bp->pf.fw_fid, fid_cnt); + } + + bnxt_debug_tf_delete(bp); + ulp_tfc_tbl_scope_deinit(bp); + + bnxt_ulp_cntxt_sid_reset(bp->ulp_ctx); +} + +/* + * When a port is deinit'ed by dpdk. This function is called + * and this function clears the ULP context and rest of the + * infrastructure associated with it. + */ +static void +ulp_tfc_deinit(struct bnxt *bp, + struct bnxt_ulp_session_state *session) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct tfc *tfcp = bp->tfp; + u16 fid_cnt = 0; + u16 sid = 0; + int rc; + + if (!ulp_ctx || + !ulp_ctx->cfg_data || + !tfcp) + return; + + /* cleanup the flow database */ + ulp_flow_db_deinit(ulp_ctx); + + /* Delete the Mark database */ + ulp_mark_db_deinit(ulp_ctx); + + /* cleanup the ulp mapper */ + ulp_mapper_deinit(ulp_ctx); + + /* cleanup the ulp matcher */ + ulp_matcher_deinit(ulp_ctx); + + /* Delete the Flow Counter Manager */ + ulp_fc_mgr_deinit(ulp_ctx); + + /* Delete the Port database */ + ulp_port_db_deinit(ulp_ctx); + + /* free the flow db lock */ + mutex_destroy(&ulp_ctx->cfg_data->flow_db_lock); + + /* remove debugfs entries */ + bnxt_debug_tf_delete(bp); + + ulp_tfc_tbl_scope_deinit(bp); + + rc = bnxt_ulp_cntxt_sid_get(ulp_ctx, &sid); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get SID for FID=%d\n", bp->pf.fw_fid); + return; + } + if (sid) { + rc = tfc_session_fid_rem(tfcp, bp->pf.fw_fid, &fid_cnt); + if (rc) + netdev_dbg(bp->dev, "Failed to remove RFID:%d from SID:%d\n", + bp->pf.fw_fid, session->session_id); + else + netdev_dbg(bp->dev, "SID:%d removed RFID:%d CNT:%d\n", + session->session_id, bp->pf.fw_fid, fid_cnt); + } + + bnxt_ulp_cntxt_sid_reset(ulp_ctx); + + /* Delete the ulp context and tf session and free the ulp context */ + ulp_tfc_ctx_deinit(bp, session); + + netdev_dbg(bp->dev, "ulp ctx has been deinitialized\n"); +} + +/* + * When a port is initialized by 
dpdk. This functions is called + * and this function initializes the ULP context and rest of the + * infrastructure associated with it. + */ +static int +ulp_tfc_init(struct bnxt *bp, + struct bnxt_ulp_session_state *session, + enum cfa_app_type app_type) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + u32 ulp_dev_id = BNXT_ULP_DEVICE_ID_LAST; + struct tfc *tfcp = bp->tfp; + u16 sid = 0; + int rc; + + if (!bp->tfp) + return -ENODEV; + + rc = bnxt_ulp_devid_get(bp, &ulp_dev_id); + if (rc) { + netdev_dbg(bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + + rc = bnxt_ulp_cntxt_tfcp_set(ulp_ctx, bp->tfp); + if (rc) { + netdev_dbg(bp->dev, "Failed to add tfcp to ulp cntxt\n"); + return -EINVAL; + } + + if (app_type != CFA_APP_TYPE_AFM) { + /* First time, so allocate a session and save it. */ + rc = tfc_session_id_alloc(tfcp, bp->pf.fw_fid, &sid); + if (rc) { + netdev_dbg(bp->dev, "Failed to allocate a session id\n"); + return -EINVAL; + } + netdev_dbg(bp->dev, "SID:%d allocated with RFID:%d\n", sid, bp->pf.fw_fid); + } + + session->session_id = sid; + rc = bnxt_ulp_cntxt_sid_set(ulp_ctx, sid); + if (rc) { + netdev_dbg(bp->dev, "Failed to sid to ulp cntxt\n"); + return -EINVAL; + } + + /* Allocate and Initialize the ulp context. 
*/ + rc = ulp_tfc_ctx_init(bp, session, app_type); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the ulp context\n"); + goto jump_to_error; + } + + rc = ulp_tfc_tbl_scope_init(bp, app_type); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the ulp context\n"); + goto jump_to_error; + } + + rc = bnxt_debug_tf_create(bp, ulp_ctx->tsid); + if (rc) { + netdev_dbg(bp->dev, "%s port(%d_ tsid(%d) Failed to create debugfs entry\n", + __func__, bp->pf.port_id, ulp_ctx->tsid); + rc = 0; + } + + mutex_init(&ulp_ctx->cfg_data->flow_db_lock); + + rc = ulp_tfc_dparms_init(bp, ulp_ctx, ulp_dev_id); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize the dparms\n"); + goto jump_to_error; + } + + /* create the port database */ + rc = ulp_port_db_init(ulp_ctx, bp->port_count); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the port database\n"); + goto jump_to_error; + } + + /* Create the Mark database. */ + rc = ulp_mark_db_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the mark database\n"); + goto jump_to_error; + } + + /* Create the flow database. 
*/ + rc = ulp_flow_db_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to create the flow database\n"); + goto jump_to_error; + } + + rc = ulp_matcher_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp matcher\n"); + goto jump_to_error; + } + + rc = ulp_mapper_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp mapper\n"); + goto jump_to_error; + } + + rc = ulp_fc_mgr_init(ulp_ctx); + if (rc) { + netdev_dbg(bp->dev, "Failed to initialize ulp flow counter mgr\n"); + goto jump_to_error; + } + + rc = bnxt_flow_meter_init(bp); + if (rc) { + if (rc != -EOPNOTSUPP) { + netdev_err(bp->dev, "Failed to config meter\n"); + goto jump_to_error; + } + rc = 0; + } + + netdev_dbg(bp->dev, "ulp ctx has been initialized\n"); + return rc; + +jump_to_error: + ulp_ctx->ops->ulp_deinit(bp, session); + return rc; +} + +const struct bnxt_ulp_core_ops bnxt_ulp_tfc_core_ops = { + .ulp_ctx_attach = ulp_tfc_ctx_attach, + .ulp_ctx_detach = ulp_tfc_ctx_detach, + .ulp_deinit = ulp_tfc_deinit, + .ulp_init = ulp_tfc_init, + .ulp_tfp_get = bnxt_ulp_cntxt_tfcp_get, + .ulp_vfr_session_fid_add = ulp_tfc_vfr_session_fid_add, + .ulp_vfr_session_fid_rem = ulp_tfc_vfr_session_fid_rem, +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.h new file mode 100644 index 000000000000..3d6072e0ea23 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_tf_ulp_p7.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_ULP_TFC_H_ +#define _BNXT_ULP_TFC_H_ + +#include +#include "bnxt.h" +#include "tfc.h" + +bool +bnxt_ulp_cntxt_shared_tbl_scope_enabled(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_tfcp_set(struct bnxt_ulp_context *ulp, struct tfc *tfcp); + +void * +bnxt_ulp_cntxt_tfcp_get(struct bnxt_ulp_context *ulp, enum bnxt_ulp_session_type s_type); + +u32 +bnxt_ulp_cntxt_tbl_scope_max_pools_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_tbl_scope_max_pools_set(struct bnxt_ulp_context *ulp_ctx, + u32 max); +enum tfc_tbl_scope_bucket_factor +bnxt_ulp_cntxt_em_mulitplier_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_em_mulitplier_set(struct bnxt_ulp_context *ulp_ctx, + enum tfc_tbl_scope_bucket_factor factor); + +u32 +bnxt_ulp_cntxt_num_rx_flows_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_num_rx_flows_set(struct bnxt_ulp_context *ulp_ctx, u32 num); + +u32 +bnxt_ulp_cntxt_num_tx_flows_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_num_tx_flows_set(struct bnxt_ulp_context *ulp_ctx, u32 num); + +u16 +bnxt_ulp_cntxt_em_rx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_em_rx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + u16 max); + +u16 +bnxt_ulp_cntxt_em_tx_key_max_sz_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_em_tx_key_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + u16 max); + +u16 +bnxt_ulp_cntxt_act_rec_rx_max_sz_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_act_rec_rx_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + int16_t max); + +u16 +bnxt_ulp_cntxt_act_rec_tx_max_sz_get(struct bnxt_ulp_context *ulp_ctx); + +int +bnxt_ulp_cntxt_act_rec_tx_max_sz_set(struct bnxt_ulp_context *ulp_ctx, + int16_t max); +u32 +bnxt_ulp_cntxt_page_sz_get(struct bnxt_ulp_context *ulp_ctxt); + +int +bnxt_ulp_cntxt_page_sz_set(struct bnxt_ulp_context *ulp_ctxt, + u32 page_sz); + +/* Function to get the tfp session details from ulp context. 
*/ +void * +bnxt_tfc_ulp_cntxt_tfp_get(struct bnxt_ulp_context *ulp, + enum bnxt_ulp_session_type s_type); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_flow.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_flow.h new file mode 100644 index 000000000000..ed565fd26c69 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_flow.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _BNXT_ULP_FLOW_H_ +#define _BNXT_ULP_FLOW_H_ + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + +struct bnxt_ulp_flow_info { + u32 flow_id; + struct ip_tunnel_key *encap_key; + struct bnxt_tc_neigh_key *neigh_key; + u8 tnl_smac[ETH_ALEN]; + u8 tnl_dmac[ETH_ALEN]; + u16 tnl_ether_type; + void *mparms; + bool dscp_remap; +}; +#endif + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +int bnxt_ulp_flow_create(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + struct bnxt_ulp_flow_info *flow_info); +int bnxt_ulp_flow_destroy(struct bnxt *bp, u32 flow_id, u16 src_fid, bool + dscp_remap); +void bnxt_ulp_flow_query_count(struct bnxt *bp, u32 flow_id, u64 *packets, + u64 *bytes, unsigned long *lastused); +int +bnxt_ulp_update_flow_encap_record(struct bnxt *bp, u8 *tnl_dmac, void *mparms, + u32 *flow_id); +void bnxt_ulp_free_mapper_encap_mparams(void *mparms); +bool bnxt_ulp_flow_chain_validate(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd); + +#ifdef CONFIG_VF_REPS +int bnxt_ulp_port_init(struct bnxt *bp); +void bnxt_ulp_port_deinit(struct bnxt *bp); +int bnxt_ulp_tfo_init(struct bnxt *bp); +void bnxt_ulp_tfo_deinit(struct bnxt *bp); +int bnxt_ulp_alloc_vf_rep(struct bnxt *bp, void *vfr); +int bnxt_ulp_alloc_vf_rep_p7(struct bnxt *bp, void *vfr); +void bnxt_ulp_free_vf_rep(struct bnxt *bp, void *vfr); +void bnxt_ulp_free_vf_rep_p7(struct bnxt *bp, void *vfr); +int 
bnxt_ulp_get_mark_from_cfacode(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, + u32 *mark_id); +int bnxt_ulp_get_mark_from_cfacode_p7(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, + u32 *mark_id); +#endif /* CONFIG_VF_REPS */ +#elif defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +int bnxt_ulp_port_init(struct bnxt *bp); +void bnxt_ulp_port_deinit(struct bnxt *bp); +#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ +#ifdef CONFIG_VF_REPS +static inline int +bnxt_ulp_port_init(struct bnxt *bp) + +{ + return -EINVAL; +} + +static inline void +bnxt_ulp_port_deinit(struct bnxt *bp) +{ +} + +static inline int +bnxt_ulp_tfo_init(struct bnxt *bp) + +{ + return -EINVAL; +} + +static inline void +bnxt_ulp_tfo_deinit(struct bnxt *bp) +{ +} + +static inline int bnxt_ulp_alloc_vf_rep(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline int bnxt_ulp_alloc_vf_rep_p7(struct bnxt *bp, void *vfr) +{ + return -EINVAL; +} + +static inline void bnxt_ulp_free_vf_rep(struct bnxt *bp, void *vfr) +{ +} + +static inline void bnxt_ulp_free_vf_rep_p7(struct bnxt *bp, void *vfr) +{ +} + +static inline int +bnxt_ulp_get_mark_from_cfacode(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, u32 *mark_id) +{ + return -EINVAL; +} + +static inline int +bnxt_ulp_get_mark_from_cfacode_p7(struct bnxt *bp, struct rx_cmp_ext *rxcmp1, + struct bnxt_tpa_info *tpa_info, u32 *mark_id) +{ + return -EINVAL; +} +#endif /* CONFIG_VF_REPS */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +#endif /* _BNXT_ULP_FLOW_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_linux_flow.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_linux_flow.c new file mode 100644 index 000000000000..eeef7753d751 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_linux_flow.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "bnxt_ulp_flow.h" +#include "ulp_tc_parser.h" +#include "ulp_matcher.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_fc_mgr.h" +#include "ulp_port_db.h" +#include "ulp_template_debug_proto.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +static inline void bnxt_ulp_set_dir_attributes(struct bnxt *bp, + struct ulp_tc_parser_params *params, + u16 src_fid) +{ + /* Set the flow attributes. + * TBD: This logic might need some port-process fixing for the + * vxlan-decap case. + */ + if (bp->pf.fw_fid == src_fid) + params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS; + else + params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS; +} + +static int +bnxt_ulp_set_prio_attribute(struct bnxt *bp, + struct ulp_tc_parser_params *params, + u32 priority) +{ + u32 max_p = bnxt_ulp_max_flow_priority_get(params->ulp_ctx); + u32 min_p = bnxt_ulp_min_flow_priority_get(params->ulp_ctx); + + if (max_p < min_p) { + if (priority > min_p || priority < max_p) { + netdev_dbg(bp->dev, "invalid prio %d, not in range %u:%u\n", + priority, max_p, min_p); + return -EINVAL; + } + params->priority = priority; + } else { + if (priority > max_p || priority < min_p) { + netdev_dbg(bp->dev, "invalid prio %d, not in range %u:%u\n", + priority, min_p, max_p); + return -EINVAL; + } + params->priority = max_p - priority; + } + /* flows with priority zero is considered as highest and put in EM */ + if (priority >= + bnxt_ulp_default_app_priority_get(params->ulp_ctx) && + priority <= bnxt_ulp_max_def_priority_get(params->ulp_ctx)) { + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_DEF_PRIO); + /* priority 2 (ipv4) and 3 (ipv6) will be passed by OVS-TC. + * Consider them highest priority for EM and set to 0. 
+ */ + params->priority = 0; + } + return 0; +} + +static inline void +bnxt_ulp_init_parser_cf_defaults(struct ulp_tc_parser_params *params, + u16 port_id) +{ + /* Set up defaults for Comp field */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_INCOMING_IF, port_id); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_PORT_ID, port_id); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG, + BNXT_ULP_INVALID_SVIF_VAL); +} + +static void +bnxt_ulp_init_cf_header_bitmap(struct bnxt_ulp_mapper_parms *params) +{ + uint64_t hdr_bits = 0; + + /* Remove the internal tunnel bits */ + hdr_bits = params->hdr_bitmap->bits; + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_F2); + + /* Add untag bits */ + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OO_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OO_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OI_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OI_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_IO_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_IO_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_II_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_II_UNTAGGED); + + /* Add non-tunnel bit */ + if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_NON_TUNNEL); + + /* Add l2 only bit */ + if ((!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV4) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV6)) || + (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_IPV4) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_IPV6))) { + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_L2_ONLY); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_ONLY); + } + + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PROFILE_BITMAP, hdr_bits); + + /* Update the l4 protocol bits */ + if ((ULP_BITMAP_ISSET(hdr_bits, 
BNXT_ULP_HDR_BIT_O_TCP) || + ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP))) { + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_TCP); + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP); + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_O_L4_FLOW); + } + + if ((ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP) || + ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP))) { + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP); + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP); + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_I_L4_FLOW); + } + + /*update the comp field header bits */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_BITMAP, hdr_bits); +} + +void bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms, + struct ulp_tc_parser_params *params, + enum bnxt_ulp_fdb_type flow_type) +{ + u32 ulp_flags = 0; + + mparms->flow_type = flow_type; + mparms->ulp_ctx = params->ulp_ctx; + mparms->app_priority = params->priority; + mparms->class_tid = params->class_id; + mparms->act_tid = params->act_tmpl; + mparms->func_id = params->func_id; + mparms->hdr_bitmap = ¶ms->hdr_bitmap; + mparms->enc_hdr_bitmap = ¶ms->enc_hdr_bitmap; + mparms->hdr_field = params->hdr_field; + mparms->enc_field = params->enc_field; + mparms->comp_fld = params->comp_fld; + mparms->act_bitmap = ¶ms->act_bitmap; + mparms->act_prop = ¶ms->act_prop; + mparms->flow_id = params->fid; + mparms->fld_bitmap = ¶ms->fld_bitmap; + mparms->flow_pattern_id = params->flow_pattern_id; + mparms->act_pattern_id = params->act_pattern_id; + mparms->wc_field_bitmap = params->wc_field_bitmap; + mparms->app_id = params->app_id; + mparms->tun_idx = params->tun_idx; + mparms->cf_bitmap = params->cf_bitmap; + mparms->exclude_field_bitmap = params->exclude_field_bitmap; + + /* update the signature fields into the computed field list */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID, + params->class_info_idx); + + /* update the header bitmap */ + bnxt_ulp_init_cf_header_bitmap(mparms); + + 
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID, + params->flow_sig_id); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FUNCTION_ID, + params->func_id); + + if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags)) + return; + + /* Update the socket direct flag */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_SVIF_IGNORE)) { + uint32_t ifindex; + uint16_t vport; + + /* Get the port db ifindex */ + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, + params->port_id, + &ifindex)) { + netdev_dbg(params->ulp_ctx->bp->dev, "Invalid port id %u\n", + params->port_id); + return; + } + /* Update the phy port of the other interface */ + if (ulp_port_db_vport_get(params->ulp_ctx, ifindex, &vport)) { + netdev_dbg(params->ulp_ctx->bp->dev, "Invalid port if index %u\n", + ifindex); + return; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT, + (vport == 1) ? 2 : 1); + } +} + +static int +bnxt_ulp_alloc_mapper_encap_mparams(struct bnxt_ulp_mapper_parms **mparms_dyn, + struct bnxt_ulp_mapper_parms *mparms) +{ + struct bnxt_ulp_mapper_parms *parms = NULL; + + parms = vzalloc(sizeof(*parms)); + if (!parms) + goto err; + + memcpy(parms, mparms, sizeof(*parms)); + + parms->hdr_bitmap = vzalloc(sizeof(*parms->hdr_bitmap)); + if (!parms->hdr_bitmap) + goto err_mparm; + + parms->enc_hdr_bitmap = vzalloc(sizeof(*parms->enc_hdr_bitmap)); + if (!parms->enc_hdr_bitmap) + goto err_hdr_bitmap; + + parms->hdr_field = vzalloc(sizeof(*parms->hdr_field) * BNXT_ULP_PROTO_HDR_MAX); + if (!parms->hdr_field) + goto err_enc_hdr_bitmap; + + parms->enc_field = vzalloc(sizeof(*parms->enc_field) * BNXT_ULP_PROTO_HDR_ENCAP_MAX); + if (!parms->enc_field) + goto err_hdr_field; + + parms->comp_fld = vzalloc(sizeof(*parms->comp_fld) * BNXT_ULP_CF_IDX_LAST); + if (!parms->comp_fld) + goto err_enc_field; + + parms->act_bitmap = vzalloc(sizeof(*parms->act_bitmap)); + if (!parms->act_bitmap) + goto err_comp_fld; + + parms->act_prop = 
vzalloc(sizeof(*parms->act_prop)); + if (!parms->act_prop) + goto err_act; + + parms->fld_bitmap = vzalloc(sizeof(*parms->fld_bitmap)); + if (!parms->fld_bitmap) + goto err_act_prop; + + memcpy(parms->hdr_bitmap, mparms->hdr_bitmap, sizeof(*parms->hdr_bitmap)); + memcpy(parms->enc_hdr_bitmap, mparms->enc_hdr_bitmap, + sizeof(*parms->enc_hdr_bitmap)); + memcpy(parms->hdr_field, mparms->hdr_field, + sizeof(*parms->hdr_field) * BNXT_ULP_PROTO_HDR_MAX); + memcpy(parms->enc_field, mparms->enc_field, + sizeof(*parms->enc_field) * BNXT_ULP_PROTO_HDR_ENCAP_MAX); + memcpy(parms->comp_fld, mparms->comp_fld, + sizeof(*parms->comp_fld) * BNXT_ULP_CF_IDX_LAST); + memcpy(parms->act_bitmap, mparms->act_bitmap, sizeof(*parms->act_bitmap)); + memcpy(parms->act_prop, mparms->act_prop, sizeof(*parms->act_prop)); + memcpy(parms->fld_bitmap, mparms->fld_bitmap, sizeof(*parms->fld_bitmap)); + + *mparms_dyn = parms; + return 0; + +err_act_prop: + vfree(parms->act_prop); +err_act: + vfree(parms->act_bitmap); +err_comp_fld: + vfree(parms->comp_fld); +err_enc_field: + vfree(parms->enc_field); +err_hdr_field: + vfree(parms->hdr_field); +err_enc_hdr_bitmap: + vfree(parms->enc_hdr_bitmap); +err_hdr_bitmap: + vfree(parms->hdr_bitmap); +err_mparm: + vfree(parms); +err: + return -ENOMEM; +} + +void bnxt_ulp_free_mapper_encap_mparams(void *mapper_mparms) +{ + struct bnxt_ulp_mapper_parms *parms = mapper_mparms; + + vfree(parms->act_prop); + vfree(parms->act_bitmap); + vfree(parms->comp_fld); + vfree(parms->enc_field); + vfree(parms->hdr_field); + vfree(parms->enc_hdr_bitmap); + vfree(parms->hdr_bitmap); + vfree(parms); +} + +/* Function to create the ulp flow. 
*/ +int bnxt_ulp_flow_create(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd, + struct bnxt_ulp_flow_info *flow_info) +{ + struct bnxt_ulp_mapper_parms *encap_parms = NULL; + struct bnxt_ulp_mapper_parms mparms = { 0 }; + struct ulp_tc_parser_params *params = NULL; + struct bnxt_ulp_context *ulp_ctx; + int rc, ret = BNXT_TF_RC_ERROR; + u32 chain_index; + u16 func_id; + u32 fid; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + goto flow_error; + } + + /* Initialize the parser params */ + params = vzalloc(sizeof(*params)); + params->ulp_ctx = ulp_ctx; + + if (bnxt_ulp_cntxt_app_id_get(params->ulp_ctx, ¶ms->app_id)) { + netdev_dbg(bp->dev, "failed to get the app id\n"); + goto flow_error; + } + + /* Set the flow attributes */ + bnxt_ulp_set_dir_attributes(bp, params, src_fid); + + if (bnxt_ulp_set_prio_attribute(bp, params, tc_flow_cmd->common.prio)) + goto flow_error; + + bnxt_ulp_init_parser_cf_defaults(params, src_fid); + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "conversion of port to func id failed\n"); + goto flow_error; + } + + /* Protect flow creation */ + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + + /* Allocate a Flow ID to attach all resources for the flow. + * Once allocated, all errors have to walk the list of resources and + * free each of them. 
+ */ + rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + func_id, &fid); + if (rc) { + netdev_dbg(bp->dev, "Unable to allocate flow table entry\n"); + goto release_lock; + } + + /* Parse the tc flow pattern */ + ret = bnxt_ulp_tc_parser_hdr_parse(bp, tc_flow_cmd, params); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + /* Parse the tc flow action */ + ret = bnxt_ulp_tc_parser_act_parse(bp, tc_flow_cmd, params); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + params->fid = fid; + params->func_id = func_id; + params->port_id = src_fid; + + chain_index = tc_flow_cmd->common.chain_index; + if (chain_index) { + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_CHAIN_ID_METADATA, + chain_index); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_GROUP_ID, + cpu_to_le32(chain_index)); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_GROUP_ID); + + netdev_dbg(bp->dev, "%s: Chain metadata: 0x%x chain: %u\n", + __func__, + (chain_index + ULP_THOR_SYM_CHAIN_META_VAL), + chain_index); + } + params->match_chain_id = chain_index; + + netdev_dbg(bp->dev, "Flow prio: %u chain: %u\n", + params->priority, params->match_chain_id); + + /* Perform the tc flow post process */ + ret = bnxt_ulp_tc_parser_post_process(params); + if (ret == BNXT_TF_RC_ERROR) + goto free_fid; + else if (ret == BNXT_TF_RC_FID) + goto return_fid; + + /* Dump the tc flow pattern */ + ulp_parser_hdr_info_dump(params); + /* Dump the tc flow action */ + ulp_parser_act_info_dump(params); + + ret = ulp_matcher_pattern_match(params, ¶ms->class_id); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + ret = ulp_matcher_action_match(params, ¶ms->act_tmpl); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + bnxt_ulp_init_mapper_params(&mparms, params, + BNXT_ULP_FDB_TYPE_REGULAR); + /* Call the ulp mapper to create the flow in the hardware. 
*/ + ret = ulp_mapper_flow_create(ulp_ctx, &mparms, NULL); + if (ret) + goto free_fid; + + if (params->tnl_key) { + ret = bnxt_ulp_alloc_mapper_encap_mparams(&encap_parms, + &mparms); + if (ret) + goto mapper_destroy; + } + + if (ULP_BITMAP_ISSET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER)) { + ulp_ctx->cfg_data->dscp_remap_ref++; + flow_info->dscp_remap = true; + } + +return_fid: + flow_info->flow_id = fid; + if (params->tnl_key) { + flow_info->mparms = encap_parms; + ether_addr_copy(flow_info->tnl_dmac, params->tnl_dmac); + ether_addr_copy(flow_info->tnl_smac, params->tnl_smac); + flow_info->tnl_ether_type = params->tnl_ether_type; + flow_info->encap_key = params->tnl_key; + flow_info->neigh_key = params->neigh_key; + } + vfree(params); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return 0; + +mapper_destroy: + ulp_mapper_flow_destroy(ulp_ctx, mparms.flow_type, + mparms.flow_id, NULL); +free_fid: + vfree(params->tnl_key); + vfree(params->neigh_key); + ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid); +release_lock: + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); +flow_error: + vfree(params); + if (ret == -ENOSPC) + return ret; + else + return (ret == BNXT_TF_RC_PARSE_ERR_NOTSUPP) ? -EOPNOTSUPP : -EIO; +} + +/* Function to destroy the ulp flow. 
*/ +int bnxt_ulp_flow_destroy(struct bnxt *bp, u32 flow_id, u16 src_fid, + bool dscp_remap) +{ + struct bnxt_ulp_context *ulp_ctx; + u16 func_id; + int ret; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + return -ENOENT; + } + + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "Conversion of port to func id failed\n"); + return -EINVAL; + } + + ret = ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id); + if (ret) + return ret; + + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + flow_id, NULL); + if (dscp_remap) { + ulp_ctx->cfg_data->dscp_remap_ref--; + if (!ulp_ctx->cfg_data->dscp_remap_ref) + bnxt_tc_clear_dscp_ipv6(bp, ulp_ctx); + } + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return ret; +} + +void bnxt_ulp_flow_query_count(struct bnxt *bp, u32 flow_id, u64 *packets, + u64 *bytes, unsigned long *lastused) +{ + ulp_tf_fc_mgr_query_count_get(bp->ulp_ctx, flow_id, packets, bytes, + lastused, NULL); +} + +int +bnxt_ulp_update_flow_encap_record(struct bnxt *bp, u8 *tnl_dmac, void *mparms, + u32 *flow_id) +{ + struct bnxt_ulp_mapper_parms *parms = mparms; + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct ulp_tc_hdr_field *field; + u32 local_flow_id; + u16 func_id; + int ret; + + if (!mparms) { + netdev_dbg(bp->dev, "Function %s: pointer is NULL\n", __func__); + return -EINVAL; + } + + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + ret = ulp_mapper_flow_destroy(bp->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, *flow_id, NULL); + if (ret) + goto err; + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, + bp->pf.port_id, + &func_id)) { + netdev_dbg(bp->dev, "conversion of port to func id failed\n"); + goto err; + } + + netdev_dbg(bp->dev, "Function %s: flow destroy successful\n", __func__); + field = &parms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC]; + 
memcpy(field->spec, tnl_dmac, ETH_ALEN); + ret = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + func_id, &local_flow_id); + if (ret) { + netdev_dbg(bp->dev, "Function %s: flow_id alloc failed\n", __func__); + goto invalidate_flow_id; + } + *flow_id = local_flow_id; + parms->flow_id = local_flow_id; + ret = ulp_mapper_flow_create(bp->ulp_ctx, parms, NULL); + if (!ret) + goto done; + netdev_dbg(bp->dev, "Function %s flow_create failed\n", __func__); + ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + *flow_id); +invalidate_flow_id: + /* flow_id == 0 means invalid flow id. Invalidate the flow_id + * when the flow creation under the hood fails, so that when + * the user deletes the flow, we will not try to delete it + * again in the hardware + */ + *flow_id = 0; +err: +done: + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + return ret; +} + +bool bnxt_ulp_flow_chain_validate(struct bnxt *bp, u16 src_fid, + struct flow_cls_offload *tc_flow_cmd) +{ + u32 chain = tc_flow_cmd->common.chain_index; + struct bnxt_ulp_context *ulp_ctx; + u8 app_id; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "%s: ULP context is not initialized\n", + __func__); + return false; + } + + if (bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id)) { + netdev_dbg(bp->dev, "%s: Failed to get the app id\n", __func__); + return false; + } + + if (!chain) + return true; + + /* non-zero chain */ + if (app_id != 0 && app_id != 1) { + netdev_dbg(bp->dev, + "%s: Flow chaining is unsupported, app:%u chain:%u\n", + __func__, app_id, chain); + return false; + } + return true; +} + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_meter.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_meter.c new file mode 100644 index 000000000000..a8ac19d54d51 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/bnxt_ulp_meter.c @@ -0,0 +1,502 @@ +// SPDX-License-Identifier: BSD-3-Clause 
+/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_tc_parser.h" +#include "ulp_mapper.h" +#include "ulp_matcher.h" +#include "ulp_port_db.h" +#include "ulp_template_debug_proto.h" + +/* Meter init status */ +void bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms, + struct ulp_tc_parser_params *params, + enum bnxt_ulp_fdb_type flow_type); + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD + +/* Internal api to setup global config. + * returns 0 on success. + */ +static int bnxt_meter_global_cfg_update(struct bnxt *bp, enum tf_dir dir, + enum tf_global_config_type type, + u32 offset, u32 value, u32 set_flag) +{ + struct tf_global_cfg_parms parms = { 0 }; + u32 global_cfg = 0; + struct tf *tfp; + int rc = 0; + + parms.dir = dir, + parms.type = type, + parms.offset = offset, + parms.config = (u8 *)&global_cfg, + parms.config_sz_in_bytes = sizeof(global_cfg); + + tfp = bnxt_ulp_bp_tfp_get(bp, BNXT_ULP_SESSION_TYPE_DEFAULT); + rc = tf_get_global_cfg(tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to get global cfg 0x%x rc:%d\n", + type, rc); + return rc; + } + + if (set_flag) + global_cfg |= value; + else + global_cfg &= ~value; + + rc = tf_set_global_cfg(tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to set global cfg 0x%x rc:%d\n", + type, rc); + return rc; + } + return rc; +} + +#define BNXT_THOR_FMTCR_NUM_MET_MET_1K (0x7UL << 20) +#define BNXT_THOR_FMTCR_REMAP (0x1UL << 24) +#define BNXT_THOR_FMTCR_CNTRS_ENABLE (0x1UL << 25) +#define BNXT_THOR_FMTCR_INTERVAL_1K (1024) + +int bnxt_flow_meter_init(struct bnxt *bp) +{ + struct bnxt_ulp_context *ulp_ctx; + int rc = 0; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx || !ulp_ctx->cfg_data) { + netdev_dbg(bp->dev, "ULP Context is not initialized\n"); + return -EINVAL; + } + + /* 
Meters are supported only for DSCP Remap feature */ + if (!ULP_DSCP_REMAP_IS_ENABLED(ulp_ctx->cfg_data->ulp_flags)) { + netdev_dbg(bp->dev, "DSCP_REMAP Capability is not enabled\n"); + return -EOPNOTSUPP; + } + + /* Enable metering. Set the meter global configuration register. + * Set number of meter to 1K. Disable the drop counter for now. + */ + rc = bnxt_meter_global_cfg_update(bp, TF_DIR_RX, TF_METER_CFG, + 0, + BNXT_THOR_FMTCR_NUM_MET_MET_1K | + BNXT_THOR_FMTCR_REMAP, + 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set rx meter configuration\n"); + return rc; + } + + rc = bnxt_meter_global_cfg_update(bp, TF_DIR_TX, TF_METER_CFG, + 0, + BNXT_THOR_FMTCR_NUM_MET_MET_1K | + BNXT_THOR_FMTCR_REMAP, + 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set tx meter configuration\n"); + return rc; + } + + /* Set meter refresh rate to 1024 clock cycle. This value works for + * most bit rates especially for high rates. + */ + rc = bnxt_meter_global_cfg_update(bp, TF_DIR_RX, TF_METER_INTERVAL_CFG, + 0, BNXT_THOR_FMTCR_INTERVAL_1K, + 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set rx meter interval\n"); + return rc; + } + + rc = bnxt_meter_global_cfg_update(bp, TF_DIR_TX, TF_METER_INTERVAL_CFG, + 0, BNXT_THOR_FMTCR_INTERVAL_1K, + 1); + if (rc) { + netdev_dbg(bp->dev, "Failed to set tx meter interval\n"); + return rc; + } + + ulp_ctx->cfg_data->meter_initialized = 1; + netdev_dbg(bp->dev, "Flow meter has been initialized\n"); + return rc; +} + +/* Calculate mantissa and exponent for cir / eir reg. */ +#define BNXT_CPU_CLOCK 800 +#define MEGA 1000000 +#define NUM_BIT_PER_BYTE 8 +static void bnxt_ulp_flow_meter_xir_calc(u64 xir, u32 *reg) +{ + u8 *swap = 0; + u16 m = 0; + u16 e = 0; + u64 temp; + + /* Special case xir == 0 ? both exp and matissa are 0. 
*/ + if (xir == 0) { + *reg = 0; + return; + } + + /* e = floor(log2(cir)) + 27 + * a (MBps) = xir (bps) / MEGA + * b (MBpc) = a (MBps) / CPU_CLOCK (Mcps) + * e = floor(log2(b)) + 27 + */ + temp = xir * (1 << 24) / (BNXT_CPU_CLOCK >> 3) / MEGA; + e = ilog2(temp); + + /* m = round(b/2^(e-27) - 1) * 2048 + * = round(b*2^(27-e) - 1) * 2^11 + * = round(b*2^(38-e) - 2^11) + */ + m = xir * (1 << (38 - e)) / BNXT_CPU_CLOCK / MEGA - (1 << 11); + *reg = ((m & 0x7FF) << 6) | (e & 0x3F); + swap = (u8 *)reg; + *reg = swap[0] << 16 | swap[1] << 8 | swap[2]; +} + +/* Calculate mantissa and exponent for cbs / ebs reg */ +static void bnxt_ulp_flow_meter_xbs_calc(u64 xbs, u16 *reg) +{ + u16 m = 0; + u16 e = 0; + + if (xbs == 0) { + *reg = 0; + return; + } + + /* e = floor(log2(xbs)) + 1 */ + e = ilog2(xbs) + 1; + + /* m = round(xbs/2^(e-1) - 1) * 128 + * = round(xbs*2^(1-e) - 1) * 2^7 + * = round(xbs*2^(8-e) - 2^7) + */ + m = xbs / (1 << (e - 8)) - (1 << 7); + *reg = ((m & 0x7F) << 5) | (e & 0x1F); + *reg = cpu_to_be16(*reg); +} + +/* Parse the meter profile. */ +static int bnxt_ulp_meter_profile_alloc(struct bnxt *bp, + struct ulp_tc_act_prop *act_prop, + u64 cir, u64 eir, u64 cbs, u64 ebs) +{ + bool alg_rfc2698 = false; + u32 cir_reg, eir_reg; + u16 cbs_reg, ebs_reg; + bool cbnd = true; + bool ebnd = true; + bool pm = false; + + /* The CBS and EBS must be configured so that at least one + * of them is larger than 0. It is recommended that when + * the value of the CBS or the EBS is larger than 0, it + * is larger than or equal to the size of the largest possible + * IP packet in the stream. 
+ */ + if (cbs == 0 && ebs == 0) { + netdev_dbg(bp->dev, + "CBS & EBS cannot both be 0; one of them should be > MTU\n"); + return -EINVAL; + } + + bnxt_ulp_flow_meter_xir_calc(cir, &cir_reg); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR], + &cir_reg, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CIR); + + bnxt_ulp_flow_meter_xir_calc(eir, &eir_reg); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR], + &eir_reg, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EIR); + + bnxt_ulp_flow_meter_xbs_calc(cbs, &cbs_reg); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS], + &cbs_reg, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBS); + + bnxt_ulp_flow_meter_xbs_calc(ebs, &ebs_reg); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS], + &ebs_reg, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBS); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698], + &alg_rfc2698, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_RFC2698); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM], + &pm, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_PM); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND], + &cbnd, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBND); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND], + &ebnd, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBND); + + return 0; +} + +#define MTR_PROF_DEFAULT_CIR 128000000 +#define MTR_PROF_DEFAULT_EIR 128000000 +#define MTR_PROF_DEFAULT_CBS 131072 +#define MTR_PROF_DEFAULT_EBS 131072 + +static struct bnxt_ulp_mapper_parms mapper_mparms = { 0 }; +static struct ulp_tc_parser_params pparams = {{ 0 }}; + +/* Add MTR profile. 
*/ +int bnxt_flow_meter_profile_add(struct bnxt *bp, u32 meter_profile_id, u32 dir) +{ + struct ulp_tc_act_prop *act_prop; + struct bnxt_ulp_context *ulp_ctx; + u32 tmp_profile_id; + u32 act_tid; + int rc; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP Context is not initialized\n"); + return -EINVAL; + } + + act_prop = &pparams.act_prop; + + /* Initialize the parser params */ + memset(&pparams, 0, sizeof(struct ulp_tc_parser_params)); + pparams.ulp_ctx = ulp_ctx; + pparams.act_bitmap.bits = BNXT_ULP_ACT_BIT_METER_PROFILE; + pparams.act_bitmap.bits |= (dir == BNXT_ULP_FLOW_ATTR_INGRESS ? + BNXT_ULP_FLOW_DIR_BITMASK_ING : + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + pparams.app_id = 1; + pparams.dir_attr |= dir; + + tmp_profile_id = cpu_to_be32(meter_profile_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID], + &tmp_profile_id, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID); + + rc = bnxt_ulp_meter_profile_alloc(bp, act_prop, + MTR_PROF_DEFAULT_CIR, + MTR_PROF_DEFAULT_EIR, + MTR_PROF_DEFAULT_CBS, + MTR_PROF_DEFAULT_EBS); + if (rc) + return rc; + + ulp_parser_act_info_dump(&pparams); + rc = ulp_matcher_action_match(&pparams, &act_tid); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + bnxt_ulp_init_mapper_params(&mapper_mparms, &pparams, + BNXT_ULP_FDB_TYPE_REGULAR); + mapper_mparms.act_tid = act_tid; + + rc = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (rc) + return rc; + netdev_dbg(bp->dev, "Flow meter profile %d is created\n", meter_profile_id); + return 0; +} + +/* Delete meter profile */ +int bnxt_flow_meter_profile_delete(struct bnxt *bp, u32 meter_profile_id, u32 dir) +{ + struct ulp_tc_act_prop *act_prop; + struct bnxt_ulp_context *ulp_ctx; + u32 tmp_profile_id; + u32 act_tid; + int rc; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx || !ulp_ctx->cfg_data) { + netdev_dbg(bp->dev, "ULP Context is not initialized\n"); + return -EINVAL; + } + + if (!ulp_ctx->cfg_data->meter_initialized) 
{ + netdev_dbg(bp->dev, "Meter is not initialized\n"); + return -EOPNOTSUPP; + } + + act_prop = &pparams.act_prop; + + /* Initialize the parser params */ + memset(&pparams, 0, sizeof(struct ulp_tc_parser_params)); + pparams.ulp_ctx = ulp_ctx; + pparams.act_bitmap.bits = BNXT_ULP_ACT_BIT_METER_PROFILE; + pparams.act_bitmap.bits |= BNXT_ULP_ACT_BIT_DELETE; + pparams.act_bitmap.bits |= (dir == BNXT_ULP_FLOW_ATTR_INGRESS ? + BNXT_ULP_FLOW_DIR_BITMASK_ING : + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + pparams.app_id = 1; + pparams.dir_attr |= dir; + + tmp_profile_id = cpu_to_be32(meter_profile_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID], + &tmp_profile_id, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID); + + ulp_parser_act_info_dump(&pparams); + rc = ulp_matcher_action_match(&pparams, &act_tid); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + bnxt_ulp_init_mapper_params(&mapper_mparms, &pparams, + BNXT_ULP_FDB_TYPE_REGULAR); + mapper_mparms.act_tid = act_tid; + + rc = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (rc) + return rc; + + netdev_dbg(bp->dev, "Flow meter profile %d deleted\n", + meter_profile_id); + return 0; +} + +/* Create meter */ +int bnxt_flow_meter_create(struct bnxt *bp, u32 meter_profile_id, u32 meter_id, u32 dir) +{ + struct ulp_tc_act_prop *act_prop; + struct bnxt_ulp_context *ulp_ctx; + u32 tmp_meter_id, tmp_profile_id; + bool meter_en = true; + u32 act_tid; + int rc; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx || !ulp_ctx->cfg_data) { + netdev_dbg(bp->dev, "ULP Context is not initialized\n"); + return -EINVAL; + } + + if (!ulp_ctx->cfg_data->meter_initialized) { + netdev_dbg(bp->dev, "Meter is not initialized\n"); + return -EOPNOTSUPP; + } + + act_prop = &pparams.act_prop; + + /* Initialize the parser params */ + memset(&pparams, 0, sizeof(struct ulp_tc_parser_params)); + pparams.ulp_ctx = ulp_ctx; + pparams.act_bitmap.bits = BNXT_ULP_ACT_BIT_SHARED_METER; + pparams.act_bitmap.bits |= (dir == 
BNXT_ULP_FLOW_ATTR_INGRESS ? + BNXT_ULP_FLOW_DIR_BITMASK_ING : + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + pparams.app_id = 1; + pparams.dir_attr |= dir; + + tmp_meter_id = cpu_to_be32(meter_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_INST_ID], + &tmp_meter_id, + BNXT_ULP_ACT_PROP_SZ_METER_INST_ID); + + tmp_profile_id = cpu_to_be32(meter_profile_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID], + &tmp_profile_id, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL], + &meter_en, + BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL); + + ulp_parser_act_info_dump(&pparams); + rc = ulp_matcher_action_match(&pparams, &act_tid); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + bnxt_ulp_init_mapper_params(&mapper_mparms, &pparams, + BNXT_ULP_FDB_TYPE_REGULAR); + mapper_mparms.act_tid = act_tid; + + rc = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (rc) + return rc; + + netdev_dbg(bp->dev, "Flow meter %d is created\n", meter_id); + return 0; +} + +/* Destroy meter */ +int bnxt_flow_meter_destroy(struct bnxt *bp, u32 meter_id, u32 dir) +{ + struct ulp_tc_act_prop *act_prop; + struct bnxt_ulp_context *ulp_ctx; + u32 tmp_meter_id; + u32 act_tid; + int rc; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx || !ulp_ctx->cfg_data) { + netdev_dbg(bp->dev, "ULP Context is not initialized\n"); + return -EINVAL; + } + + if (!ulp_ctx->cfg_data->meter_initialized) { + netdev_dbg(bp->dev, "Meter is not initialized\n"); + return -EOPNOTSUPP; + } + + act_prop = &pparams.act_prop; + + /* Initialize the parser params */ + memset(&pparams, 0, sizeof(struct ulp_tc_parser_params)); + pparams.ulp_ctx = ulp_ctx; + pparams.act_bitmap.bits = BNXT_ULP_ACT_BIT_SHARED_METER; + pparams.act_bitmap.bits |= BNXT_ULP_ACT_BIT_DELETE; + pparams.act_bitmap.bits |= (dir == BNXT_ULP_FLOW_ATTR_INGRESS ? 
+ BNXT_ULP_FLOW_DIR_BITMASK_ING : + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + pparams.app_id = 1; + pparams.dir_attr |= dir; + + tmp_meter_id = cpu_to_be32(meter_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_METER_INST_ID], + &tmp_meter_id, + BNXT_ULP_ACT_PROP_SZ_METER_INST_ID); + + ulp_parser_act_info_dump(&pparams); + rc = ulp_matcher_action_match(&pparams, &act_tid); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + bnxt_ulp_init_mapper_params(&mapper_mparms, &pparams, + BNXT_ULP_FDB_TYPE_REGULAR); + mapper_mparms.act_tid = act_tid; + + rc = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (rc) + return rc; + + netdev_dbg(bp->dev, "Flow meter %d is deleted\n", meter_id); + return 0; +} + +#else /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +int bnxt_flow_meter_init(struct bnxt *bp) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_act.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_act.c new file mode 100644 index 000000000000..69a37b458a1d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_act.c @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. 
 */

#include "ulp_template_db_enum.h"
#include "ulp_template_db_field.h"
#include "ulp_template_struct.h"
#include "ulp_template_db_tbl.h"

/* Array for the act matcher list.
 * Generated template data (tf_ulp/generic_templates): each entry maps a
 * supported combination of action bits plus a flow-direction bitmask to
 * the action template id (act_tid) to use for it.  Index 0 is unused;
 * entries start at [1].
 */
struct bnxt_ulp_act_match_info ulp_act_match_list[] = {
	/* Entries [1]..[12]: ingress (BNXT_ULP_FLOW_DIR_BITMASK_ING) */
	[1] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DROP |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_RSS |
		BNXT_ULP_ACT_BIT_QUEUE |
		BNXT_ULP_ACT_BIT_POP_VLAN |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_SET_IPV4_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV4_DST |
		BNXT_ULP_ACT_BIT_SET_IPV6_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV6_DST |
		BNXT_ULP_ACT_BIT_SET_TP_SRC |
		BNXT_ULP_ACT_BIT_SET_TP_DST |
		BNXT_ULP_ACT_BIT_VXLAN_DECAP |
		BNXT_ULP_ACT_BIT_GENEVE_DECAP |
		BNXT_ULP_ACT_BIT_METER |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 1
	},
	[2] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_GOTO_CHAIN |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 1
	},
	[3] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_DROP |
		BNXT_ULP_ACT_BIT_POP_VLAN |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_VXLAN_DECAP |
		BNXT_ULP_ACT_BIT_GENEVE_DECAP |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_METER |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_ACT_BIT_GOTO_CHAIN |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 2
	},
	[4] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED |
		BNXT_ULP_ACT_BIT_SAMPLE |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 3
	},
	[5] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DELETE |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 3
	},
	[6] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_SET_IPV4_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV4_DST |
		BNXT_ULP_ACT_BIT_SET_IPV6_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV6_DST |
		BNXT_ULP_ACT_BIT_SET_TP_SRC |
		BNXT_ULP_ACT_BIT_SET_TP_DST |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 4
	},
	[7] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_RSS |
		BNXT_ULP_ACT_BIT_QUEUE |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 5
	},
	[8] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_METER_PROFILE |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 6
	},
	[9] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED_METER |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 6
	},
	[10] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DELETE |
		BNXT_ULP_ACT_BIT_METER_PROFILE |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 6
	},
	[11] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DELETE |
		BNXT_ULP_ACT_BIT_SHARED_METER |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 6
	},
	[12] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_UPDATE |
		BNXT_ULP_ACT_BIT_SHARED_METER |
		BNXT_ULP_FLOW_DIR_BITMASK_ING },
	.act_tid = 6
	},
	/* Entries [13]..[20]: egress (BNXT_ULP_FLOW_DIR_BITMASK_EGR) */
	[13] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DROP |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_VF_TO_VF |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_SET_IPV4_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV4_DST |
		BNXT_ULP_ACT_BIT_SET_IPV6_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV6_DST |
		BNXT_ULP_ACT_BIT_SET_TP_SRC |
		BNXT_ULP_ACT_BIT_SET_TP_DST |
		BNXT_ULP_ACT_BIT_PUSH_VLAN |
		BNXT_ULP_ACT_BIT_SET_VLAN_PCP |
		BNXT_ULP_ACT_BIT_SET_VLAN_VID |
		BNXT_ULP_ACT_BIT_VXLAN_ENCAP |
		BNXT_ULP_ACT_BIT_GENEVE_ENCAP |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 7
	},
	[14] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_GOTO_CHAIN |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 7
	},
	[15] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_DROP |
		BNXT_ULP_ACT_BIT_SET_VLAN_PCP |
		BNXT_ULP_ACT_BIT_SET_VLAN_VID |
		BNXT_ULP_ACT_BIT_PUSH_VLAN |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_ACT_BIT_GOTO_CHAIN |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 8
	},
	[16] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_SET_IPV4_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV4_DST |
		BNXT_ULP_ACT_BIT_SET_IPV6_SRC |
		BNXT_ULP_ACT_BIT_SET_IPV6_DST |
		BNXT_ULP_ACT_BIT_SET_TP_SRC |
		BNXT_ULP_ACT_BIT_SET_TP_DST |
		BNXT_ULP_ACT_BIT_DEC_TTL |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 9
	},
	[17] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_ACT_BIT_SET_MAC_SRC |
		BNXT_ULP_ACT_BIT_SET_MAC_DST |
		BNXT_ULP_ACT_BIT_VXLAN_ENCAP |
		BNXT_ULP_ACT_BIT_GENEVE_ENCAP |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 10
	},
	[18] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_ACT_BIT_MULTIPLE_PORT |
		BNXT_ULP_ACT_BIT_VF_TO_VF |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 11
	},
	[19] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_SHARED |
		BNXT_ULP_ACT_BIT_SAMPLE |
		BNXT_ULP_ACT_BIT_VF_TO_VF |
		BNXT_ULP_ACT_BIT_COUNT |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 12
	},
	[20] = {
	.act_bitmap = { .bits =
		BNXT_ULP_ACT_BIT_NON_GENERIC |
		BNXT_ULP_ACT_BIT_GENERIC |
		BNXT_ULP_ACT_BIT_DELETE |
		BNXT_ULP_ACT_BIT_SHARED_SAMPLE |
		BNXT_ULP_FLOW_DIR_BITMASK_EGR },
	.act_tid = 12
	}
};

diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_class.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_class.c
new file mode 100644
index 000000000000..05c5a7ff1ff8
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_class.c
@@ -0,0 +1,5183 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright(c) 2014-2024 Broadcom
+ * All rights reserved.
+ */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Define the template match patterns */ +/* + * List of protocol matches + */ +struct bnxt_ulp_class_match_info ulp_class_match_list[] = { + [1] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB800000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + }, + }, + [2] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA00000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [3] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA00000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [4] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA40000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [5] = { + .app_id = 0, + .hdr_bitmap = { .bits = + 
BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA78000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + }, + }, + [6] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA36000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + }, + }, + [7] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4F000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [8] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46C00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [114] = 5, + 
[118] = 6, + [122] = 7, + }, + }, + [9] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4F000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [10] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46C00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [11] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49E00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [12] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + 
.field_opt_bitmap = 0xBA48D80000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [13] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBE20000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 5, + [90] = 6, + [92] = 7, + [94] = 8, + [96] = 9, + [98] = 10, + [100] = 11, + [102] = 12, + [104] = 13, + }, + }, + [14] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBE00000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 5, + [108] = 6, + [110] = 7, + [112] = 8, + }, + }, + [15] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC4000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 8, + [90] = 9, + [92] = 10, + [94] = 11, + [96] = 12, + [98] = 13, + [100] = 14, + [102] = 15, + [104] = 16, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [16] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + 
BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC0000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 8, + [108] = 9, + [110] = 10, + [112] = 11, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [17] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC4000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 8, + [90] = 9, + [92] = 10, + [94] = 11, + [96] = 12, + [98] = 13, + [100] = 14, + [102] = 15, + [104] = 16, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [18] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC0000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 8, + [108] = 9, + [110] = 10, + [112] = 11, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [19] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA58800000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 11, + [90] = 12, + [92] = 13, + [94] = 14, + [96] = 15, + [98] = 16, + [100] = 17, + [102] = 18, + [104] = 19, + [114] = 5, + 
[115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [20] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA58000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 11, + [108] = 12, + [110] = 13, + [112] = 14, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [21] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA7E200000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [88] = 13, + [90] = 14, + [92] = 15, + [94] = 16, + [96] = 17, + [98] = 18, + [100] = 19, + [102] = 20, + [104] = 21, + }, + }, + [22] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA37880000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [88] = 15, + [90] = 16, + [92] = 17, + [94] = 18, + [96] = 19, + [98] = 20, + [100] = 21, + [102] = 22, + [104] = 23, + }, + }, + [23] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + 
BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA7E000000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + }, + }, + [24] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA37800000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + }, + }, + [25] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC40000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [88] = 16, + [90] = 17, + [92] = 18, + [94] = 19, + [96] = 20, + [98] = 21, + [100] = 22, + [102] = 23, + [104] = 24, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [26] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 
0xBA46F10000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [88] = 18, + [90] = 19, + [92] = 20, + [94] = 21, + [96] = 22, + [98] = 23, + [100] = 24, + [102] = 25, + [104] = 26, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [27] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [106] = 16, + [108] = 17, + [110] = 18, + [112] = 19, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [28] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [106] = 18, + [108] = 19, + [110] = 20, + [112] = 21, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [29] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 
0xBA4FC40000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [88] = 16, + [90] = 17, + [92] = 18, + [94] = 19, + [96] = 20, + [98] = 21, + [100] = 22, + [102] = 23, + [104] = 24, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [30] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F10000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [88] = 18, + [90] = 19, + [92] = 20, + [94] = 21, + [96] = 22, + [98] = 23, + [100] = 24, + [102] = 25, + [104] = 26, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [31] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [106] = 16, + [108] = 17, + [110] = 18, + [112] = 19, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [32] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + 
.field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F00000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [106] = 18, + [108] = 19, + [110] = 20, + [112] = 21, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [33] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49F88000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [88] = 19, + [90] = 20, + [92] = 21, + [94] = 22, + [96] = 23, + [98] = 24, + [100] = 25, + [102] = 26, + [104] = 27, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [34] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA48DE2000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [88] = 21, + [90] = 22, + [92] = 23, + [94] = 24, + [96] = 25, + [98] = 26, + [100] = 27, + [102] = 28, + [104] = 29, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + 
}, + [35] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49F80000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [106] = 19, + [108] = 20, + [110] = 21, + [112] = 22, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [36] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA48DE0000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [106] = 21, + [108] = 22, + [110] = 23, + [112] = 24, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [37] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A170000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + 
[70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [38] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [39] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A170B00000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [40] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + 
.field_opt_bitmap = 0xB00685C2C0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [41] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A1702C0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [42] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C0B0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, 
+ [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [43] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A17C000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [89] = 24, + [91] = 25, + [93] = 26, + [95] = 27, + [97] = 28, + [99] = 29, + [101] = 30, + [103] = 31, + [105] = 32, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [44] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685F000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [89] = 26, + [91] = 27, + [93] = 28, + [95] = 29, + [97] = 30, + [99] = 31, + [101] = 32, + [103] = 33, + [105] = 34, + [106] = 15, + [108] = 16, + [110] = 17, + 
[112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [45] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A17C000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [107] = 24, + [108] = 14, + [109] = 25, + [110] = 15, + [111] = 26, + [112] = 16, + [113] = 27, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [46] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685F000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [107] = 26, + [108] = 16, + [109] = 27, + [110] = 17, + [111] = 28, + [112] = 18, + [113] = 29, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [47] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + 
.field_opt_bitmap = 0xB01A170BC0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [89] = 32, + [91] = 33, + [93] = 34, + [95] = 35, + [97] = 36, + [99] = 37, + [101] = 38, + [103] = 39, + [105] = 40, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [48] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C2F0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + [89] = 34, + [91] = 35, + [93] = 36, + [95] = 37, + [97] = 38, + [99] = 39, + [101] = 40, + [103] = 41, + [105] = 42, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [49] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + 
.field_opt_bitmap = 0xB01A1702F0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [89] = 34, + [91] = 35, + [93] = 36, + [95] = 37, + [97] = 38, + [99] = 39, + [101] = 40, + [103] = 41, + [105] = 42, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [50] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C0BC000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, + [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [89] = 36, + [91] = 37, + [93] = 38, + [95] = 39, + [97] = 40, + [99] = 41, + [101] = 42, + [103] = 43, + [105] = 44, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [51] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + 
BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A170BC0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [106] = 13, + [107] = 32, + [108] = 14, + [109] = 33, + [110] = 15, + [111] = 34, + [112] = 16, + [113] = 35, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [52] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C2F0000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + [106] = 15, + [107] = 34, + [108] = 16, + [109] = 35, + [110] = 17, + [111] = 36, + [112] = 18, + [113] = 37, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [53] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB01A1702F0000000, + .field_exclude_bitmap = 
0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [107] = 34, + [108] = 14, + [109] = 35, + [110] = 15, + [111] = 36, + [112] = 16, + [113] = 37, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [54] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00685C0BC000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, + [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [106] = 15, + [107] = 36, + [108] = 16, + [109] = 37, + [110] = 17, + [111] = 38, + [112] = 18, + [113] = 39, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [55] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F1 | + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x200A000000000000, + .field_opt_bitmap = 0x9000000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 2, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 
5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [56] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F1 | + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x2002800000000000, + .field_opt_bitmap = 0x9000000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 2, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [57] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010301800000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [56] = 2, + [57] = 21, + [58] = 3, + [59] = 22, + [60] = 4, + [61] = 23, + [62] = 5, + [63] = 24, + [64] = 6, + [65] = 25, + [66] = 7, + [67] = 26, + [68] = 8, + [69] = 27, + [70] = 9, + [71] = 28, + [106] = 10, + [108] = 11, + [110] = 12, + [112] = 13, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [58] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + 
.field_opt_bitmap = 0x80040C0600000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [38] = 3, + [40] = 4, + [42] = 5, + [44] = 6, + [46] = 7, + [48] = 8, + [50] = 9, + [52] = 10, + [54] = 11, + [57] = 23, + [59] = 24, + [61] = 25, + [63] = 26, + [65] = 27, + [67] = 28, + [69] = 29, + [71] = 30, + [106] = 12, + [108] = 13, + [110] = 14, + [112] = 15, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [59] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010300600000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [37] = 21, + [39] = 22, + [41] = 23, + [43] = 24, + [45] = 25, + [47] = 26, + [49] = 27, + [51] = 28, + [53] = 29, + [55] = 30, + [56] = 2, + [58] = 3, + [60] = 4, + [62] = 5, + [64] = 6, + [66] = 7, + [68] = 8, + [70] = 9, + [106] = 10, + [108] = 11, + [110] = 12, + [112] = 13, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [60] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C0180000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [37] = 23, + [38] = 3, + [39] = 24, + [40] = 4, + [41] = 25, + [42] = 5, + [43] = 26, + [44] = 6, + [45] = 27, + [46] = 7, + [47] = 28, + [48] = 8, + [49] = 29, + 
[50] = 9, + [51] = 30, + [52] = 10, + [53] = 31, + [54] = 11, + [55] = 32, + [106] = 12, + [108] = 13, + [110] = 14, + [112] = 15, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [61] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010301E00000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [56] = 2, + [57] = 21, + [58] = 3, + [59] = 22, + [60] = 4, + [61] = 23, + [62] = 5, + [63] = 24, + [64] = 6, + [65] = 25, + [66] = 7, + [67] = 26, + [68] = 8, + [69] = 27, + [70] = 9, + [71] = 28, + [89] = 29, + [91] = 30, + [93] = 31, + [95] = 32, + [97] = 33, + [99] = 34, + [101] = 35, + [103] = 36, + [105] = 37, + [106] = 10, + [108] = 11, + [110] = 12, + [112] = 13, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [62] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C0780000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [38] = 3, + [40] = 4, + [42] = 5, + [44] = 6, + [46] = 7, + [48] = 8, + [50] = 9, + [52] = 10, + [54] = 11, + [57] = 23, + [59] = 24, + [61] = 25, + [63] = 26, + [65] = 27, + [67] = 28, + [69] = 29, + [71] = 30, + [89] = 31, + [91] = 32, + [93] = 33, + [95] = 34, + [97] = 35, + [99] = 36, + [101] = 37, + [103] = 38, + [105] = 39, + [106] = 12, 
+ [108] = 13, + [110] = 14, + [112] = 15, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [63] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010300780000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [37] = 21, + [39] = 22, + [41] = 23, + [43] = 24, + [45] = 25, + [47] = 26, + [49] = 27, + [51] = 28, + [53] = 29, + [55] = 30, + [56] = 2, + [58] = 3, + [60] = 4, + [62] = 5, + [64] = 6, + [66] = 7, + [68] = 8, + [70] = 9, + [89] = 31, + [91] = 32, + [93] = 33, + [95] = 34, + [97] = 35, + [99] = 36, + [101] = 37, + [103] = 38, + [105] = 39, + [106] = 10, + [108] = 11, + [110] = 12, + [112] = 13, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [64] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C01E0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [37] = 23, + [38] = 3, + [39] = 24, + [40] = 4, + [41] = 25, + [42] = 5, + [43] = 26, + [44] = 6, + [45] = 27, + [46] = 7, + [47] = 28, + [48] = 8, + [49] = 29, + [50] = 9, + [51] = 30, + [52] = 10, + [53] = 31, + [54] = 11, + [55] = 32, + [89] = 33, + [91] = 34, + [93] = 35, + [95] = 36, + [97] = 37, + [99] = 38, + [101] = 39, + [103] = 40, + [105] = 41, + [106] = 12, + [108] = 13, + [110] = 14, + [112] = 
15, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [65] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010301E00000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [56] = 2, + [57] = 21, + [58] = 3, + [59] = 22, + [60] = 4, + [61] = 23, + [62] = 5, + [63] = 24, + [64] = 6, + [65] = 25, + [66] = 7, + [67] = 26, + [68] = 8, + [69] = 27, + [70] = 9, + [71] = 28, + [106] = 10, + [107] = 29, + [108] = 11, + [109] = 30, + [110] = 12, + [111] = 31, + [112] = 13, + [113] = 32, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [66] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C0780000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [38] = 3, + [40] = 4, + [42] = 5, + [44] = 6, + [46] = 7, + [48] = 8, + [50] = 9, + [52] = 10, + [54] = 11, + [57] = 23, + [59] = 24, + [61] = 25, + [63] = 26, + [65] = 27, + [67] = 28, + [69] = 29, + [71] = 30, + [106] = 12, + [107] = 31, + [108] = 13, + [109] = 32, + [110] = 14, + [111] = 33, + [112] = 15, + [113] = 34, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [67] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + 
BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0xC0800000000000, + .field_opt_bitmap = 0x8010300780000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [37] = 21, + [39] = 22, + [41] = 23, + [43] = 24, + [45] = 25, + [47] = 26, + [49] = 27, + [51] = 28, + [53] = 29, + [55] = 30, + [56] = 2, + [58] = 3, + [60] = 4, + [62] = 5, + [64] = 6, + [66] = 7, + [68] = 8, + [70] = 9, + [106] = 10, + [107] = 31, + [108] = 11, + [109] = 32, + [110] = 12, + [111] = 33, + [112] = 13, + [113] = 34, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [68] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C01E0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [36] = 2, + [37] = 23, + [38] = 3, + [39] = 24, + [40] = 4, + [41] = 25, + [42] = 5, + [43] = 26, + [44] = 6, + [45] = 27, + [46] = 7, + [47] = 28, + [48] = 8, + [49] = 29, + [50] = 9, + [51] = 30, + [52] = 10, + [53] = 31, + [54] = 11, + [55] = 32, + [106] = 12, + [107] = 33, + [108] = 13, + [109] = 34, + [110] = 14, + [111] = 35, + [112] = 15, + [113] = 36, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [69] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_ICMP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap 
= 0xC0800000000000, + .field_opt_bitmap = 0x8010300600000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 4, + .field_list = { + [1] = 1, + [11] = 18, + [13] = 19, + [15] = 20, + [27] = 31, + [29] = 32, + [31] = 33, + [33] = 34, + [35] = 35, + [37] = 21, + [39] = 22, + [41] = 23, + [43] = 24, + [45] = 25, + [47] = 26, + [49] = 27, + [51] = 28, + [53] = 29, + [55] = 30, + [56] = 2, + [58] = 3, + [60] = 4, + [62] = 5, + [64] = 6, + [66] = 7, + [68] = 8, + [70] = 9, + [106] = 10, + [108] = 11, + [110] = 12, + [112] = 13, + [126] = 14, + [127] = 15, + [128] = 16, + [129] = 17, + }, + }, + [70] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_F2 | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_ICMP | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x30200000000000, + .field_opt_bitmap = 0x80040C0180000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 4, + .field_list = { + [1] = 1, + [11] = 20, + [13] = 21, + [15] = 22, + [27] = 33, + [29] = 34, + [31] = 35, + [33] = 36, + [35] = 37, + [36] = 2, + [37] = 23, + [38] = 3, + [39] = 24, + [40] = 4, + [41] = 25, + [42] = 5, + [43] = 26, + [44] = 6, + [45] = 27, + [46] = 7, + [47] = 28, + [48] = 8, + [49] = 29, + [50] = 9, + [51] = 30, + [52] = 10, + [53] = 31, + [54] = 11, + [55] = 32, + [106] = 12, + [108] = 13, + [110] = 14, + [112] = 15, + [126] = 16, + [127] = 17, + [128] = 18, + [129] = 19, + }, + }, + [71] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_GENEVE | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xA002800000000000, + .field_exclude_bitmap = 0x2000000000000000, + .class_tid = 1, + .flow_pattern_id = 5, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 
6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + }, + }, + [72] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_SVIF | + BNXT_ULP_FLOW_DIR_BITMASK_ING }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0x8000000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 1, + .flow_pattern_id = 6, + .field_list = { + [1] = 1, + }, + }, + [73] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB800000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + }, + }, + [74] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA00000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [75] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA00000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [76] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA40000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [114] = 5, + [115] = 8, + 
[118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [77] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA78000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + }, + }, + [78] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA36000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + }, + }, + [79] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4F000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [80] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46C00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, 
+ [50] = 15, + [52] = 16, + [54] = 17, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [81] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4F000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [82] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46C00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [83] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49E00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [84] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, 
+ .field_opt_bitmap = 0xBA48D80000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [85] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBE20000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 5, + [90] = 6, + [92] = 7, + [94] = 8, + [96] = 9, + [98] = 10, + [100] = 11, + [102] = 12, + [104] = 13, + }, + }, + [86] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBE00000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 5, + [108] = 6, + [110] = 7, + [112] = 8, + }, + }, + [87] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC4000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 8, + [90] = 9, + [92] = 10, + [94] = 11, + [96] = 12, + [98] = 13, + [100] = 14, + [102] = 15, + [104] = 16, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [88] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + 
.field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC0000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 8, + [108] = 9, + [110] = 10, + [112] = 11, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [89] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC4000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 8, + [90] = 9, + [92] = 10, + [94] = 11, + [96] = 12, + [98] = 13, + [100] = 14, + [102] = 15, + [104] = 16, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [90] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBAC0000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 8, + [108] = 9, + [110] = 10, + [112] = 11, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [91] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA58800000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [88] = 11, + [90] = 12, + [92] = 13, + [94] = 14, + [96] = 15, + [98] = 16, + [100] = 17, + [102] = 18, + [104] = 19, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [92] = { + .app_id = 0, + .hdr_bitmap = { .bits = 
+ BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA58000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [106] = 11, + [108] = 12, + [110] = 13, + [112] = 14, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [93] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA7E200000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [88] = 13, + [90] = 14, + [92] = 15, + [94] = 16, + [96] = 17, + [98] = 18, + [100] = 19, + [102] = 20, + [104] = 21, + }, + }, + [94] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA37880000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [88] = 15, + [90] = 16, + [92] = 17, + [94] = 18, + [96] = 19, + [98] = 20, + [100] = 21, + [102] = 22, + [104] = 23, + }, + }, + [95] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA7E000000000000, + .field_exclude_bitmap = 
0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + }, + }, + [96] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA37800000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + }, + }, + [97] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC40000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [88] = 16, + [90] = 17, + [92] = 18, + [94] = 19, + [96] = 20, + [98] = 21, + [100] = 22, + [102] = 23, + [104] = 24, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [98] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F10000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 
13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [88] = 18, + [90] = 19, + [92] = 20, + [94] = 21, + [96] = 22, + [98] = 23, + [100] = 24, + [102] = 25, + [104] = 26, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [99] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [106] = 16, + [108] = 17, + [110] = 18, + [112] = 19, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [100] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [106] = 18, + [108] = 19, + [110] = 20, + [112] = 21, + [114] = 5, + [118] = 6, + [122] = 7, + }, + }, + [101] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC40000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [88] = 16, + 
[90] = 17, + [92] = 18, + [94] = 19, + [96] = 20, + [98] = 21, + [100] = 22, + [102] = 23, + [104] = 24, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [102] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F10000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 17, + [88] = 18, + [90] = 19, + [92] = 20, + [94] = 21, + [96] = 22, + [98] = 23, + [100] = 24, + [102] = 25, + [104] = 26, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [103] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA4FC00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 8, + [58] = 9, + [60] = 10, + [62] = 11, + [64] = 12, + [66] = 13, + [68] = 14, + [70] = 15, + [106] = 16, + [108] = 17, + [110] = 18, + [112] = 19, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [104] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA46F00000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 8, + [38] = 9, + [40] = 10, + [42] = 11, + [44] = 12, + [46] = 13, + [48] = 14, + [50] = 15, + [52] = 16, + [54] = 
17, + [106] = 18, + [108] = 19, + [110] = 20, + [112] = 21, + [115] = 5, + [119] = 6, + [123] = 7, + }, + }, + [105] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49F88000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [88] = 19, + [90] = 20, + [92] = 21, + [94] = 22, + [96] = 23, + [98] = 24, + [100] = 25, + [102] = 26, + [104] = 27, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [106] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA48DE2000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [88] = 21, + [90] = 22, + [92] = 23, + [94] = 24, + [96] = 25, + [98] = 26, + [100] = 27, + [102] = 28, + [104] = 29, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [107] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA49F80000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 
0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [56] = 11, + [58] = 12, + [60] = 13, + [62] = 14, + [64] = 15, + [66] = 16, + [68] = 17, + [70] = 18, + [106] = 19, + [108] = 20, + [110] = 21, + [112] = 22, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [108] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_OO_VLAN | + BNXT_ULP_HDR_BIT_OI_VLAN | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xBA48DE0000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 0, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 11, + [38] = 12, + [40] = 13, + [42] = 14, + [44] = 15, + [46] = 16, + [48] = 17, + [50] = 18, + [52] = 19, + [54] = 20, + [106] = 21, + [108] = 22, + [110] = 23, + [112] = 24, + [114] = 5, + [115] = 8, + [118] = 6, + [119] = 9, + [122] = 7, + [123] = 10, + }, + }, + [109] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A170000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [110] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 
0xB00285C000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [111] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A170B00000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [112] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C2C0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + 
[106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [113] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A1702C0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [114] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C0B0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, + [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [115] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + 
BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A17C000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [89] = 24, + [91] = 25, + [93] = 26, + [95] = 27, + [97] = 28, + [99] = 29, + [101] = 30, + [103] = 31, + [105] = 32, + [106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [116] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285F000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [89] = 26, + [91] = 27, + [93] = 28, + [95] = 29, + [97] = 30, + [99] = 31, + [101] = 32, + [103] = 33, + [105] = 34, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [117] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A17C000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] 
= 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [107] = 24, + [108] = 14, + [109] = 25, + [110] = 15, + [111] = 26, + [112] = 16, + [113] = 27, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [118] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285F000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [107] = 26, + [108] = 16, + [109] = 27, + [110] = 17, + [111] = 28, + [112] = 18, + [113] = 29, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [119] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A170BC0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [89] = 32, + [91] = 33, + [93] = 34, + [95] = 35, + [97] = 36, + [99] = 37, + [101] = 38, + [103] = 39, + [105] = 40, + 
[106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [120] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C2F0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + [89] = 34, + [91] = 35, + [93] = 36, + [95] = 37, + [97] = 38, + [99] = 39, + [101] = 40, + [103] = 41, + [105] = 42, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [121] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A1702F0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [89] = 34, + [91] = 35, + [93] = 36, + [95] = 37, + [97] = 38, + [99] = 39, + [101] = 40, + [103] = 41, + [105] = 42, + 
[106] = 13, + [108] = 14, + [110] = 15, + [112] = 16, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [122] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_TCP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C0BC000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, + [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [89] = 36, + [91] = 37, + [93] = 38, + [95] = 39, + [97] = 40, + [99] = 41, + [101] = 42, + [103] = 43, + [105] = 44, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [123] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A170BC0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [56] = 5, + [57] = 24, + [58] = 6, + [59] = 25, + [60] = 7, + [61] = 26, + [62] = 8, + [63] = 27, + [64] = 9, + [65] = 28, + [66] = 10, + [67] = 29, + [68] = 11, + [69] = 30, + [70] = 12, + [71] = 31, + [106] = 13, + [107] = 32, + [108] = 14, + [109] = 33, + [110] = 15, + [111] = 34, + [112] = 16, + [113] = 35, + [126] = 17, 
+ [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [124] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV6 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C2F0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [57] = 26, + [59] = 27, + [61] = 28, + [63] = 29, + [65] = 30, + [67] = 31, + [69] = 32, + [71] = 33, + [106] = 15, + [107] = 34, + [108] = 16, + [109] = 35, + [110] = 17, + [111] = 36, + [112] = 18, + [113] = 37, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [125] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV6 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00A1702F0000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 21, + [12] = 3, + [13] = 22, + [14] = 4, + [15] = 23, + [37] = 24, + [39] = 25, + [41] = 26, + [43] = 27, + [45] = 28, + [47] = 29, + [49] = 30, + [51] = 31, + [53] = 32, + [55] = 33, + [56] = 5, + [58] = 6, + [60] = 7, + [62] = 8, + [64] = 9, + [66] = 10, + [68] = 11, + [70] = 12, + [106] = 13, + [107] = 34, + [108] = 14, + [109] = 35, + [110] = 15, + [111] = 36, + [112] = 16, + [113] = 37, + [126] = 17, + [127] = 18, + [128] = 19, + [129] = 20, + }, + }, + [126] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | 
+ BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_VXLAN | + BNXT_ULP_HDR_BIT_I_ETH | + BNXT_ULP_HDR_BIT_I_IPV4 | + BNXT_ULP_HDR_BIT_I_UDP | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB00285C0BC000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 1, + .field_list = { + [1] = 1, + [10] = 2, + [11] = 23, + [12] = 3, + [13] = 24, + [14] = 4, + [15] = 25, + [36] = 5, + [37] = 26, + [38] = 6, + [39] = 27, + [40] = 7, + [41] = 28, + [42] = 8, + [43] = 29, + [44] = 9, + [45] = 30, + [46] = 10, + [47] = 31, + [48] = 11, + [49] = 32, + [50] = 12, + [51] = 33, + [52] = 13, + [53] = 34, + [54] = 14, + [55] = 35, + [106] = 15, + [107] = 36, + [108] = 16, + [109] = 37, + [110] = 17, + [111] = 38, + [112] = 18, + [113] = 39, + [126] = 19, + [127] = 20, + [128] = 21, + [129] = 22, + }, + }, + [127] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_O_ETH | + BNXT_ULP_HDR_BIT_O_IPV4 | + BNXT_ULP_HDR_BIT_O_UDP | + BNXT_ULP_HDR_BIT_T_GENEVE | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0xB002800000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 2, + .field_list = { + [1] = 1, + [10] = 2, + [12] = 3, + [14] = 4, + [36] = 5, + [38] = 6, + [40] = 7, + [42] = 8, + [44] = 9, + [46] = 10, + [48] = 11, + [50] = 12, + [52] = 13, + [54] = 14, + [106] = 15, + [108] = 16, + [110] = 17, + [112] = 18, + }, + }, + [128] = { + .app_id = 0, + .hdr_bitmap = { .bits = + BNXT_ULP_HDR_BIT_SVIF | + BNXT_ULP_FLOW_DIR_BITMASK_EGR }, + .field_man_bitmap = 0x0, + .field_opt_bitmap = 0x8000000000000000, + .field_exclude_bitmap = 0x0, + .class_tid = 2, + .flow_pattern_id = 3, + .field_list = { + [1] = 1, + }, + } +}; + diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_enum.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_enum.h new file mode 100644 index 
000000000000..202b69f147f8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_enum.h @@ -0,0 +1,2120 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#ifndef ULP_TEMPLATE_DB_H_ +#define ULP_TEMPLATE_DB_H_ + +#define BNXT_ULP_REGFILE_MAX_SZ 104 +#define BNXT_ULP_MAX_NUM_DEVICES 5 +#define BNXT_ULP_LOG2_MAX_NUM_DEV 2.32192809488736 +#define BNXT_ULP_GEN_TBL_MAX_SZ 60 +#define BNXT_ULP_ALLOCATOR_TBL_MAX_SZ 2 +#define BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ 129 +#define BNXT_ULP_ACT_MATCH_LIST_MAX_SZ 21 +#define BNXT_ULP_APP_RESOURCE_RESV_LIST_MAX_SZ 0 +#define BNXT_ULP_GLB_RESOURCE_TBL_MAX_SZ 64 +#define BNXT_ULP_APP_GLB_RESOURCE_TBL_MAX_SZ 0 +#define BNXT_ULP_RESOURCE_RESV_LIST_MAX_SZ 73 +#define BNXT_ULP_APP_CAP_TBL_MAX_SZ 3 +#define BNXT_ULP_COND_GOTO_REJECT 1023 +#define BNXT_ULP_COND_GOTO_RF 0x10000 +#define BNXT_ULP_APP_ID_CONFIG 0 +#define BNXT_ULP_GLB_FIELD_TBL_SIZE 135 +#define BNXT_ULP_GLB_SIG_TBL_SIZE 1 +#define ULP_WH_PLUS_CLASS_TMPL_LIST_SIZE 5 +#define ULP_WH_PLUS_CLASS_TBL_LIST_SIZE 74 +#define ULP_WH_PLUS_CLASS_KEY_INFO_LIST_SIZE 507 +#define ULP_WH_PLUS_CLASS_KEY_EXT_LIST_SIZE 0 +#define ULP_WH_PLUS_CLASS_IDENT_LIST_SIZE 20 +#define ULP_WH_PLUS_CLASS_RESULT_FIELD_LIST_SIZE 552 +#define ULP_WH_PLUS_CLASS_COND_LIST_SIZE 43 +#define ULP_WH_PLUS_CLASS_COND_OPER_LIST_SIZE 0 +#define ULP_THOR_CLASS_TMPL_LIST_SIZE 5 +#define ULP_THOR_CLASS_TBL_LIST_SIZE 142 +#define ULP_THOR_CLASS_KEY_INFO_LIST_SIZE 640 +#define ULP_THOR_CLASS_KEY_EXT_LIST_SIZE 635 +#define ULP_THOR_CLASS_IDENT_LIST_SIZE 47 +#define ULP_THOR_CLASS_RESULT_FIELD_LIST_SIZE 1266 +#define ULP_THOR_CLASS_COND_LIST_SIZE 3157 +#define ULP_THOR_CLASS_COND_OPER_LIST_SIZE 7 +#define ULP_THOR2_CLASS_TMPL_LIST_SIZE 5 +#define ULP_THOR2_CLASS_TBL_LIST_SIZE 125 +#define ULP_THOR2_CLASS_KEY_INFO_LIST_SIZE 673 +#define ULP_THOR2_CLASS_KEY_EXT_LIST_SIZE 632 +#define ULP_THOR2_CLASS_IDENT_LIST_SIZE 53 
+#define ULP_THOR2_CLASS_RESULT_FIELD_LIST_SIZE 1519 +#define ULP_THOR2_CLASS_COND_LIST_SIZE 3312 +#define ULP_THOR2_CLASS_COND_OPER_LIST_SIZE 7 +#define ULP_WH_PLUS_ACT_TMPL_LIST_SIZE 13 +#define ULP_WH_PLUS_ACT_TBL_LIST_SIZE 47 +#define ULP_WH_PLUS_ACT_KEY_INFO_LIST_SIZE 2 +#define ULP_WH_PLUS_ACT_KEY_EXT_LIST_SIZE 0 +#define ULP_WH_PLUS_ACT_IDENT_LIST_SIZE 1 +#define ULP_WH_PLUS_ACT_RESULT_FIELD_LIST_SIZE 616 +#define ULP_WH_PLUS_ACT_COND_LIST_SIZE 73 +#define ULP_WH_PLUS_ACT_COND_OPER_LIST_SIZE 6 +#define ULP_THOR_ACT_TMPL_LIST_SIZE 13 +#define ULP_THOR_ACT_TBL_LIST_SIZE 117 +#define ULP_THOR_ACT_KEY_INFO_LIST_SIZE 85 +#define ULP_THOR_ACT_KEY_EXT_LIST_SIZE 5 +#define ULP_THOR_ACT_IDENT_LIST_SIZE 19 +#define ULP_THOR_ACT_RESULT_FIELD_LIST_SIZE 492 +#define ULP_THOR_ACT_COND_LIST_SIZE 92 +#define ULP_THOR_ACT_COND_OPER_LIST_SIZE 0 +#define ULP_THOR2_ACT_TMPL_LIST_SIZE 13 +#define ULP_THOR2_ACT_TBL_LIST_SIZE 115 +#define ULP_THOR2_ACT_KEY_INFO_LIST_SIZE 85 +#define ULP_THOR2_ACT_KEY_EXT_LIST_SIZE 5 +#define ULP_THOR2_ACT_IDENT_LIST_SIZE 40 +#define ULP_THOR2_ACT_RESULT_FIELD_LIST_SIZE 446 +#define ULP_THOR2_ACT_COND_LIST_SIZE 96 +#define ULP_THOR2_ACT_COND_OPER_LIST_SIZE 0 + +enum bnxt_ulp_act_bit { + BNXT_ULP_ACT_BIT_MARK = 0x0000000000000001, + BNXT_ULP_ACT_BIT_DROP = 0x0000000000000002, + BNXT_ULP_ACT_BIT_COUNT = 0x0000000000000004, + BNXT_ULP_ACT_BIT_RSS = 0x0000000000000008, + BNXT_ULP_ACT_BIT_METER = 0x0000000000000010, + BNXT_ULP_ACT_BIT_VXLAN_DECAP = 0x0000000000000020, + BNXT_ULP_ACT_BIT_POP_MPLS = 0x0000000000000040, + BNXT_ULP_ACT_BIT_PUSH_MPLS = 0x0000000000000080, + BNXT_ULP_ACT_BIT_MAC_SWAP = 0x0000000000000100, + BNXT_ULP_ACT_BIT_SET_MAC_SRC = 0x0000000000000200, + BNXT_ULP_ACT_BIT_SET_MAC_DST = 0x0000000000000400, + BNXT_ULP_ACT_BIT_POP_VLAN = 0x0000000000000800, + BNXT_ULP_ACT_BIT_PUSH_VLAN = 0x0000000000001000, + BNXT_ULP_ACT_BIT_SET_VLAN_PCP = 0x0000000000002000, + BNXT_ULP_ACT_BIT_SET_VLAN_VID = 0x0000000000004000, + 
BNXT_ULP_ACT_BIT_SET_IPV4_SRC = 0x0000000000008000, + BNXT_ULP_ACT_BIT_SET_IPV4_DST = 0x0000000000010000, + BNXT_ULP_ACT_BIT_SET_IPV6_SRC = 0x0000000000020000, + BNXT_ULP_ACT_BIT_SET_IPV6_DST = 0x0000000000040000, + BNXT_ULP_ACT_BIT_DEC_TTL = 0x0000000000080000, + BNXT_ULP_ACT_BIT_SET_TTL = 0x0000000000100000, + BNXT_ULP_ACT_BIT_SET_TP_SRC = 0x0000000000200000, + BNXT_ULP_ACT_BIT_SET_TP_DST = 0x0000000000400000, + BNXT_ULP_ACT_BIT_VXLAN_ENCAP = 0x0000000000800000, + BNXT_ULP_ACT_BIT_JUMP = 0x0000000001000000, + BNXT_ULP_ACT_BIT_SHARED = 0x0000000002000000, + BNXT_ULP_ACT_BIT_SAMPLE = 0x0000000004000000, + BNXT_ULP_ACT_BIT_SHARED_SAMPLE = 0x0000000008000000, + BNXT_ULP_ACT_BIT_QUEUE = 0x0000000010000000, + BNXT_ULP_ACT_BIT_DELETE = 0x0000000020000000, + BNXT_ULP_ACT_BIT_UPDATE = 0x0000000040000000, + BNXT_ULP_ACT_BIT_SHARED_METER = 0x0000000080000000, + BNXT_ULP_ACT_BIT_METER_PROFILE = 0x0000000100000000, + BNXT_ULP_ACT_BIT_GOTO_CHAIN = 0x0000000200000000, + BNXT_ULP_ACT_BIT_VF_TO_VF = 0x0000000400000000, + BNXT_ULP_ACT_BIT_IP_ENCAP = 0x0000000800000000, + BNXT_ULP_ACT_BIT_IP_DECAP = 0x0000001000000000, + BNXT_ULP_ACT_BIT_L2_ENCAP = 0x0000002000000000, + BNXT_ULP_ACT_BIT_L2_DECAP = 0x0000004000000000, + BNXT_ULP_ACT_BIT_GENEVE_DECAP = 0x0000008000000000, + BNXT_ULP_ACT_BIT_GENEVE_ENCAP = 0x0000010000000000, + BNXT_ULP_ACT_BIT_MULTIPLE_PORT = 0x0000020000000000, + BNXT_ULP_ACT_BIT_NON_GENERIC = 0x0000040000000000, + BNXT_ULP_ACT_BIT_GENERIC = 0x0000080000000000, + BNXT_ULP_ACT_BIT_LAST = 0x0000100000000000 +}; + +enum bnxt_ulp_cf_bit { + BNXT_ULP_CF_BIT_IS_TUNNEL = 0x0000000000000001, + BNXT_ULP_CF_BIT_UPAR1 = 0x0000000000000002, + BNXT_ULP_CF_BIT_UPAR2 = 0x0000000000000004, + BNXT_ULP_CF_BIT_L2_CNTXT_ID = 0x0000000000000008, + BNXT_ULP_CF_BIT_RECYCLE_CNT = 0x0000000000000010, + BNXT_ULP_CF_BIT_METADATA = 0x0000000000000020, + BNXT_ULP_CF_BIT_L2_ONLY = 0x0000000000000040, + BNXT_ULP_CF_BIT_DIX_TRAFFIC = 0x0000000000000080, + BNXT_ULP_CF_BIT_GROUP_ID = 
0x0000000000000100, + BNXT_ULP_CF_BIT_DEF_PRIO = 0x0000000000000200, + BNXT_ULP_CF_BIT_LAST = 0x0000000000000400 +}; + +enum bnxt_ulp_dev_ft { + BNXT_ULP_DEV_FT_STAT_SW_AGG = 0x0000000000000001, + BNXT_ULP_DEV_FT_STAT_PARENT_AGG = 0x0000000000000002, + BNXT_ULP_DEV_FT_LAST = 0x0000000000000004 +}; + +enum bnxt_ulp_hdr_bit { + BNXT_ULP_HDR_BIT_O_ETH = 0x0000000000000001, + BNXT_ULP_HDR_BIT_OO_VLAN = 0x0000000000000002, + BNXT_ULP_HDR_BIT_OI_VLAN = 0x0000000000000004, + BNXT_ULP_HDR_BIT_O_IPV4 = 0x0000000000000008, + BNXT_ULP_HDR_BIT_O_IPV6 = 0x0000000000000010, + BNXT_ULP_HDR_BIT_O_TCP = 0x0000000000000020, + BNXT_ULP_HDR_BIT_O_UDP = 0x0000000000000040, + BNXT_ULP_HDR_BIT_O_ICMP = 0x0000000000000080, + BNXT_ULP_HDR_BIT_T_VXLAN = 0x0000000000000100, + BNXT_ULP_HDR_BIT_T_GRE = 0x0000000000000200, + BNXT_ULP_HDR_BIT_I_ETH = 0x0000000000000400, + BNXT_ULP_HDR_BIT_IO_VLAN = 0x0000000000000800, + BNXT_ULP_HDR_BIT_II_VLAN = 0x0000000000001000, + BNXT_ULP_HDR_BIT_I_IPV4 = 0x0000000000002000, + BNXT_ULP_HDR_BIT_I_IPV6 = 0x0000000000004000, + BNXT_ULP_HDR_BIT_I_TCP = 0x0000000000008000, + BNXT_ULP_HDR_BIT_I_UDP = 0x0000000000010000, + BNXT_ULP_HDR_BIT_I_ICMP = 0x0000000000020000, + BNXT_ULP_HDR_BIT_O_ECPRI = 0x0000000000040000, + BNXT_ULP_HDR_BIT_O_ROE = 0x0000000000080000, + BNXT_ULP_HDR_BIT_F1 = 0x0000000000100000, + BNXT_ULP_HDR_BIT_F2 = 0x0000000000200000, + BNXT_ULP_HDR_BIT_SVIF_IGNORE = 0x0000000000400000, + BNXT_ULP_HDR_BIT_SVIF = 0x0000000000800000, + BNXT_ULP_HDR_BIT_O_SRV6 = 0x0000000001000000, + BNXT_ULP_HDR_BIT_I_BTH = 0x0000000002000000, + BNXT_ULP_HDR_BIT_O_BTH = 0x0000000004000000, + BNXT_ULP_HDR_BIT_T_VXLAN_GPE = 0x0000000008000000, + BNXT_ULP_HDR_BIT_T_GENEVE = 0x0000000010000000, + BNXT_ULP_HDR_BIT_OO_UNTAGGED = 0x0000000020000000, + BNXT_ULP_HDR_BIT_OI_UNTAGGED = 0x0000000040000000, + BNXT_ULP_HDR_BIT_IO_UNTAGGED = 0x0000000080000000, + BNXT_ULP_HDR_BIT_II_UNTAGGED = 0x0000000100000000, + BNXT_ULP_HDR_BIT_NON_TUNNEL = 0x0000000200000000, + 
BNXT_ULP_HDR_BIT_L2_ONLY = 0x0000000400000000, + BNXT_ULP_HDR_BIT_O_L4_FLOW = 0x0000000800000000, + BNXT_ULP_HDR_BIT_I_L4_FLOW = 0x0000001000000000, + BNXT_ULP_HDR_BIT_NON_GENERIC = 0x0000002000000000, + BNXT_ULP_HDR_BIT_GENERIC = 0x0000004000000000, + BNXT_ULP_HDR_BIT_O_L2_FILTER = 0x0000008000000000, + BNXT_ULP_HDR_BIT_I_L2_FILTER = 0x0000010000000000, + BNXT_ULP_HDR_BIT_LAST = 0x0000020000000000 +}; + +enum bnxt_ulp_accept_opc { + BNXT_ULP_ACCEPT_OPC_ALWAYS = 0, + BNXT_ULP_ACCEPT_OPC_FLOW_SIG_ID_MATCH = 1, + BNXT_ULP_ACCEPT_OPC_LAST = 2 +}; + +enum bnxt_ulp_act_type { + BNXT_ULP_ACT_TYPE_NOT_SUPPORTED = 0, + BNXT_ULP_ACT_TYPE_SUPPORTED = 1, + BNXT_ULP_ACT_TYPE_END = 2, + BNXT_ULP_ACT_TYPE_LAST = 3 +}; + +enum bnxt_ulp_alloc_tbl_opc { + BNXT_ULP_ALLOC_TBL_OPC_NOP = 0, + BNXT_ULP_ALLOC_TBL_OPC_ALLOC = 1, + BNXT_ULP_ALLOC_TBL_OPC_LAST = 2 +}; + +enum bnxt_ulp_byte_order { + BNXT_ULP_BYTE_ORDER_BE = 0, + BNXT_ULP_BYTE_ORDER_LE = 1, + BNXT_ULP_BYTE_ORDER_LAST = 2 +}; + +enum bnxt_ulp_cf_idx { + BNXT_ULP_CF_IDX_NOT_USED = 0, + BNXT_ULP_CF_IDX_MPLS_TAG_NUM = 1, + BNXT_ULP_CF_IDX_O_VTAG_NUM = 2, + BNXT_ULP_CF_IDX_O_HAS_VTAG = 3, + BNXT_ULP_CF_IDX_O_ONE_VTAG = 4, + BNXT_ULP_CF_IDX_O_TWO_VTAGS = 5, + BNXT_ULP_CF_IDX_I_VTAG_NUM = 6, + BNXT_ULP_CF_IDX_I_HAS_VTAG = 7, + BNXT_ULP_CF_IDX_I_ONE_VTAG = 8, + BNXT_ULP_CF_IDX_I_TWO_VTAGS = 9, + BNXT_ULP_CF_IDX_INCOMING_IF = 10, + BNXT_ULP_CF_IDX_DIRECTION = 11, + BNXT_ULP_CF_IDX_SVIF_FLAG = 12, + BNXT_ULP_CF_IDX_O_L3 = 13, + BNXT_ULP_CF_IDX_I_L3 = 14, + BNXT_ULP_CF_IDX_O_L4 = 15, + BNXT_ULP_CF_IDX_I_L4 = 16, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT = 17, + BNXT_ULP_CF_IDX_O_L4_DST_PORT = 18, + BNXT_ULP_CF_IDX_I_L4_SRC_PORT = 19, + BNXT_ULP_CF_IDX_I_L4_DST_PORT = 20, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK = 21, + BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK = 22, + BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK = 23, + BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK = 24, + BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT = 25, + BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT = 26, + 
BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT = 27, + BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT = 28, + BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID = 29, + BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID = 30, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID = 31, + BNXT_ULP_CF_IDX_I_L3_PROTO_ID = 32, + BNXT_ULP_CF_IDX_O_L3_TTL = 33, + BNXT_ULP_CF_IDX_DEV_PORT_ID = 34, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF = 35, + BNXT_ULP_CF_IDX_DRV_FUNC_SPIF = 36, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF = 37, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC = 38, + BNXT_ULP_CF_IDX_DRV_FUNC_PHY_PORT = 39, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF = 40, + BNXT_ULP_CF_IDX_VF_FUNC_SPIF = 41, + BNXT_ULP_CF_IDX_VF_FUNC_PARIF = 42, + BNXT_ULP_CF_IDX_VF_FUNC_VNIC = 43, + BNXT_ULP_CF_IDX_VNIC = 44, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF = 45, + BNXT_ULP_CF_IDX_PHY_PORT_SPIF = 46, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF = 47, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT = 48, + BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG = 49, + BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG = 50, + BNXT_ULP_CF_IDX_ACT_DEC_TTL = 51, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL = 52, + BNXT_ULP_CF_IDX_ACT_PORT_IS_SET = 53, + BNXT_ULP_CF_IDX_ACT_PORT_TYPE = 54, + BNXT_ULP_CF_IDX_ACT_MIRR_PORT_IS_SET = 55, + BNXT_ULP_CF_IDX_ACT_MIRR_PORT_TYPE = 56, + BNXT_ULP_CF_IDX_MATCH_PORT_TYPE = 57, + BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP = 58, + BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF = 59, + BNXT_ULP_CF_IDX_VF_TO_VF = 60, + BNXT_ULP_CF_IDX_L3_HDR_CNT = 61, + BNXT_ULP_CF_IDX_L4_HDR_CNT = 62, + BNXT_ULP_CF_IDX_VFR_MODE = 63, + BNXT_ULP_CF_IDX_L3_TUN = 64, + BNXT_ULP_CF_IDX_L3_TUN_DECAP = 65, + BNXT_ULP_CF_IDX_FID = 66, + BNXT_ULP_CF_IDX_HDR_SIG_ID = 67, + BNXT_ULP_CF_IDX_FLOW_SIG_ID = 68, + BNXT_ULP_CF_IDX_WC_MATCH = 69, + BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG = 70, + BNXT_ULP_CF_IDX_TUNNEL_ID = 71, + BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID = 72, + BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID = 73, + BNXT_ULP_CF_IDX_OO_VLAN_FB_VID = 74, + BNXT_ULP_CF_IDX_OI_VLAN_FB_VID = 75, + BNXT_ULP_CF_IDX_IO_VLAN_FB_VID = 76, + BNXT_ULP_CF_IDX_II_VLAN_FB_VID = 77, + BNXT_ULP_CF_IDX_SOCKET_DIRECT = 78, + 
BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT = 79, + BNXT_ULP_CF_IDX_TUNNEL_SPORT = 80, + BNXT_ULP_CF_IDX_VF_META_FID = 81, + BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID = 82, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID = 83, + BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE = 84, + BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE = 85, + BNXT_ULP_CF_IDX_HA_SUPPORT_DISABLED = 86, + BNXT_ULP_CF_IDX_FUNCTION_ID = 87, + BNXT_ULP_CF_IDX_CHAIN_ID_METADATA = 88, + BNXT_ULP_CF_IDX_SRV6_UPAR_ID = 89, + BNXT_ULP_CF_IDX_SRV6_T_ID = 90, + BNXT_ULP_CF_IDX_GENERIC_SIZE = 91, + BNXT_ULP_CF_IDX_APP_PRIORITY = 92, + BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR = 93, + BNXT_ULP_CF_IDX_EM_FOR_IPV6 = 94, + BNXT_ULP_CF_IDX_VF_ROCE_EN = 95, + BNXT_ULP_CF_IDX_EM_FOR_TC = 96, + BNXT_ULP_CF_IDX_L2_CUSTOM_UPAR_ID = 97, + BNXT_ULP_CF_IDX_CUSTOM_GRE_EN = 98, + BNXT_ULP_CF_IDX_UPAR_HIGH_EN = 99, + BNXT_ULP_CF_IDX_MP_NPORTS = 100, + BNXT_ULP_CF_IDX_MP_PORT_A = 101, + BNXT_ULP_CF_IDX_MP_VNIC_A = 102, + BNXT_ULP_CF_IDX_MP_VPORT_A = 103, + BNXT_ULP_CF_IDX_MP_MDATA_A = 104, + BNXT_ULP_CF_IDX_MP_A_IS_VFREP = 105, + BNXT_ULP_CF_IDX_MP_PORT_B = 106, + BNXT_ULP_CF_IDX_MP_VNIC_B = 107, + BNXT_ULP_CF_IDX_MP_VPORT_B = 108, + BNXT_ULP_CF_IDX_MP_MDATA_B = 109, + BNXT_ULP_CF_IDX_MP_B_IS_VFREP = 110, + BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID = 111, + BNXT_ULP_CF_IDX_ACT_REJ_COND_EN = 112, + BNXT_ULP_CF_IDX_HDR_BITMAP = 113, + BNXT_ULP_CF_IDX_PROFILE_BITMAP = 114, + BNXT_ULP_CF_IDX_DSCP_REMAP = 115, + BNXT_ULP_CF_IDX_UDCC_EN = 116, + BNXT_ULP_CF_IDX_L2_CNTXT_ID = 117, + BNXT_ULP_CF_IDX_PROF_FUNC_ID = 118, + BNXT_ULP_CF_IDX_GROUP_ID = 119, + BNXT_ULP_CF_IDX_LAST = 120 +}; + +enum bnxt_ulp_cond_list_opc { + BNXT_ULP_COND_LIST_OPC_TRUE = 0, + BNXT_ULP_COND_LIST_OPC_FALSE = 1, + BNXT_ULP_COND_LIST_OPC_OR = 2, + BNXT_ULP_COND_LIST_OPC_AND = 3, + BNXT_ULP_COND_LIST_OPC_LIST_OR = 4, + BNXT_ULP_COND_LIST_OPC_LIST_AND = 5, + BNXT_ULP_COND_LIST_OPC_LAST = 6 +}; + +enum bnxt_ulp_cond_opc { + BNXT_ULP_COND_OPC_CF_IS_SET = 0, + BNXT_ULP_COND_OPC_CF_NOT_SET = 1, + 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET = 2, + BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET = 3, + BNXT_ULP_COND_OPC_HDR_BIT_IS_SET = 4, + BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET = 5, + BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET = 6, + BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET = 7, + BNXT_ULP_COND_OPC_RF_IS_SET = 8, + BNXT_ULP_COND_OPC_RF_NOT_SET = 9, + BNXT_ULP_COND_OPC_FLOW_PAT_MATCH = 10, + BNXT_ULP_COND_OPC_ACT_PAT_MATCH = 11, + BNXT_ULP_COND_OPC_EXT_MEM_IS_SET = 12, + BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET = 13, + BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET = 14, + BNXT_ULP_COND_OPC_ENC_HDR_BIT_NOT_SET = 15, + BNXT_ULP_COND_OPC_ACT_PROP_IS_SET = 16, + BNXT_ULP_COND_OPC_ACT_PROP_NOT_SET = 17, + BNXT_ULP_COND_OPC_CF_BIT_IS_SET = 18, + BNXT_ULP_COND_OPC_CF_BIT_NOT_SET = 19, + BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET = 20, + BNXT_ULP_COND_OPC_WC_FIELD_BIT_NOT_SET = 21, + BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_IS_SET = 22, + BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET = 23, + BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET = 24, + BNXT_ULP_COND_OPC_FEATURE_BIT_NOT_SET = 25, + BNXT_ULP_COND_OPC_LAST = 26 +}; + +enum bnxt_ulp_critical_resource { + BNXT_ULP_CRITICAL_RESOURCE_NO = 0, + BNXT_ULP_CRITICAL_RESOURCE_YES = 1, + BNXT_ULP_CRITICAL_RESOURCE_LAST = 2 +}; + +enum bnxt_ulp_device_id { + BNXT_ULP_DEVICE_ID_WH_PLUS = 0, + BNXT_ULP_DEVICE_ID_THOR = 1, + BNXT_ULP_DEVICE_ID_STINGRAY = 2, + BNXT_ULP_DEVICE_ID_STINGRAY2 = 3, + BNXT_ULP_DEVICE_ID_THOR2 = 4, + BNXT_ULP_DEVICE_ID_LAST = 5 +}; + +enum bnxt_ulp_df_param_type { + BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID = 0, + BNXT_ULP_DF_PARAM_TYPE_LAST = 1 +}; + +enum bnxt_ulp_direction { + BNXT_ULP_DIRECTION_INGRESS = 0, + BNXT_ULP_DIRECTION_EGRESS = 1, + BNXT_ULP_DIRECTION_LAST = 2 +}; + +enum bnxt_ulp_em_tbl_opc { + BNXT_ULP_EM_TBL_OPC_NOT_USED = 0, + BNXT_ULP_EM_TBL_OPC_WR_REGFILE = 1, + BNXT_ULP_EM_TBL_OPC_LAST = 2 +}; + +enum bnxt_ulp_enc_field { + BNXT_ULP_ENC_FIELD_ETH_DMAC = 0, + BNXT_ULP_ENC_FIELD_ETH_SMAC = 1, + BNXT_ULP_ENC_FIELD_ETH_TYPE = 2, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI = 3, + 
BNXT_ULP_ENC_FIELD_O_VLAN_TYPE = 4, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI = 5, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE = 6, + BNXT_ULP_ENC_FIELD_IPV4_IHL = 7, + BNXT_ULP_ENC_FIELD_IPV4_TOS = 8, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID = 9, + BNXT_ULP_ENC_FIELD_IPV4_FRAG = 10, + BNXT_ULP_ENC_FIELD_IPV4_TTL = 11, + BNXT_ULP_ENC_FIELD_IPV4_PROTO = 12, + BNXT_ULP_ENC_FIELD_IPV4_SADDR = 13, + BNXT_ULP_ENC_FIELD_IPV4_DADDR = 14, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW = 15, + BNXT_ULP_ENC_FIELD_IPV6_PROTO = 16, + BNXT_ULP_ENC_FIELD_IPV6_TTL = 17, + BNXT_ULP_ENC_FIELD_IPV6_SADDR = 18, + BNXT_ULP_ENC_FIELD_IPV6_DADDR = 19, + BNXT_ULP_ENC_FIELD_UDP_SPORT = 20, + BNXT_ULP_ENC_FIELD_UDP_DPORT = 21, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS = 22, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 = 23, + BNXT_ULP_ENC_FIELD_VXLAN_VNI = 24, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 = 25, + BNXT_ULP_ENC_FIELD_SRV6_NEXT_HDR = 26, + BNXT_ULP_ENC_FIELD_SRV6_HDR_LEN = 27, + BNXT_ULP_ENC_FIELD_SRV6_ROUTING_TYPE = 28, + BNXT_ULP_ENC_FIELD_SRV6_SEG_LEFT = 29, + BNXT_ULP_ENC_FIELD_SRV6_LAST_ENTRY = 30, + BNXT_ULP_ENC_FIELD_SRV6_FLAGS = 31, + BNXT_ULP_ENC_FIELD_SRV6_TAG = 32, + BNXT_ULP_ENC_FIELD_SRV6_SEG_LIST0 = 33, + BNXT_ULP_ENC_FIELD_SRV6_SEG_LIST1 = 34, + BNXT_ULP_ENC_FIELD_SRV6_SEG_LIST2 = 35, + BNXT_ULP_ENC_FIELD_SRV6_SEG_LIST3 = 36, + BNXT_ULP_ENC_FIELD_GENERIC_SIZE = 37, + BNXT_ULP_ENC_FIELD_GENERIC_RSVD = 38, + BNXT_ULP_ENC_FIELD_VXLAN_GPE_FLAGS = 39, + BNXT_ULP_ENC_FIELD_VXLAN_GPE_RSVD0 = 40, + BNXT_ULP_ENC_FIELD_VXLAN_GPE_NEXT_PROTO = 41, + BNXT_ULP_ENC_FIELD_VXLAN_GPE_VNI = 42, + BNXT_ULP_ENC_FIELD_VXLAN_GPE_RSVD1 = 43, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 = 44, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE = 45, + BNXT_ULP_ENC_FIELD_GENEVE_VNI = 46, + BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 = 47, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 = 48, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 = 49, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 = 50, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 = 51, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 = 52, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 = 53, 
+ BNXT_ULP_ENC_FIELD_LAST = 54 +}; + +enum bnxt_ulp_fdb_opc { + BNXT_ULP_FDB_OPC_PUSH_FID = 0, + BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE = 1, + BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE = 2, + BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE = 3, + BNXT_ULP_FDB_OPC_CLEAR_RID_REGFILE = 4, + BNXT_ULP_FDB_OPC_NOP = 5, + BNXT_ULP_FDB_OPC_PUSH_FID_SW_ONLY = 6, + BNXT_ULP_FDB_OPC_LAST = 7 +}; + +enum bnxt_ulp_fdb_type { + BNXT_ULP_FDB_TYPE_REGULAR = 0, + BNXT_ULP_FDB_TYPE_DEFAULT = 1, + BNXT_ULP_FDB_TYPE_RID = 2, + BNXT_ULP_FDB_TYPE_LAST = 3 +}; + +enum bnxt_ulp_field_opc { + BNXT_ULP_FIELD_OPC_SRC1 = 0, + BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3 = 1, + BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2 = 2, + BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2 = 3, + BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2_POST = 4, + BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2_POST = 5, + BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2 = 6, + BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2_OR_SRC3 = 7, + BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2 = 8, + BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3 = 9, + BNXT_ULP_FIELD_OPC_SKIP = 10, + BNXT_ULP_FIELD_OPC_TERNARY_LIST = 11, + BNXT_ULP_FIELD_OPC_LAST = 12 +}; + +enum bnxt_ulp_field_src { + BNXT_ULP_FIELD_SRC_ZERO = 0, + BNXT_ULP_FIELD_SRC_ONES = 1, + BNXT_ULP_FIELD_SRC_CONST = 2, + BNXT_ULP_FIELD_SRC_CF = 3, + BNXT_ULP_FIELD_SRC_RF = 4, + BNXT_ULP_FIELD_SRC_ACT_PROP = 5, + BNXT_ULP_FIELD_SRC_ACT_PROP_SZ = 6, + BNXT_ULP_FIELD_SRC_GLB_RF = 7, + BNXT_ULP_FIELD_SRC_HF = 8, + BNXT_ULP_FIELD_SRC_SUB_HF = 9, + BNXT_ULP_FIELD_SRC_HDR_BIT = 10, + BNXT_ULP_FIELD_SRC_ACT_BIT = 11, + BNXT_ULP_FIELD_SRC_FIELD_BIT = 12, + BNXT_ULP_FIELD_SRC_SKIP = 13, + BNXT_ULP_FIELD_SRC_REJECT = 14, + BNXT_ULP_FIELD_SRC_PORT_TABLE = 15, + BNXT_ULP_FIELD_SRC_ENC_HDR_BIT = 16, + BNXT_ULP_FIELD_SRC_ENC_FIELD = 17, + BNXT_ULP_FIELD_SRC_LIST_AND = 18, + BNXT_ULP_FIELD_SRC_LIST_OR = 19, + BNXT_ULP_FIELD_SRC_NEXT = 20, + BNXT_ULP_FIELD_SRC_LAST = 21 +}; + +enum bnxt_ulp_func_opc { + BNXT_ULP_FUNC_OPC_NOP = 0, + BNXT_ULP_FUNC_OPC_EQ = 1, + BNXT_ULP_FUNC_OPC_NE = 2, + BNXT_ULP_FUNC_OPC_GT = 
3, + BNXT_ULP_FUNC_OPC_GE = 4, + BNXT_ULP_FUNC_OPC_LT = 5, + BNXT_ULP_FUNC_OPC_LE = 6, + BNXT_ULP_FUNC_OPC_COPY_SRC1_TO_RF = 7, + BNXT_ULP_FUNC_OPC_RSS_CONFIG = 8, + BNXT_ULP_FUNC_OPC_GET_PARENT_MAC_ADDR = 9, + BNXT_ULP_FUNC_OPC_ALLOC_L2_CTX_ID = 10, + BNXT_ULP_FUNC_OPC_TUNNEL_DST_PORT_ALLOC = 11, + BNXT_ULP_FUNC_OPC_TUNNEL_DST_PORT_FREE = 12, + BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET = 13, + BNXT_ULP_FUNC_OPC_VFR_MARK_SET = 14, + BNXT_ULP_FUNC_OPC_BD_ACT_SET = 15, + BNXT_ULP_FUNC_OPC_LEFT_SHIFT = 16, + BNXT_ULP_FUNC_OPC_RIGHT_SHIFT = 17, + BNXT_ULP_FUNC_OPC_BIT_OR = 18, + BNXT_ULP_FUNC_OPC_BIT_AND = 19, + BNXT_ULP_FUNC_OPC_BIT_XOR = 20, + BNXT_ULP_FUNC_OPC_LOG_AND = 21, + BNXT_ULP_FUNC_OPC_LOG_OR = 22, + BNXT_ULP_FUNC_OPC_NOT_NOT = 23, + BNXT_ULP_FUNC_OPC_ADD = 24, + BNXT_ULP_FUNC_OPC_SUB = 25, + BNXT_ULP_FUNC_OPC_COND_LIST = 26, + BNXT_ULP_FUNC_OPC_LAST = 27 +}; + +enum bnxt_ulp_func_src { + BNXT_ULP_FUNC_SRC_REGFILE = 0, + BNXT_ULP_FUNC_SRC_GLB_REGFILE = 1, + BNXT_ULP_FUNC_SRC_COMP_FIELD = 2, + BNXT_ULP_FUNC_SRC_CONST = 3, + BNXT_ULP_FUNC_SRC_ACTION_BITMAP = 4, + BNXT_ULP_FUNC_SRC_HEADER_BITMAP = 5, + BNXT_ULP_FUNC_SRC_KEY_EXT_LIST = 6, + BNXT_ULP_FUNC_SRC_LAST = 7 +}; + +enum bnxt_ulp_gen_tbl_type { + BNXT_ULP_GEN_TBL_TYPE_KEY_LIST = 0, + BNXT_ULP_GEN_TBL_TYPE_HASH_LIST = 1, + BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST = 2, + BNXT_ULP_GEN_TBL_TYPE_LAST = 3 +}; + +enum bnxt_ulp_generic_tbl_lkup_type { + BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX = 0, + BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH = 1, + BNXT_ULP_GENERIC_TBL_LKUP_TYPE_SEQ = 2, + BNXT_ULP_GENERIC_TBL_LKUP_TYPE_LAST = 3 +}; + +enum bnxt_ulp_generic_tbl_opc { + BNXT_ULP_GENERIC_TBL_OPC_NOT_USED = 0, + BNXT_ULP_GENERIC_TBL_OPC_READ = 1, + BNXT_ULP_GENERIC_TBL_OPC_WRITE = 2, + BNXT_ULP_GENERIC_TBL_OPC_LAST = 3 +}; + +enum bnxt_ulp_glb_rf_idx { + BNXT_ULP_GLB_RF_IDX_NOT_USED = 0, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID = 1, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID_0 = 2, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID_1 = 3, + 
BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID_2 = 4, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR = 5, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID = 6, + BNXT_ULP_GLB_RF_IDX_GLB_L2_CNTXT_ID_0 = 7, + BNXT_ULP_GLB_RF_IDX_GLB_L2_CNTXT_ID_1 = 8, + BNXT_ULP_GLB_RF_IDX_VXLAN_PROF_FUNC_ID = 9, + BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID = 10, + BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID = 11, + BNXT_ULP_GLB_RF_IDX_GLB_MODIFY_PTR = 12, + BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR = 13, + BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR = 14, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_0 = 15, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_1 = 16, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_2 = 17, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_3 = 18, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_4 = 19, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_5 = 20, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_6 = 21, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_7 = 22, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_8 = 23, + BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_9 = 24, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_0 = 25, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_1 = 26, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_2 = 27, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_3 = 28, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_4 = 29, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_5 = 30, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_6 = 31, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_7 = 32, + BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_8 = 33, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_0 = 34, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_1 = 35, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_2 = 36, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_3 = 37, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_4 = 38, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_5 = 39, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_6 = 40, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_7 = 41, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_8 = 42, + BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_9 = 43, + BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_0 = 44, + BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_1 = 45, + BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_2 = 46, + 
BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_3 = 47, + BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_4 = 48, + BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_5 = 49, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_PROF_0 = 50, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_PROF_1 = 51, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_PROF_0 = 52, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_ACT_0 = 53, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_LKUP_0 = 54, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_PROF_0 = 55, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 = 56, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_LKUP_0 = 57, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_0 = 58, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_1 = 59, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0 = 60, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1 = 61, + BNXT_ULP_GLB_RF_IDX_APP_GLB_PROF_FUNC_ID_0 = 62, + BNXT_ULP_GLB_RF_IDX_APP_GLB_PROF_FUNC_ID_1 = 63, + BNXT_ULP_GLB_RF_IDX_APP_GLB_PROF_FUNC_ID_2 = 64, + BNXT_ULP_GLB_RF_IDX_APP_GLB_PROF_FUNC_ID_3 = 65, + BNXT_ULP_GLB_RF_IDX_APP_GLB_PROF_FUNC_ID_4 = 66, + BNXT_ULP_GLB_RF_IDX_APP_GLB_L2_CNTXT_ID_0 = 67, + BNXT_ULP_GLB_RF_IDX_APP_GLB_L2_CNTXT_ID_1 = 68, + BNXT_ULP_GLB_RF_IDX_APP_GLB_L2_CNTXT_ID_2 = 69, + BNXT_ULP_GLB_RF_IDX_APP_GLB_L2_CNTXT_ID_3 = 70, + BNXT_ULP_GLB_RF_IDX_APP_GLB_EM_PROFILE_ID_0 = 71, + BNXT_ULP_GLB_RF_IDX_APP_GLB_EM_PROFILE_ID_1 = 72, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_0 = 73, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_1 = 74, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_2 = 75, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_3 = 76, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_4 = 77, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_5 = 78, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_6 = 79, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_7 = 80, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_8 = 81, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_9 = 82, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_PROFILE_ID_10 = 83, + BNXT_ULP_GLB_RF_IDX_APP_GLB_EM_KEY_ID_0 = 84, + BNXT_ULP_GLB_RF_IDX_APP_GLB_EM_KEY_ID_1 = 85, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_0 = 86, 
+ BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_1 = 87, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_2 = 88, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_3 = 89, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_4 = 90, + BNXT_ULP_GLB_RF_IDX_APP_GLB_WC_KEY_ID_5 = 91, + BNXT_ULP_GLB_RF_IDX_APP_GLB_AREC_PTR_0 = 92, + BNXT_ULP_GLB_RF_IDX_APP_GLB_AREC_PTR_1 = 93, + BNXT_ULP_GLB_RF_IDX_GRE_PROF_FUNC_ID = 94, + BNXT_ULP_GLB_RF_IDX_RECYCLE_PROF_FUNC_ID = 95, + BNXT_ULP_GLB_RF_IDX_GLB_ECPRI_UPAR_ID = 96, + BNXT_ULP_GLB_RF_IDX_GLB_ECPRI_PROF_FUNC_ID = 97, + BNXT_ULP_GLB_RF_IDX_LAST = 98 +}; + +enum bnxt_ulp_global_register_tbl_opc { + BNXT_ULP_GLOBAL_REGISTER_TBL_OPC_NOT_USED = 0, + BNXT_ULP_GLOBAL_REGISTER_TBL_OPC_WR_REGFILE = 1, + BNXT_ULP_GLOBAL_REGISTER_TBL_OPC_LAST = 2 +}; + +enum bnxt_ulp_hdr_type { + BNXT_ULP_HDR_TYPE_NOT_SUPPORTED = 0, + BNXT_ULP_HDR_TYPE_SUPPORTED = 1, + BNXT_ULP_HDR_TYPE_END = 2, + BNXT_ULP_HDR_TYPE_LAST = 3 +}; + +enum bnxt_ulp_if_tbl_opc { + BNXT_ULP_IF_TBL_OPC_NOT_USED = 0, + BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD = 1, + BNXT_ULP_IF_TBL_OPC_WR_REGFILE = 2, + BNXT_ULP_IF_TBL_OPC_WR_CONST = 3, + BNXT_ULP_IF_TBL_OPC_RD_COMP_FIELD = 4, + BNXT_ULP_IF_TBL_OPC_LAST = 5 +}; + +enum bnxt_ulp_index_tbl_opc { + BNXT_ULP_INDEX_TBL_OPC_NOT_USED = 0, + BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE = 1, + BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE = 2, + BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE = 3, + BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE = 4, + BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_GLB_REGFILE = 5, + BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE = 6, + BNXT_ULP_INDEX_TBL_OPC_UPDATE_REGFILE = 7, + BNXT_ULP_INDEX_TBL_OPC_NOP_REGFILE = 8, + BNXT_ULP_INDEX_TBL_OPC_LAST = 9 +}; + +enum bnxt_ulp_key_recipe_opc { + BNXT_ULP_KEY_RECIPE_OPC_NOP = 0, + BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY = 1, + BNXT_ULP_KEY_RECIPE_OPC_LAST = 2 +}; + +enum bnxt_ulp_key_recipe_tbl_opc { + BNXT_ULP_KEY_RECIPE_TBL_OPC_NOT_USED = 0, + BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE = 1, + BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE = 2, + 
BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE = 3, + BNXT_ULP_KEY_RECIPE_TBL_OPC_LAST = 4 +}; + +enum bnxt_ulp_mark_db_opc { + BNXT_ULP_MARK_DB_OPC_NOP = 0, + BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION = 1, + BNXT_ULP_MARK_DB_OPC_PUSH_AND_SET_VFR_FLAG = 2, + BNXT_ULP_MARK_DB_OPC_LAST = 3 +}; + +enum bnxt_ulp_match_type { + BNXT_ULP_MATCH_TYPE_EM = 0, + BNXT_ULP_MATCH_TYPE_WM = 1, + BNXT_ULP_MATCH_TYPE_LAST = 2 +}; + +enum bnxt_ulp_port_table { + BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC = 0, + BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_VNIC = 1, + BNXT_ULP_PORT_TABLE_DRV_FUNC_SVIF = 2, + BNXT_ULP_PORT_TABLE_DRV_FUNC_SPIF = 3, + BNXT_ULP_PORT_TABLE_DRV_FUNC_PARIF = 4, + BNXT_ULP_PORT_TABLE_DRV_FUNC_VNIC = 5, + BNXT_ULP_PORT_TABLE_DRV_FUNC_PHY_PORT = 6, + BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC = 7, + BNXT_ULP_PORT_TABLE_VF_FUNC_SVIF = 8, + BNXT_ULP_PORT_TABLE_VF_FUNC_SPIF = 9, + BNXT_ULP_PORT_TABLE_VF_FUNC_PARIF = 10, + BNXT_ULP_PORT_TABLE_VF_FUNC_VNIC = 11, + BNXT_ULP_PORT_TABLE_VF_FUNC_MAC = 12, + BNXT_ULP_PORT_TABLE_PHY_PORT_SVIF = 13, + BNXT_ULP_PORT_TABLE_PHY_PORT_SPIF = 14, + BNXT_ULP_PORT_TABLE_PHY_PORT_PARIF = 15, + BNXT_ULP_PORT_TABLE_PHY_PORT_VPORT = 16, + BNXT_ULP_PORT_TABLE_PORT_IS_PF = 17, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA = 18, + BNXT_ULP_PORT_TABLE_VF_FUNC_FID = 19, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE = 20, + BNXT_ULP_PORT_TABLE_DRV_FUNC_ROCE_VNIC = 21, + BNXT_ULP_PORT_TABLE_LAST = 22 +}; + +enum bnxt_ulp_pri_opc { + BNXT_ULP_PRI_OPC_NOT_USED = 0, + BNXT_ULP_PRI_OPC_CONST = 1, + BNXT_ULP_PRI_OPC_APP_PRI = 2, + BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST = 3, + BNXT_ULP_PRI_OPC_REGFILE = 4, + BNXT_ULP_PRI_OPC_COMP_FIELD = 5, + BNXT_ULP_PRI_OPC_LAST = 6 +}; + +enum bnxt_ulp_ref_cnt_opc { + BNXT_ULP_REF_CNT_OPC_DEFAULT = 0, + BNXT_ULP_REF_CNT_OPC_NOP = 1, + BNXT_ULP_REF_CNT_OPC_DEC = 2, + BNXT_ULP_REF_CNT_OPC_INC = 3, + BNXT_ULP_REF_CNT_OPC_LAST = 4 +}; + +enum bnxt_ulp_rf_idx { + BNXT_ULP_RF_IDX_NOT_USED = 0, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 = 1, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_1 = 2, + 
BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 = 3, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_1 = 4, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 = 5, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_1 = 6, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 = 7, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_1 = 8, + BNXT_ULP_RF_IDX_META_ACTION_PTR = 9, + BNXT_ULP_RF_IDX_MIRR_ACTION_PTR = 10, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR = 11, + BNXT_ULP_RF_IDX_ACTION_PTR_0 = 12, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 = 13, + BNXT_ULP_RF_IDX_ENCAP_PTR_1 = 14, + BNXT_ULP_RF_IDX_MIRR_ENCAP_PTR_0 = 15, + BNXT_ULP_RF_IDX_CRITICAL_RESOURCE = 16, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 = 17, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1 = 18, + BNXT_ULP_RF_IDX_MIRR_FLOW_CNTR_PTR_0 = 19, + BNXT_ULP_RF_IDX_MAIN_SP_PTR = 20, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 = 21, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 = 22, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE = 23, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 = 24, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_1 = 25, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 = 26, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_1 = 27, + BNXT_ULP_RF_IDX_WC_TCAM_INDEX_0 = 28, + BNXT_ULP_RF_IDX_WC_TCAM_INDEX_1 = 29, + BNXT_ULP_RF_IDX_SRC_PROPERTY_PTR = 30, + BNXT_ULP_RF_IDX_GENERIC_TBL_MISS = 31, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 = 32, + BNXT_ULP_RF_IDX_MIRROR_ID_0 = 33, + BNXT_ULP_RF_IDX_HDR_SIG_ID = 34, + BNXT_ULP_RF_IDX_FLOW_SIG_ID = 35, + BNXT_ULP_RF_IDX_RID = 36, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 = 37, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 = 38, + BNXT_ULP_RF_IDX_DRV_FUNC_MAC = 39, + BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC = 40, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR = 41, + BNXT_ULP_RF_IDX_CC = 42, + BNXT_ULP_RF_IDX_CF_FLOW_SIG_ID = 43, + BNXT_ULP_RF_IDX_PHY_PORT = 44, + BNXT_ULP_RF_IDX_METADATA_PROF = 45, + BNXT_ULP_RF_IDX_MIRR_MODIFY_PTR = 46, + BNXT_ULP_RF_IDX_MODIFY_PTR = 47, + BNXT_ULP_RF_IDX_SOCK_DIR_SVIF = 48, + BNXT_ULP_RF_IDX_SOCK_DIR_PARIF = 49, + BNXT_ULP_RF_IDX_SOCK_DIR_ACT_PTR = 50, + BNXT_ULP_RF_IDX_SOCK_DIR_PARENT_MAC = 51, + BNXT_ULP_RF_IDX_RSS_VNIC = 52, + BNXT_ULP_RF_IDX_PORT_IS_PF = 53, + 
BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 = 54, + BNXT_ULP_RF_IDX_METER_PTR_0 = 55, + BNXT_ULP_RF_IDX_REF_CNT = 56, + BNXT_ULP_RF_IDX_RF_0 = 57, + BNXT_ULP_RF_IDX_RF_1 = 58, + BNXT_ULP_RF_IDX_RF_2 = 59, + BNXT_ULP_RF_IDX_RF_3 = 60, + BNXT_ULP_RF_IDX_RF_4 = 61, + BNXT_ULP_RF_IDX_RF_5 = 62, + BNXT_ULP_RF_IDX_RF_6 = 63, + BNXT_ULP_RF_IDX_RF_7 = 64, + BNXT_ULP_RF_IDX_CMM_ACT_HNDL = 65, + BNXT_ULP_RF_IDX_CMM_STAT_HNDL = 66, + BNXT_ULP_RF_IDX_CMM_MOD_HNDL = 67, + BNXT_ULP_RF_IDX_CMM_ENC_HNDL = 68, + BNXT_ULP_RF_IDX_CMM_SRP_HNDL = 69, + BNXT_ULP_RF_IDX_VF_FUNC_METADATA = 70, + BNXT_ULP_RF_IDX_CHAIN_ID_METADATA = 71, + BNXT_ULP_RF_IDX_RECYCLE_CNT = 72, + BNXT_ULP_RF_IDX_DEST_VNIC = 73, + BNXT_ULP_RF_IDX_DEST_VPORT = 74, + BNXT_ULP_RF_IDX_DEST_METADATA = 75, + BNXT_ULP_RF_IDX_PROF_TCAM_PRI = 76, + BNXT_ULP_RF_IDX_EM_INSERT_FAIL = 77, + BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY = 78, + BNXT_ULP_RF_IDX_TERM_FLOW = 79, + BNXT_ULP_RF_IDX_O_DMAC = 80, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 = 81, + BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1 = 82, + BNXT_ULP_RF_IDX_WC_RECIPE_ID = 83, + BNXT_ULP_RF_IDX_EM_RECIPE_ID = 84, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR_ROCE = 85, + BNXT_ULP_RF_IDX_JUMP_META_IDX = 86, + BNXT_ULP_RF_IDX_JUMP_META = 87, + BNXT_ULP_RF_IDX_TUNNEL_PORT = 88, + BNXT_ULP_RF_IDX_CF_0 = 89, + BNXT_ULP_RF_IDX_PM_0 = 90, + BNXT_ULP_RF_IDX_RFC2698_0 = 91, + BNXT_ULP_RF_IDX_CBSM_0 = 92, + BNXT_ULP_RF_IDX_EBSM_0 = 93, + BNXT_ULP_RF_IDX_CBND_0 = 94, + BNXT_ULP_RF_IDX_EBND_0 = 95, + BNXT_ULP_RF_IDX_CBS_0 = 96, + BNXT_ULP_RF_IDX_EBS_0 = 97, + BNXT_ULP_RF_IDX_CIR_0 = 98, + BNXT_ULP_RF_IDX_EIR_0 = 99, + BNXT_ULP_RF_IDX_OUTER_LOOP = 100, + BNXT_ULP_RF_IDX_INNER_LOOP = 101, + BNXT_ULP_RF_IDX_OUTER_ADD = 102, + BNXT_ULP_RF_IDX_WC_TCAM_PRIORITY = 103, + BNXT_ULP_RF_IDX_LAST = 104 +}; + +enum bnxt_ulp_tcam_tbl_opc { + BNXT_ULP_TCAM_TBL_OPC_NOT_USED = 0, + BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE = 1, + BNXT_ULP_TCAM_TBL_OPC_SRCH_ALLOC_WR_REGFILE = 2, + BNXT_ULP_TCAM_TBL_OPC_ALLOC_REGFILE = 3, + 
BNXT_ULP_TCAM_TBL_OPC_WR_REGFILE = 4, + BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT = 5, + BNXT_ULP_TCAM_TBL_OPC_LAST = 6 +}; + +enum bnxt_ulp_template_type { + BNXT_ULP_TEMPLATE_TYPE_CLASS = 0, + BNXT_ULP_TEMPLATE_TYPE_ACTION = 1, + BNXT_ULP_TEMPLATE_TYPE_LAST = 2 +}; + +enum bnxt_ulp_vnic_tbl_opc { + BNXT_ULP_VNIC_TBL_OPC_NOT_USED = 0, + BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE = 1, + BNXT_ULP_VNIC_TBL_OPC_LAST = 2 +}; + +enum bnxt_ulp_app_cap { + BNXT_ULP_APP_CAP_SHARED_EN = 0x00000001, + BNXT_ULP_APP_CAP_HOT_UPGRADE_EN = 0x00000002, + BNXT_ULP_APP_CAP_UNICAST_ONLY = 0x00000004, + BNXT_ULP_APP_CAP_SOCKET_DIRECT = 0x00000008, + BNXT_ULP_APP_CAP_IP_TOS_PROTO_SUPPORT = 0x00000010, + BNXT_ULP_APP_CAP_BC_MC_SUPPORT = 0x00000020, + BNXT_ULP_APP_CAP_CUST_VXLAN = 0x00000040, + BNXT_ULP_APP_CAP_HA_DYNAMIC = 0x00000080, + BNXT_ULP_APP_CAP_SRV6 = 0x00000100, + BNXT_ULP_APP_CAP_L2_ETYPE = 0x00000200, + BNXT_ULP_APP_CAP_DSCP_REMAP = 0x00000400 +}; + +enum bnxt_ulp_fdb_resource_flags { + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_INGR = 0x00, + BNXT_ULP_FDB_RESOURCE_FLAGS_DIR_EGR = 0x01 +}; + +enum bnxt_ulp_feature_bit { + BNXT_ULP_FEATURE_BIT_PARENT_DMAC = 0x00000001, + BNXT_ULP_FEATURE_BIT_PORT_DMAC = 0x00000002 +}; + +enum bnxt_ulp_flow_dir_bitmask { + BNXT_ULP_FLOW_DIR_BITMASK_ING = 0x4000000000000000, + BNXT_ULP_FLOW_DIR_BITMASK_EGR = 0x8000000000000000 +}; + +enum bnxt_ulp_resource_func { + BNXT_ULP_RESOURCE_FUNC_INVALID = 0x00, + BNXT_ULP_RESOURCE_FUNC_EM_TABLE = 0x20, + BNXT_ULP_RESOURCE_FUNC_CMM_TABLE = 0x40, + BNXT_ULP_RESOURCE_FUNC_CMM_STAT = 0x60, + BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE = 0x80, + BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE = 0x81, + BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE = 0x82, + BNXT_ULP_RESOURCE_FUNC_IDENTIFIER = 0x83, + BNXT_ULP_RESOURCE_FUNC_IF_TABLE = 0x84, + BNXT_ULP_RESOURCE_FUNC_HW_FID = 0x85, + BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW = 0x86, + BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW = 0x87, + BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE = 0x88, + BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE = 0x89, + 
BNXT_ULP_RESOURCE_FUNC_GLOBAL_REGISTER_TABLE = 0x8a, + BNXT_ULP_RESOURCE_FUNC_UDCC_V6SUBNET_TABLE = 0x8b, + BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE = 0x8c, + BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE = 0x8d +}; + +enum bnxt_ulp_resource_sub_type { + BNXT_ULP_RESOURCE_SUB_TYPE_NOT_USED = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION = 1, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT = 2, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC = 3, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT = 4, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS = 5, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM = 1, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR = 2, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE = 3, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE = 4, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE = 5, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE = 6, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE = 7, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE = 8, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE = 9, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE = 10, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE = 11, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE = 12, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE = 13, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL = 14, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE = 15, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE = 16, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE = 17, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS = 18, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE = 19, + 
BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE = 20, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER = 21, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT = 22, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP = 23, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR = 24, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE = 25, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT = 26, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE = 27, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE = 28, + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER = 29, + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE = 1, + BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_ECPRI = 1, + BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN_GPE = 2, + BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN_GPE_V6 = 3, + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT = 4, + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_LKUP = 5, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_STAT_64 = 6, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CMM_MCG_ACT = 2, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CMM_MODIFY_REC = 3, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CMM_STAT_COUNTER = 4, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CMM_SRC_PROP = 5, + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CMM_ENCAP_REC = 6, + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM = 0, + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM = 1 +}; + +enum bnxt_ulp_session_type { + BNXT_ULP_SESSION_TYPE_DEFAULT = 0x00, + BNXT_ULP_SESSION_TYPE_SHARED = 0x01, + BNXT_ULP_SESSION_TYPE_SHARED_WC = 0x02, + BNXT_ULP_SESSION_TYPE_SHARED_OWC = 0x04, + BNXT_ULP_SESSION_TYPE_DEFAULT_NON_HA = 0x08, + BNXT_ULP_SESSION_TYPE_LAST = 0x10 +}; + +enum bnxt_ulp_act_prop_sz { + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN_SZ = 4, + 
BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SZ = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_SZ = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_TYPE = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE = 4, + BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM = 4, + BNXT_ULP_ACT_PROP_SZ_PORT_ID = 4, + BNXT_ULP_ACT_PROP_SZ_VNIC = 4, + BNXT_ULP_ACT_PROP_SZ_VPORT = 4, + BNXT_ULP_ACT_PROP_SZ_MIRR_VNIC = 4, + BNXT_ULP_ACT_PROP_SZ_MIRR_VPORT = 4, + BNXT_ULP_ACT_PROP_SZ_MARK = 4, + BNXT_ULP_ACT_PROP_SZ_COUNT = 4, + BNXT_ULP_ACT_PROP_SZ_METER = 4, + BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC = 6, + BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST = 6, + BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN = 2, + BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP = 1, + BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID = 2, + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC = 4, + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST = 4, + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC = 16, + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST = 16, + BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC = 2, + BNXT_ULP_ACT_PROP_SZ_SET_TP_DST = 2, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_0 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_1 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_2 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_3 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_4 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_5 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_6 = 4, + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_7 = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC = 6, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC = 6, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG = 8, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP = 32, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC = 16, + BNXT_ULP_ACT_PROP_SZ_ENCAP_UDP = 4, + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN = 32, + BNXT_ULP_ACT_PROP_SZ_JUMP = 4, + BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE = 8, + BNXT_ULP_ACT_PROP_SZ_RSS_FUNC = 1, + BNXT_ULP_ACT_PROP_SZ_RSS_TYPES = 8, + BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL = 4, + BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN = 4, + BNXT_ULP_ACT_PROP_SZ_RSS_KEY = 40, + BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM = 2, + BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE = 32, + 
BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX = 2, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID_UPDATE = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID = 4, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CIR = 3, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EIR = 3, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBS = 2, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBS = 2, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_RFC2698 = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_PM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBND = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBND = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBSM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBSM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CF = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_ID = 4, + BNXT_ULP_ACT_PROP_SZ_METER_INST_ECN_RMP_EN_UPDATE = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_ECN_RMP_EN = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL_UPDATE = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL = 1, + BNXT_ULP_ACT_PROP_SZ_GOTO_CHAIN = 4, + BNXT_ULP_ACT_PROP_SZ_METER_INST_CIR = 3, + BNXT_ULP_ACT_PROP_SZ_METER_INST_EIR = 3, + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBS = 2, + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBS = 2, + BNXT_ULP_ACT_PROP_SZ_METER_INST_RFC2698 = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_PM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBND = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBND = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBSM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBSM = 1, + BNXT_ULP_ACT_PROP_SZ_METER_INST_CF = 1, + BNXT_ULP_ACT_PROP_SZ_SET_TTL = 1, + BNXT_ULP_ACT_PROP_SZ_LAST = 4 +}; + +enum bnxt_ulp_act_prop_idx { + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ = 0, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ = 4, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ = 8, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE = 12, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM = 16, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE = 20, + BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM = 24, + BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM = 28, + BNXT_ULP_ACT_PROP_IDX_PORT_ID = 32, + BNXT_ULP_ACT_PROP_IDX_VNIC = 36, + BNXT_ULP_ACT_PROP_IDX_VPORT = 40, + BNXT_ULP_ACT_PROP_IDX_MIRR_VNIC = 44, + 
BNXT_ULP_ACT_PROP_IDX_MIRR_VPORT = 48, + BNXT_ULP_ACT_PROP_IDX_MARK = 52, + BNXT_ULP_ACT_PROP_IDX_COUNT = 56, + BNXT_ULP_ACT_PROP_IDX_METER = 60, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC = 64, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST = 70, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN = 76, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP = 78, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID = 79, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC = 81, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST = 85, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC = 89, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST = 105, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC = 121, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST = 123, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0 = 125, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1 = 129, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2 = 133, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3 = 137, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4 = 141, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5 = 145, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6 = 149, + BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7 = 153, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC = 157, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC = 163, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG = 169, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP = 177, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC = 209, + BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP = 225, + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN = 229, + BNXT_ULP_ACT_PROP_IDX_JUMP = 261, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE = 265, + BNXT_ULP_ACT_PROP_IDX_RSS_FUNC = 273, + BNXT_ULP_ACT_PROP_IDX_RSS_TYPES = 274, + BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL = 282, + BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN = 286, + BNXT_ULP_ACT_PROP_IDX_RSS_KEY = 290, + BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM = 330, + BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE = 332, + BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX = 364, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID_UPDATE = 366, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID = 367, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR = 371, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR = 374, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS = 377, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS = 379, + 
BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698 = 381, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM = 382, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND = 383, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND = 384, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM = 385, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM = 386, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF = 387, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID = 388, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE = 392, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN = 393, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE = 394, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL = 395, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN = 396, + BNXT_ULP_ACT_PROP_IDX_METER_INST_CIR = 400, + BNXT_ULP_ACT_PROP_IDX_METER_INST_EIR = 403, + BNXT_ULP_ACT_PROP_IDX_METER_INST_CBS = 406, + BNXT_ULP_ACT_PROP_IDX_METER_INST_EBS = 408, + BNXT_ULP_ACT_PROP_IDX_METER_INST_RFC2698 = 410, + BNXT_ULP_ACT_PROP_IDX_METER_INST_PM = 411, + BNXT_ULP_ACT_PROP_IDX_METER_INST_EBND = 412, + BNXT_ULP_ACT_PROP_IDX_METER_INST_CBND = 413, + BNXT_ULP_ACT_PROP_IDX_METER_INST_EBSM = 414, + BNXT_ULP_ACT_PROP_IDX_METER_INST_CBSM = 415, + BNXT_ULP_ACT_PROP_IDX_METER_INST_CF = 416, + BNXT_ULP_ACT_PROP_IDX_SET_TTL = 417, + BNXT_ULP_ACT_PROP_IDX_LAST = 418 +}; + +enum ulp_wp_sym { + ULP_WP_SYM_METADATA_OP_NORMAL = 0, + ULP_WP_SYM_METADATA_OP_L2_HASH = 0, + ULP_WP_SYM_METADATA_OP_L4_HASH = 0, + ULP_WP_SYM_FWD_OP_BYPASS_CFA = 0, + ULP_WP_SYM_FWD_OP_BYPASS_CFA_ROCE = 0, + ULP_WP_SYM_FWD_OP_BYPASS_LKUP = 0, + ULP_WP_SYM_FWD_OP_NORMAL_FLOW = 0, + ULP_WP_SYM_FWD_OP_DROP = 0, + ULP_WP_SYM_CTXT_OPCODE_BYPASS_CFA = 0, + ULP_WP_SYM_CTXT_OPCODE_BYPASS_LKUP = 0, + ULP_WP_SYM_CTXT_OPCODE_META_UPDATE = 0, + ULP_WP_SYM_CTXT_OPCODE_NORMAL_FLOW = 0, + ULP_WP_SYM_CTXT_OPCODE_DROP = 0, + ULP_WP_SYM_L2_CTXT_PRI_CATCHALL = 0, + ULP_WP_SYM_L2_CTXT_PRI_MC_BC = 0, + ULP_WP_SYM_L2_CTXT_PRI_PORT = 0, + ULP_WP_SYM_L2_CTXT_PRI_APP = 0, + ULP_WP_SYM_PROF_TCAM_PRI_CATCHALL = 0, + ULP_WP_SYM_PROF_TCAM_PRI_APP = 0, + ULP_WP_SYM_PROF_TCAM_PRI_L4 = 0, + 
ULP_WP_SYM_PROF_TCAM_PRI_L3 = 0, + ULP_WP_SYM_PROF_TCAM_PRI_L2 = 0, + ULP_WP_SYM_PKT_TYPE_IGNORE = 0, + ULP_WP_SYM_PKT_TYPE_L2 = 0, + ULP_WP_SYM_PKT_TYPE_0_IGNORE = 0, + ULP_WP_SYM_PKT_TYPE_0_L2 = 0, + ULP_WP_SYM_PKT_TYPE_1_IGNORE = 0, + ULP_WP_SYM_PKT_TYPE_1_L2 = 0, + ULP_WP_SYM_RECYCLE_CNT_IGNORE = 0, + ULP_WP_SYM_RECYCLE_CNT_ZERO = 0, + ULP_WP_SYM_RECYCLE_CNT_ONE = 1, + ULP_WP_SYM_RECYCLE_CNT_TWO = 2, + ULP_WP_SYM_RECYCLE_CNT_THREE = 3, + ULP_WP_SYM_AGG_ERROR_IGNORE = 0, + ULP_WP_SYM_AGG_ERROR_NO = 0, + ULP_WP_SYM_AGG_ERROR_YES = 1, + ULP_WP_SYM_RESERVED_IGNORE = 0, + ULP_WP_SYM_HREC_NEXT_IGNORE = 0, + ULP_WP_SYM_HREC_NEXT_NO = 0, + ULP_WP_SYM_HREC_NEXT_YES = 1, + ULP_WP_SYM_TL2_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_TL2_HDR_VALID_NO = 0, + ULP_WP_SYM_TL2_HDR_VALID_YES = 1, + ULP_WP_SYM_TL2_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_TL2_HDR_TYPE_DIX = 0, + ULP_WP_SYM_TL2_UC_MC_BC_IGNORE = 0, + ULP_WP_SYM_TL2_UC_MC_BC_UC = 0, + ULP_WP_SYM_TL2_UC_MC_BC_MC = 2, + ULP_WP_SYM_TL2_UC_MC_BC_BC = 3, + ULP_WP_SYM_TL2_VTAG_PRESENT_IGNORE = 0, + ULP_WP_SYM_TL2_VTAG_PRESENT_NO = 0, + ULP_WP_SYM_TL2_VTAG_PRESENT_YES = 1, + ULP_WP_SYM_TL2_TWO_VTAGS_IGNORE = 0, + ULP_WP_SYM_TL2_TWO_VTAGS_NO = 0, + ULP_WP_SYM_TL2_TWO_VTAGS_YES = 1, + ULP_WP_SYM_TL3_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_TL3_HDR_VALID_NO = 0, + ULP_WP_SYM_TL3_HDR_VALID_YES = 1, + ULP_WP_SYM_TL3_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_TL3_HDR_ERROR_NO = 0, + ULP_WP_SYM_TL3_HDR_ERROR_YES = 1, + ULP_WP_SYM_TL3_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_TL3_HDR_TYPE_IPV4 = 0, + ULP_WP_SYM_TL3_HDR_TYPE_IPV6 = 1, + ULP_WP_SYM_TL3_HDR_ISIP_IGNORE = 0, + ULP_WP_SYM_TL3_HDR_ISIP_NO = 0, + ULP_WP_SYM_TL3_HDR_ISIP_YES = 1, + ULP_WP_SYM_TL3_IPV6_CMP_SRC_IGNORE = 0, + ULP_WP_SYM_TL3_IPV6_CMP_SRC_NO = 0, + ULP_WP_SYM_TL3_IPV6_CMP_SRC_YES = 1, + ULP_WP_SYM_TL3_IPV6_CMP_DST_IGNORE = 0, + ULP_WP_SYM_TL3_IPV6_CMP_DST_NO = 0, + ULP_WP_SYM_TL3_IPV6_CMP_DST_YES = 1, + ULP_WP_SYM_TL4_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_TL4_HDR_VALID_NO = 0, + 
ULP_WP_SYM_TL4_HDR_VALID_YES = 1, + ULP_WP_SYM_TL4_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_TL4_HDR_ERROR_NO = 0, + ULP_WP_SYM_TL4_HDR_ERROR_YES = 1, + ULP_WP_SYM_TL4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_WP_SYM_TL4_HDR_IS_UDP_TCP_NO = 0, + ULP_WP_SYM_TL4_HDR_IS_UDP_TCP_YES = 1, + ULP_WP_SYM_TL4_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_TL4_HDR_TYPE_TCP = 0, + ULP_WP_SYM_TL4_HDR_TYPE_UDP = 1, + ULP_WP_SYM_TUN_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_TUN_HDR_VALID_NO = 0, + ULP_WP_SYM_TUN_HDR_VALID_YES = 1, + ULP_WP_SYM_TUN_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_TUN_HDR_ERROR_NO = 0, + ULP_WP_SYM_TUN_HDR_ERROR_YES = 1, + ULP_WP_SYM_TUN_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_TUN_HDR_TYPE_VXLAN = 0, + ULP_WP_SYM_TUN_HDR_TYPE_VXLAN_GPE = 0, + ULP_WP_SYM_TUN_HDR_TYPE_GENEVE = 1, + ULP_WP_SYM_TUN_HDR_TYPE_NVGRE = 2, + ULP_WP_SYM_TUN_HDR_TYPE_GRE = 3, + ULP_WP_SYM_TUN_HDR_TYPE_IPV4 = 4, + ULP_WP_SYM_TUN_HDR_TYPE_IPV6 = 5, + ULP_WP_SYM_TUN_HDR_TYPE_PPPOE = 6, + ULP_WP_SYM_TUN_HDR_TYPE_MPLS = 7, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR1 = 8, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR2 = 9, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR3 = 10, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR4 = 11, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR5 = 0, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR6 = 0, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR7 = 0, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR8 = 0, + ULP_WP_SYM_TUN_HDR_TYPE_ROE = 0, + ULP_WP_SYM_TUN_HDR_TYPE_ECPRI = 0, + ULP_WP_SYM_TUN_HDR_TYPE_GTP_V1_U = 0, + ULP_WP_SYM_TUN_HDR_TYPE_GTP_V2_C = 0, + ULP_WP_SYM_TUN_HDR_TYPE_PFCP_SESS = 0, + ULP_WP_SYM_TUN_HDR_TYPE_PFCP_NODE = 0, + ULP_WP_SYM_TUN_HDR_TYPE_NSH = 0, + ULP_WP_SYM_TUN_HDR_TYPE_VXLAN_IP = 0, + ULP_WP_SYM_TUN_HDR_TYPE_GRE_TEN = 0, + ULP_WP_SYM_TUN_HDR_TYPE_NONE = 15, + ULP_WP_SYM_TUN_HDR_TYPE_UPAR_MASK = 14, + ULP_WP_SYM_TUN_HDR_TYPE_TID_MASK = 0, + ULP_WP_SYM_TUN_HDR_FLAGS_IGNORE = 0, + ULP_WP_SYM_L2_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_L2_HDR_VALID_NO = 0, + ULP_WP_SYM_L2_HDR_VALID_YES = 1, + ULP_WP_SYM_L2_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_L2_HDR_ERROR_NO = 0, + ULP_WP_SYM_L2_HDR_ERROR_YES = 1, + 
ULP_WP_SYM_L2_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_L2_HDR_TYPE_DIX = 0, + ULP_WP_SYM_L2_HDR_TYPE_LLC_SNAP = 1, + ULP_WP_SYM_L2_HDR_TYPE_LLC = 2, + ULP_WP_SYM_L2_UC_MC_BC_IGNORE = 0, + ULP_WP_SYM_L2_UC_MC_BC_UC = 0, + ULP_WP_SYM_L2_UC_MC_BC_MC = 2, + ULP_WP_SYM_L2_UC_MC_BC_BC = 3, + ULP_WP_SYM_L2_VTAG_PRESENT_IGNORE = 0, + ULP_WP_SYM_L2_VTAG_PRESENT_NO = 0, + ULP_WP_SYM_L2_VTAG_PRESENT_YES = 1, + ULP_WP_SYM_L2_TWO_VTAGS_IGNORE = 0, + ULP_WP_SYM_L2_TWO_VTAGS_NO = 0, + ULP_WP_SYM_L2_TWO_VTAGS_YES = 1, + ULP_WP_SYM_L2_CNTX_VLAN_SELECT_INNER = 0, + ULP_WP_SYM_L2_CNTX_VLAN_SELECT_TUN = 0, + ULP_WP_SYM_L2_CNTX_VLAN_SELECT_O_TUN = 0, + ULP_WP_SYM_L2_CNTX_VLAN_SELECT_OM_TUN = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_TUN_ID = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_TUN_CNTX = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_O_TUN_ID = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_O_TUN_CNTX = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_I_L4_PORTS = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_O_L4_PORTS = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_OM_TUN_ID = 0, + ULP_WP_SYM_L2_CNTX_TUN_SELECT_OM_TUN_CNTX = 0, + ULP_WP_SYM_L3_HDR_VALID_IGNORE = 0, + ULP_WP_SYM_L3_HDR_VALID_NO = 0, + ULP_WP_SYM_L3_HDR_VALID_YES = 1, + ULP_WP_SYM_L3_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_L3_HDR_ERROR_NO = 0, + ULP_WP_SYM_L3_HDR_ERROR_YES = 1, + ULP_WP_SYM_L3_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_L3_HDR_TYPE_IPV4 = 0, + ULP_WP_SYM_L3_HDR_TYPE_IPV6 = 1, + ULP_WP_SYM_L3_HDR_TYPE_ARP = 2, + ULP_WP_SYM_L3_HDR_TYPE_PTP = 3, + ULP_WP_SYM_L3_HDR_TYPE_EAPOL = 4, + ULP_WP_SYM_L3_HDR_TYPE_ROCE = 5, + ULP_WP_SYM_L3_HDR_TYPE_FCOE = 6, + ULP_WP_SYM_L3_HDR_TYPE_UPAR1 = 7, + ULP_WP_SYM_L3_HDR_TYPE_UPAR2 = 8, + ULP_WP_SYM_L3_HDR_ISIP_IGNORE = 0, + ULP_WP_SYM_L3_HDR_ISIP_NO = 0, + ULP_WP_SYM_L3_HDR_ISIP_YES = 1, + ULP_WP_SYM_L3_IPV6_CMP_SRC_IGNORE = 0, + ULP_WP_SYM_L3_IPV6_CMP_SRC_NO = 0, + ULP_WP_SYM_L3_IPV6_CMP_SRC_YES = 1, + ULP_WP_SYM_L3_IPV6_CMP_DST_IGNORE = 0, + ULP_WP_SYM_L3_IPV6_CMP_DST_NO = 0, + ULP_WP_SYM_L3_IPV6_CMP_DST_YES = 1, + ULP_WP_SYM_L4_HDR_VALID_IGNORE = 0, + 
ULP_WP_SYM_L4_HDR_VALID_NO = 0, + ULP_WP_SYM_L4_HDR_VALID_YES = 1, + ULP_WP_SYM_L4_HDR_ERROR_IGNORE = 0, + ULP_WP_SYM_L4_HDR_ERROR_NO = 0, + ULP_WP_SYM_L4_HDR_ERROR_YES = 1, + ULP_WP_SYM_L4_HDR_TYPE_IGNORE = 0, + ULP_WP_SYM_L4_HDR_TYPE_TCP = 0, + ULP_WP_SYM_L4_HDR_TYPE_UDP = 1, + ULP_WP_SYM_L4_HDR_TYPE_ICMP = 2, + ULP_WP_SYM_L4_HDR_TYPE_UPAR1 = 3, + ULP_WP_SYM_L4_HDR_TYPE_UPAR2 = 4, + ULP_WP_SYM_L4_HDR_TYPE_BTH_V1 = 5, + ULP_WP_SYM_L4_HDR_TYPE_IPSEC_AH = 0, + ULP_WP_SYM_L4_HDR_TYPE_IPSEC_ESP = 0, + ULP_WP_SYM_L4_HDR_SUBTYPE_NONE = 0, + ULP_WP_SYM_L4_HDR_SUBTYPE_PTP = 0, + ULP_WP_SYM_L4_HDR_SUBTYPE_ROCE = 0, + ULP_WP_SYM_L4_HDR_SUBTYPE_QUIC = 0, + ULP_WP_SYM_L4_HDR_SUBTYPE_ESP = 0, + ULP_WP_SYM_L4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_WP_SYM_L4_HDR_IS_UDP_TCP_NO = 0, + ULP_WP_SYM_L4_HDR_IS_UDP_TCP_YES = 1, + ULP_WP_SYM_EM_WM_OPCODE_OP_NORMAL = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_NORMAL_RFS = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_RFS_FAST = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_FAST = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_RFS_ACT = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_CT_MISS_DEF = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_CT_HIT_DEF = 0, + ULP_WP_SYM_EM_WM_OPCODE_OP_RECYCLE = 0, + ULP_WP_SYM_POP_VLAN_NO = 0, + ULP_WP_SYM_POP_VLAN_YES = 1, + ULP_WP_SYM_VLAN_DEL_RPT_DISABLED = 0, + ULP_WP_SYM_VLAN_DEL_RPT_STRIP_OUTER = 0, + ULP_WP_SYM_VLAN_DEL_RPT_STRIP_BOTH = 0, + ULP_WP_SYM_VLAN_DEL_RPT_DYN_STRIP = 0, + ULP_WP_SYM_DECAP_FUNC_NONE = 0, + ULP_WP_SYM_DECAP_FUNC_THRU_TL2 = 3, + ULP_WP_SYM_DECAP_FUNC_THRU_TL3 = 8, + ULP_WP_SYM_DECAP_FUNC_THRU_TL4 = 9, + ULP_WP_SYM_DECAP_FUNC_THRU_TUN = 10, + ULP_WP_SYM_DECAP_FUNC_THRU_L2 = 11, + ULP_WP_SYM_DECAP_FUNC_THRU_L3 = 12, + ULP_WP_SYM_DECAP_FUNC_THRU_L4 = 13, + ULP_WP_SYM_ECV_VALID_NO = 0, + ULP_WP_SYM_ECV_VALID_YES = 1, + ULP_WP_SYM_ECV_CUSTOM_EN_NO = 0, + ULP_WP_SYM_ECV_CUSTOM_EN_YES = 1, + ULP_WP_SYM_ECV_L2_EN_NO = 0, + ULP_WP_SYM_ECV_L2_EN_YES = 1, + ULP_WP_SYM_ECV_VTAG_TYPE_NOP = 0, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI = 1, + 
ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_IVLAN_PRI = 2, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_REMAP_DIFFSERV = 3, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_2_ENCAP_PRI = 4, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_2_REMAP_DIFFSERV = 5, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_ENCAP_PRI = 6, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_REMAP_DIFFSERV = 7, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_0 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_1 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_2 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_3 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_4 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_5 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_6 = 8, + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_0_PRI_7 = 8, + ULP_WP_SYM_ECV_L3_TYPE_NONE = 0, + ULP_WP_SYM_ECV_L3_TYPE_IPV4 = 4, + ULP_WP_SYM_ECV_L3_TYPE_IPV6 = 5, + ULP_WP_SYM_ECV_L3_TYPE_MPLS_8847 = 6, + ULP_WP_SYM_ECV_L3_TYPE_MPLS_8848 = 7, + ULP_WP_SYM_ECV_L4_TYPE_NONE = 0, + ULP_WP_SYM_ECV_L4_TYPE_UDP = 4, + ULP_WP_SYM_ECV_L4_TYPE_UDP_CSUM = 5, + ULP_WP_SYM_ECV_L4_TYPE_UDP_ENTROPY = 6, + ULP_WP_SYM_ECV_L4_TYPE_UDP_ENTROPY_CSUM = 7, + ULP_WP_SYM_ECV_TUN_TYPE_NONE = 0, + ULP_WP_SYM_ECV_TUN_TYPE_GENERIC = 1, + ULP_WP_SYM_ECV_TUN_TYPE_VXLAN = 2, + ULP_WP_SYM_ECV_TUN_TYPE_NGE = 3, + ULP_WP_SYM_ECV_TUN_TYPE_NVGRE = 4, + ULP_WP_SYM_ECV_TUN_TYPE_GRE = 5, + ULP_WP_SYM_EEM_ACT_REC_INT = 1, + ULP_WP_SYM_EEM_EXT_FLOW_CNTR = 0, + ULP_WP_SYM_UC_ACT_REC = 0, + ULP_WP_SYM_MC_ACT_REC = 1, + ULP_WP_SYM_ACT_REC_DROP_YES = 1, + ULP_WP_SYM_ACT_REC_DROP_NO = 0, + ULP_WP_SYM_ACT_REC_POP_VLAN_YES = 1, + ULP_WP_SYM_ACT_REC_POP_VLAN_NO = 0, + ULP_WP_SYM_ACT_REC_METER_EN_YES = 1, + ULP_WP_SYM_ACT_REC_METER_EN_NO = 0, + ULP_WP_SYM_LOOPBACK_PORT = 4, + ULP_WP_SYM_LOOPBACK_PARIF = 15, + ULP_WP_SYM_EXT_EM_MAX_KEY_SIZE = 448, + ULP_WP_SYM_MATCH_TYPE_EM = 0, + ULP_WP_SYM_MATCH_TYPE_WM = 1, + ULP_WP_SYM_IP_PROTO_ICMP = 1, + ULP_WP_SYM_IP_PROTO_IGMP = 2, + ULP_WP_SYM_IP_PROTO_IP_IN_IP = 4, + ULP_WP_SYM_IP_PROTO_TCP = 6, + ULP_WP_SYM_IP_PROTO_UDP = 17, + ULP_WP_SYM_VF_FUNC_PARIF = 15, + ULP_WP_SYM_NO = 0, + 
ULP_WP_SYM_YES = 1, + ULP_WP_SYM_RECYCLE_DST = 0x800, + ULP_WP_SYM_VF_2_VFR_META_VAL = 8192, + ULP_WP_SYM_VF_2_VF_META_VAL = 0, + ULP_WP_SYM_VF_2_VFR_META_MASK = 0, + ULP_WP_SYM_META_PROFILE_0 = 0, + ULP_WP_SYM_CHAIN_META_VAL = 0, + ULP_WP_SYM_CHAIN_META_VAL_MASK = 0, + ULP_WP_SYM_CHAIN_META_TYPE = 0, + ULP_WP_SYM_L2_ECPRI_ETYPE = 0, + ULP_WP_SYM_L4_ECPRI_ETYPE = 0, + ULP_WP_SYM_L2_ROE_ETYPE = 0 +}; + +enum ulp_thor_sym { + ULP_THOR_SYM_METADATA_OP_NORMAL = 0, + ULP_THOR_SYM_METADATA_OP_L2_HASH = 1, + ULP_THOR_SYM_METADATA_OP_L4_HASH = 2, + ULP_THOR_SYM_FWD_OP_BYPASS_CFA = 0, + ULP_THOR_SYM_FWD_OP_BYPASS_CFA_ROCE = 1, + ULP_THOR_SYM_FWD_OP_BYPASS_LKUP = 2, + ULP_THOR_SYM_FWD_OP_NORMAL_FLOW = 3, + ULP_THOR_SYM_FWD_OP_DROP = 0, + ULP_THOR_SYM_CTXT_OPCODE_BYPASS_CFA = 0, + ULP_THOR_SYM_CTXT_OPCODE_BYPASS_LKUP = 1, + ULP_THOR_SYM_CTXT_OPCODE_META_UPDATE = 2, + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW = 3, + ULP_THOR_SYM_CTXT_OPCODE_DROP = 4, + ULP_THOR_SYM_L2_CTXT_PRI_CATCHALL = 0, + ULP_THOR_SYM_L2_CTXT_PRI_MC_BC = 0, + ULP_THOR_SYM_L2_CTXT_PRI_PORT = 0, + ULP_THOR_SYM_L2_CTXT_PRI_APP = 0, + ULP_THOR_SYM_PROF_TCAM_PRI_CATCHALL = 0, + ULP_THOR_SYM_PROF_TCAM_PRI_APP = 0, + ULP_THOR_SYM_PROF_TCAM_PRI_L4 = 0, + ULP_THOR_SYM_PROF_TCAM_PRI_L3 = 0, + ULP_THOR_SYM_PROF_TCAM_PRI_L2 = 0, + ULP_THOR_SYM_PKT_TYPE_IGNORE = 0, + ULP_THOR_SYM_PKT_TYPE_L2 = 0, + ULP_THOR_SYM_PKT_TYPE_0_IGNORE = 0, + ULP_THOR_SYM_PKT_TYPE_0_L2 = 0, + ULP_THOR_SYM_PKT_TYPE_1_IGNORE = 0, + ULP_THOR_SYM_PKT_TYPE_1_L2 = 0, + ULP_THOR_SYM_RECYCLE_CNT_IGNORE = 0, + ULP_THOR_SYM_RECYCLE_CNT_ZERO = 0, + ULP_THOR_SYM_RECYCLE_CNT_ONE = 1, + ULP_THOR_SYM_RECYCLE_CNT_TWO = 2, + ULP_THOR_SYM_RECYCLE_CNT_THREE = 3, + ULP_THOR_SYM_AGG_ERROR_IGNORE = 0, + ULP_THOR_SYM_AGG_ERROR_NO = 0, + ULP_THOR_SYM_AGG_ERROR_YES = 1, + ULP_THOR_SYM_RESERVED_IGNORE = 0, + ULP_THOR_SYM_HREC_NEXT_IGNORE = 0, + ULP_THOR_SYM_HREC_NEXT_NO = 0, + ULP_THOR_SYM_HREC_NEXT_YES = 1, + ULP_THOR_SYM_TL2_HDR_VALID_IGNORE = 0, + 
ULP_THOR_SYM_TL2_HDR_VALID_NO = 0, + ULP_THOR_SYM_TL2_HDR_VALID_YES = 1, + ULP_THOR_SYM_TL2_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_TL2_HDR_TYPE_DIX = 0, + ULP_THOR_SYM_TL2_UC_MC_BC_IGNORE = 0, + ULP_THOR_SYM_TL2_UC_MC_BC_UC = 0, + ULP_THOR_SYM_TL2_UC_MC_BC_MC = 2, + ULP_THOR_SYM_TL2_UC_MC_BC_BC = 3, + ULP_THOR_SYM_TL2_VTAG_PRESENT_IGNORE = 0, + ULP_THOR_SYM_TL2_VTAG_PRESENT_NO = 0, + ULP_THOR_SYM_TL2_VTAG_PRESENT_YES = 1, + ULP_THOR_SYM_TL2_TWO_VTAGS_IGNORE = 0, + ULP_THOR_SYM_TL2_TWO_VTAGS_NO = 0, + ULP_THOR_SYM_TL2_TWO_VTAGS_YES = 1, + ULP_THOR_SYM_TL3_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_TL3_HDR_VALID_NO = 0, + ULP_THOR_SYM_TL3_HDR_VALID_YES = 1, + ULP_THOR_SYM_TL3_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_TL3_HDR_ERROR_NO = 0, + ULP_THOR_SYM_TL3_HDR_ERROR_YES = 1, + ULP_THOR_SYM_TL3_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_TL3_HDR_TYPE_IPV4 = 0, + ULP_THOR_SYM_TL3_HDR_TYPE_IPV6 = 1, + ULP_THOR_SYM_TL3_HDR_ISIP_IGNORE = 0, + ULP_THOR_SYM_TL3_HDR_ISIP_NO = 0, + ULP_THOR_SYM_TL3_HDR_ISIP_YES = 1, + ULP_THOR_SYM_TL3_IPV6_CMP_SRC_IGNORE = 0, + ULP_THOR_SYM_TL3_IPV6_CMP_SRC_NO = 0, + ULP_THOR_SYM_TL3_IPV6_CMP_SRC_YES = 1, + ULP_THOR_SYM_TL3_IPV6_CMP_DST_IGNORE = 0, + ULP_THOR_SYM_TL3_IPV6_CMP_DST_NO = 0, + ULP_THOR_SYM_TL3_IPV6_CMP_DST_YES = 1, + ULP_THOR_SYM_TL4_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_TL4_HDR_VALID_NO = 0, + ULP_THOR_SYM_TL4_HDR_VALID_YES = 1, + ULP_THOR_SYM_TL4_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_TL4_HDR_ERROR_NO = 0, + ULP_THOR_SYM_TL4_HDR_ERROR_YES = 1, + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_NO = 0, + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_YES = 1, + ULP_THOR_SYM_TL4_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_TL4_HDR_TYPE_TCP = 0, + ULP_THOR_SYM_TL4_HDR_TYPE_UDP = 1, + ULP_THOR_SYM_TUN_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_TUN_HDR_VALID_NO = 0, + ULP_THOR_SYM_TUN_HDR_VALID_YES = 1, + ULP_THOR_SYM_TUN_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_TUN_HDR_ERROR_NO = 0, + ULP_THOR_SYM_TUN_HDR_ERROR_YES = 1, + ULP_THOR_SYM_TUN_HDR_TYPE_IGNORE = 0, 
+ ULP_THOR_SYM_TUN_HDR_TYPE_VXLAN = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_VXLAN_GPE = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_GENEVE = 1, + ULP_THOR_SYM_TUN_HDR_TYPE_NVGRE = 2, + ULP_THOR_SYM_TUN_HDR_TYPE_GRE = 3, + ULP_THOR_SYM_TUN_HDR_TYPE_IPV4 = 4, + ULP_THOR_SYM_TUN_HDR_TYPE_IPV6 = 5, + ULP_THOR_SYM_TUN_HDR_TYPE_PPPOE = 6, + ULP_THOR_SYM_TUN_HDR_TYPE_MPLS = 7, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR1 = 8, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR2 = 9, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR3 = 10, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR4 = 11, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR5 = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR6 = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR7 = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR8 = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_ROE = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_ECPRI = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_GTP_V1_U = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_GTP_V2_C = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_PFCP_SESS = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_PFCP_NODE = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_NSH = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_VXLAN_IP = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_GRE_TEN = 0, + ULP_THOR_SYM_TUN_HDR_TYPE_NONE = 15, + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR_MASK = 14, + ULP_THOR_SYM_TUN_HDR_TYPE_TID_MASK = 3840, + ULP_THOR_SYM_TUN_HDR_FLAGS_IGNORE = 0, + ULP_THOR_SYM_L2_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_L2_HDR_VALID_NO = 0, + ULP_THOR_SYM_L2_HDR_VALID_YES = 1, + ULP_THOR_SYM_L2_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_L2_HDR_ERROR_NO = 0, + ULP_THOR_SYM_L2_HDR_ERROR_YES = 1, + ULP_THOR_SYM_L2_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_L2_HDR_TYPE_DIX = 0, + ULP_THOR_SYM_L2_HDR_TYPE_LLC_SNAP = 1, + ULP_THOR_SYM_L2_HDR_TYPE_LLC = 2, + ULP_THOR_SYM_L2_UC_MC_BC_IGNORE = 0, + ULP_THOR_SYM_L2_UC_MC_BC_UC = 0, + ULP_THOR_SYM_L2_UC_MC_BC_MC = 2, + ULP_THOR_SYM_L2_UC_MC_BC_BC = 3, + ULP_THOR_SYM_L2_VTAG_PRESENT_IGNORE = 0, + ULP_THOR_SYM_L2_VTAG_PRESENT_NO = 0, + ULP_THOR_SYM_L2_VTAG_PRESENT_YES = 1, + ULP_THOR_SYM_L2_TWO_VTAGS_IGNORE = 0, + ULP_THOR_SYM_L2_TWO_VTAGS_NO = 0, + ULP_THOR_SYM_L2_TWO_VTAGS_YES = 1, + ULP_THOR_SYM_L2_CNTX_VLAN_SELECT_INNER = 
0, + ULP_THOR_SYM_L2_CNTX_VLAN_SELECT_TUN = 0, + ULP_THOR_SYM_L2_CNTX_VLAN_SELECT_O_TUN = 0, + ULP_THOR_SYM_L2_CNTX_VLAN_SELECT_OM_TUN = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_TUN_ID = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_TUN_CNTX = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_O_TUN_ID = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_O_TUN_CNTX = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_I_L4_PORTS = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_O_L4_PORTS = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_OM_TUN_ID = 0, + ULP_THOR_SYM_L2_CNTX_TUN_SELECT_OM_TUN_CNTX = 0, + ULP_THOR_SYM_L3_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_L3_HDR_VALID_NO = 0, + ULP_THOR_SYM_L3_HDR_VALID_YES = 1, + ULP_THOR_SYM_L3_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_L3_HDR_ERROR_NO = 0, + ULP_THOR_SYM_L3_HDR_ERROR_YES = 1, + ULP_THOR_SYM_L3_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_L3_HDR_TYPE_IPV4 = 0, + ULP_THOR_SYM_L3_HDR_TYPE_IPV6 = 1, + ULP_THOR_SYM_L3_HDR_TYPE_ARP = 2, + ULP_THOR_SYM_L3_HDR_TYPE_PTP = 3, + ULP_THOR_SYM_L3_HDR_TYPE_EAPOL = 4, + ULP_THOR_SYM_L3_HDR_TYPE_ROCE = 5, + ULP_THOR_SYM_L3_HDR_TYPE_FCOE = 6, + ULP_THOR_SYM_L3_HDR_TYPE_UPAR1 = 7, + ULP_THOR_SYM_L3_HDR_TYPE_UPAR2 = 8, + ULP_THOR_SYM_L3_HDR_ISIP_IGNORE = 0, + ULP_THOR_SYM_L3_HDR_ISIP_NO = 0, + ULP_THOR_SYM_L3_HDR_ISIP_YES = 1, + ULP_THOR_SYM_L3_IPV6_CMP_SRC_IGNORE = 0, + ULP_THOR_SYM_L3_IPV6_CMP_SRC_NO = 0, + ULP_THOR_SYM_L3_IPV6_CMP_SRC_YES = 1, + ULP_THOR_SYM_L3_IPV6_CMP_DST_IGNORE = 0, + ULP_THOR_SYM_L3_IPV6_CMP_DST_NO = 0, + ULP_THOR_SYM_L3_IPV6_CMP_DST_YES = 1, + ULP_THOR_SYM_L4_HDR_VALID_IGNORE = 0, + ULP_THOR_SYM_L4_HDR_VALID_NO = 0, + ULP_THOR_SYM_L4_HDR_VALID_YES = 1, + ULP_THOR_SYM_L4_HDR_ERROR_IGNORE = 0, + ULP_THOR_SYM_L4_HDR_ERROR_NO = 0, + ULP_THOR_SYM_L4_HDR_ERROR_YES = 1, + ULP_THOR_SYM_L4_HDR_TYPE_IGNORE = 0, + ULP_THOR_SYM_L4_HDR_TYPE_TCP = 0, + ULP_THOR_SYM_L4_HDR_TYPE_UDP = 1, + ULP_THOR_SYM_L4_HDR_TYPE_ICMP = 2, + ULP_THOR_SYM_L4_HDR_TYPE_UPAR1 = 3, + ULP_THOR_SYM_L4_HDR_TYPE_UPAR2 = 4, + ULP_THOR_SYM_L4_HDR_TYPE_BTH_V1 = 5, + 
ULP_THOR_SYM_L4_HDR_TYPE_IPSEC_AH = 0, + ULP_THOR_SYM_L4_HDR_TYPE_IPSEC_ESP = 0, + ULP_THOR_SYM_L4_HDR_SUBTYPE_NONE = 0, + ULP_THOR_SYM_L4_HDR_SUBTYPE_PTP = 0, + ULP_THOR_SYM_L4_HDR_SUBTYPE_ROCE = 0, + ULP_THOR_SYM_L4_HDR_SUBTYPE_QUIC = 0, + ULP_THOR_SYM_L4_HDR_SUBTYPE_ESP = 0, + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_NO = 0, + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES = 1, + ULP_THOR_SYM_EM_WM_OPCODE_OP_NORMAL = 0, + ULP_THOR_SYM_EM_WM_OPCODE_OP_NORMAL_RFS = 0, + ULP_THOR_SYM_EM_WM_OPCODE_OP_RFS_FAST = 1, + ULP_THOR_SYM_EM_WM_OPCODE_OP_FAST = 2, + ULP_THOR_SYM_EM_WM_OPCODE_OP_RFS_ACT = 3, + ULP_THOR_SYM_EM_WM_OPCODE_OP_CT_MISS_DEF = 0, + ULP_THOR_SYM_EM_WM_OPCODE_OP_CT_HIT_DEF = 0, + ULP_THOR_SYM_EM_WM_OPCODE_OP_RECYCLE = 4, + ULP_THOR_SYM_POP_VLAN_NO = 0, + ULP_THOR_SYM_POP_VLAN_YES = 1, + ULP_THOR_SYM_VLAN_DEL_RPT_DISABLED = 0, + ULP_THOR_SYM_VLAN_DEL_RPT_STRIP_OUTER = 1, + ULP_THOR_SYM_VLAN_DEL_RPT_STRIP_BOTH = 2, + ULP_THOR_SYM_VLAN_DEL_RPT_DYN_STRIP = 3, + ULP_THOR_SYM_DECAP_FUNC_NONE = 0, + ULP_THOR_SYM_DECAP_FUNC_THRU_TL2 = 3, + ULP_THOR_SYM_DECAP_FUNC_THRU_TL3 = 8, + ULP_THOR_SYM_DECAP_FUNC_THRU_TL4 = 9, + ULP_THOR_SYM_DECAP_FUNC_THRU_TUN = 10, + ULP_THOR_SYM_DECAP_FUNC_THRU_L2 = 11, + ULP_THOR_SYM_DECAP_FUNC_THRU_L3 = 12, + ULP_THOR_SYM_DECAP_FUNC_THRU_L4 = 13, + ULP_THOR_SYM_ECV_VALID_NO = 0, + ULP_THOR_SYM_ECV_VALID_YES = 1, + ULP_THOR_SYM_ECV_CUSTOM_EN_NO = 0, + ULP_THOR_SYM_ECV_CUSTOM_EN_YES = 1, + ULP_THOR_SYM_ECV_L2_EN_NO = 0, + ULP_THOR_SYM_ECV_L2_EN_YES = 1, + ULP_THOR_SYM_ECV_VTAG_TYPE_NOP = 0, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI = 1, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_1_IVLAN_PRI = 2, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_1_REMAP_DIFFSERV = 3, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_2_ENCAP_PRI = 4, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_2_REMAP_DIFFSERV = 5, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_ENCAP_PRI = 6, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_REMAP_DIFFSERV = 7, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_0 = 8, + 
ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_1 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_2 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_3 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_4 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_5 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_6 = 8, + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_0_PRI_7 = 8, + ULP_THOR_SYM_ECV_L3_TYPE_NONE = 0, + ULP_THOR_SYM_ECV_L3_TYPE_IPV4 = 4, + ULP_THOR_SYM_ECV_L3_TYPE_IPV6 = 5, + ULP_THOR_SYM_ECV_L3_TYPE_MPLS_8847 = 6, + ULP_THOR_SYM_ECV_L3_TYPE_MPLS_8848 = 7, + ULP_THOR_SYM_ECV_L4_TYPE_NONE = 0, + ULP_THOR_SYM_ECV_L4_TYPE_UDP = 4, + ULP_THOR_SYM_ECV_L4_TYPE_UDP_CSUM = 5, + ULP_THOR_SYM_ECV_L4_TYPE_UDP_ENTROPY = 6, + ULP_THOR_SYM_ECV_L4_TYPE_UDP_ENTROPY_CSUM = 7, + ULP_THOR_SYM_ECV_TUN_TYPE_NONE = 0, + ULP_THOR_SYM_ECV_TUN_TYPE_GENERIC = 1, + ULP_THOR_SYM_ECV_TUN_TYPE_VXLAN = 2, + ULP_THOR_SYM_ECV_TUN_TYPE_NGE = 3, + ULP_THOR_SYM_ECV_TUN_TYPE_NVGRE = 4, + ULP_THOR_SYM_ECV_TUN_TYPE_GRE = 5, + ULP_THOR_SYM_EEM_ACT_REC_INT = 0, + ULP_THOR_SYM_EEM_EXT_FLOW_CNTR = 0, + ULP_THOR_SYM_UC_ACT_REC = 0, + ULP_THOR_SYM_MC_ACT_REC = 1, + ULP_THOR_SYM_ACT_REC_DROP_YES = 1, + ULP_THOR_SYM_ACT_REC_DROP_NO = 0, + ULP_THOR_SYM_ACT_REC_POP_VLAN_YES = 1, + ULP_THOR_SYM_ACT_REC_POP_VLAN_NO = 0, + ULP_THOR_SYM_ACT_REC_METER_EN_YES = 1, + ULP_THOR_SYM_ACT_REC_METER_EN_NO = 0, + ULP_THOR_SYM_LOOPBACK_PORT = 16, + ULP_THOR_SYM_LOOPBACK_PARIF = 15, + ULP_THOR_SYM_EXT_EM_MAX_KEY_SIZE = 0, + ULP_THOR_SYM_MATCH_TYPE_EM = 0, + ULP_THOR_SYM_MATCH_TYPE_WM = 1, + ULP_THOR_SYM_IP_PROTO_ICMP = 1, + ULP_THOR_SYM_IP_PROTO_IGMP = 2, + ULP_THOR_SYM_IP_PROTO_IP_IN_IP = 4, + ULP_THOR_SYM_IP_PROTO_TCP = 6, + ULP_THOR_SYM_IP_PROTO_UDP = 17, + ULP_THOR_SYM_VF_FUNC_PARIF = 15, + ULP_THOR_SYM_NO = 0, + ULP_THOR_SYM_YES = 1, + ULP_THOR_SYM_RECYCLE_DST = 1039, + ULP_THOR_SYM_VF_2_VFR_META_VAL = 8192, + ULP_THOR_SYM_VF_2_VF_META_VAL = 4096, + ULP_THOR_SYM_VF_2_VFR_META_MASK = 61440, + ULP_THOR_SYM_META_PROFILE_0 = 0, + ULP_THOR_SYM_CHAIN_META_VAL = 12288, + 
ULP_THOR_SYM_CHAIN_META_VAL_MASK = 61440, + ULP_THOR_SYM_CHAIN_META_TYPE = 3, + ULP_THOR_SYM_L2_ECPRI_ETYPE = 44798, + ULP_THOR_SYM_L4_ECPRI_ETYPE = 2048, + ULP_THOR_SYM_L2_ROE_ETYPE = 64573 +}; + +enum ulp_thor2_sym { + ULP_THOR2_SYM_METADATA_OP_NORMAL = 0, + ULP_THOR2_SYM_METADATA_OP_L2_HASH = 1, + ULP_THOR2_SYM_METADATA_OP_L4_HASH = 2, + ULP_THOR2_SYM_FWD_OP_BYPASS_CFA = 0, + ULP_THOR2_SYM_FWD_OP_BYPASS_CFA_ROCE = 1, + ULP_THOR2_SYM_FWD_OP_BYPASS_LKUP = 2, + ULP_THOR2_SYM_FWD_OP_NORMAL_FLOW = 3, + ULP_THOR2_SYM_FWD_OP_DROP = 4, + ULP_THOR2_SYM_CTXT_OPCODE_BYPASS_CFA = 0, + ULP_THOR2_SYM_CTXT_OPCODE_BYPASS_LKUP = 1, + ULP_THOR2_SYM_CTXT_OPCODE_META_UPDATE = 0, + ULP_THOR2_SYM_CTXT_OPCODE_NORMAL_FLOW = 2, + ULP_THOR2_SYM_CTXT_OPCODE_DROP = 3, + ULP_THOR2_SYM_L2_CTXT_PRI_CATCHALL = 5, + ULP_THOR2_SYM_L2_CTXT_PRI_MC_BC = 40, + ULP_THOR2_SYM_L2_CTXT_PRI_PORT = 70, + ULP_THOR2_SYM_L2_CTXT_PRI_APP = 140, + ULP_THOR2_SYM_PROF_TCAM_PRI_CATCHALL = 1, + ULP_THOR2_SYM_PROF_TCAM_PRI_APP = 10, + ULP_THOR2_SYM_PROF_TCAM_PRI_L4 = 10, + ULP_THOR2_SYM_PROF_TCAM_PRI_L3 = 8, + ULP_THOR2_SYM_PROF_TCAM_PRI_L2 = 4, + ULP_THOR2_SYM_PKT_TYPE_IGNORE = 0, + ULP_THOR2_SYM_PKT_TYPE_L2 = 0, + ULP_THOR2_SYM_PKT_TYPE_0_IGNORE = 0, + ULP_THOR2_SYM_PKT_TYPE_0_L2 = 0, + ULP_THOR2_SYM_PKT_TYPE_1_IGNORE = 0, + ULP_THOR2_SYM_PKT_TYPE_1_L2 = 0, + ULP_THOR2_SYM_RECYCLE_CNT_IGNORE = 0, + ULP_THOR2_SYM_RECYCLE_CNT_ZERO = 0, + ULP_THOR2_SYM_RECYCLE_CNT_ONE = 1, + ULP_THOR2_SYM_RECYCLE_CNT_TWO = 2, + ULP_THOR2_SYM_RECYCLE_CNT_THREE = 3, + ULP_THOR2_SYM_AGG_ERROR_IGNORE = 0, + ULP_THOR2_SYM_AGG_ERROR_NO = 0, + ULP_THOR2_SYM_AGG_ERROR_YES = 1, + ULP_THOR2_SYM_RESERVED_IGNORE = 0, + ULP_THOR2_SYM_HREC_NEXT_IGNORE = 0, + ULP_THOR2_SYM_HREC_NEXT_NO = 0, + ULP_THOR2_SYM_HREC_NEXT_YES = 1, + ULP_THOR2_SYM_TL2_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_TL2_HDR_VALID_NO = 0, + ULP_THOR2_SYM_TL2_HDR_VALID_YES = 1, + ULP_THOR2_SYM_TL2_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_TL2_HDR_TYPE_DIX = 0, + 
ULP_THOR2_SYM_TL2_UC_MC_BC_IGNORE = 0, + ULP_THOR2_SYM_TL2_UC_MC_BC_UC = 0, + ULP_THOR2_SYM_TL2_UC_MC_BC_MC = 2, + ULP_THOR2_SYM_TL2_UC_MC_BC_BC = 3, + ULP_THOR2_SYM_TL2_VTAG_PRESENT_IGNORE = 0, + ULP_THOR2_SYM_TL2_VTAG_PRESENT_NO = 0, + ULP_THOR2_SYM_TL2_VTAG_PRESENT_YES = 1, + ULP_THOR2_SYM_TL2_TWO_VTAGS_IGNORE = 0, + ULP_THOR2_SYM_TL2_TWO_VTAGS_NO = 0, + ULP_THOR2_SYM_TL2_TWO_VTAGS_YES = 1, + ULP_THOR2_SYM_TL3_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_TL3_HDR_VALID_NO = 0, + ULP_THOR2_SYM_TL3_HDR_VALID_YES = 1, + ULP_THOR2_SYM_TL3_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_TL3_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_TL3_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_TL3_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_TL3_HDR_TYPE_IPV4 = 0, + ULP_THOR2_SYM_TL3_HDR_TYPE_IPV6 = 1, + ULP_THOR2_SYM_TL3_HDR_ISIP_IGNORE = 0, + ULP_THOR2_SYM_TL3_HDR_ISIP_NO = 0, + ULP_THOR2_SYM_TL3_HDR_ISIP_YES = 1, + ULP_THOR2_SYM_TL3_IPV6_CMP_SRC_IGNORE = 0, + ULP_THOR2_SYM_TL3_IPV6_CMP_SRC_NO = 0, + ULP_THOR2_SYM_TL3_IPV6_CMP_SRC_YES = 1, + ULP_THOR2_SYM_TL3_IPV6_CMP_DST_IGNORE = 0, + ULP_THOR2_SYM_TL3_IPV6_CMP_DST_NO = 0, + ULP_THOR2_SYM_TL3_IPV6_CMP_DST_YES = 1, + ULP_THOR2_SYM_TL4_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_TL4_HDR_VALID_NO = 0, + ULP_THOR2_SYM_TL4_HDR_VALID_YES = 1, + ULP_THOR2_SYM_TL4_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_TL4_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_TL4_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_NO = 0, + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_YES = 1, + ULP_THOR2_SYM_TL4_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_TL4_HDR_TYPE_TCP = 0, + ULP_THOR2_SYM_TL4_HDR_TYPE_UDP = 1, + ULP_THOR2_SYM_TUN_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_TUN_HDR_VALID_NO = 0, + ULP_THOR2_SYM_TUN_HDR_VALID_YES = 1, + ULP_THOR2_SYM_TUN_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_TUN_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_TUN_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_TUN_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_TUN_HDR_TYPE_VXLAN = 0, + ULP_THOR2_SYM_TUN_HDR_TYPE_VXLAN_GPE = 27, + 
ULP_THOR2_SYM_TUN_HDR_TYPE_GENEVE = 1, + ULP_THOR2_SYM_TUN_HDR_TYPE_NVGRE = 2, + ULP_THOR2_SYM_TUN_HDR_TYPE_GRE = 3, + ULP_THOR2_SYM_TUN_HDR_TYPE_IPV4 = 4, + ULP_THOR2_SYM_TUN_HDR_TYPE_IPV6 = 5, + ULP_THOR2_SYM_TUN_HDR_TYPE_PPPOE = 6, + ULP_THOR2_SYM_TUN_HDR_TYPE_MPLS = 7, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR1 = 8, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR2 = 9, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR3 = 10, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR4 = 11, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR5 = 12, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR6 = 13, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR7 = 14, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR8 = 15, + ULP_THOR2_SYM_TUN_HDR_TYPE_ROE = 20, + ULP_THOR2_SYM_TUN_HDR_TYPE_ECPRI = 21, + ULP_THOR2_SYM_TUN_HDR_TYPE_GTP_V1_U = 22, + ULP_THOR2_SYM_TUN_HDR_TYPE_GTP_V2_C = 23, + ULP_THOR2_SYM_TUN_HDR_TYPE_PFCP_SESS = 24, + ULP_THOR2_SYM_TUN_HDR_TYPE_PFCP_NODE = 25, + ULP_THOR2_SYM_TUN_HDR_TYPE_NSH = 26, + ULP_THOR2_SYM_TUN_HDR_TYPE_VXLAN_IP = 28, + ULP_THOR2_SYM_TUN_HDR_TYPE_GRE_TEN = 29, + ULP_THOR2_SYM_TUN_HDR_TYPE_NONE = 31, + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR_MASK = 32, + ULP_THOR2_SYM_TUN_HDR_TYPE_TID_MASK = 33, + ULP_THOR2_SYM_TUN_HDR_FLAGS_IGNORE = 0, + ULP_THOR2_SYM_L2_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_L2_HDR_VALID_NO = 0, + ULP_THOR2_SYM_L2_HDR_VALID_YES = 1, + ULP_THOR2_SYM_L2_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_L2_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_L2_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_L2_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_L2_HDR_TYPE_DIX = 0, + ULP_THOR2_SYM_L2_HDR_TYPE_LLC_SNAP = 1, + ULP_THOR2_SYM_L2_HDR_TYPE_LLC = 2, + ULP_THOR2_SYM_L2_UC_MC_BC_IGNORE = 0, + ULP_THOR2_SYM_L2_UC_MC_BC_UC = 0, + ULP_THOR2_SYM_L2_UC_MC_BC_MC = 2, + ULP_THOR2_SYM_L2_UC_MC_BC_BC = 3, + ULP_THOR2_SYM_L2_VTAG_PRESENT_IGNORE = 0, + ULP_THOR2_SYM_L2_VTAG_PRESENT_NO = 0, + ULP_THOR2_SYM_L2_VTAG_PRESENT_YES = 1, + ULP_THOR2_SYM_L2_TWO_VTAGS_IGNORE = 0, + ULP_THOR2_SYM_L2_TWO_VTAGS_NO = 0, + ULP_THOR2_SYM_L2_TWO_VTAGS_YES = 1, + ULP_THOR2_SYM_L2_CNTX_VLAN_SELECT_INNER = 0, + 
ULP_THOR2_SYM_L2_CNTX_VLAN_SELECT_TUN = 1, + ULP_THOR2_SYM_L2_CNTX_VLAN_SELECT_O_TUN = 2, + ULP_THOR2_SYM_L2_CNTX_VLAN_SELECT_OM_TUN = 3, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_TUN_ID = 0, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_TUN_CNTX = 1, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_O_TUN_ID = 2, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_O_TUN_CNTX = 3, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_I_L4_PORTS = 4, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_O_L4_PORTS = 5, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_OM_TUN_ID = 6, + ULP_THOR2_SYM_L2_CNTX_TUN_SELECT_OM_TUN_CNTX = 7, + ULP_THOR2_SYM_L3_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_L3_HDR_VALID_NO = 0, + ULP_THOR2_SYM_L3_HDR_VALID_YES = 1, + ULP_THOR2_SYM_L3_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_L3_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_L3_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_L3_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_L3_HDR_TYPE_IPV4 = 0, + ULP_THOR2_SYM_L3_HDR_TYPE_IPV6 = 1, + ULP_THOR2_SYM_L3_HDR_TYPE_ARP = 2, + ULP_THOR2_SYM_L3_HDR_TYPE_PTP = 3, + ULP_THOR2_SYM_L3_HDR_TYPE_EAPOL = 4, + ULP_THOR2_SYM_L3_HDR_TYPE_ROCE = 5, + ULP_THOR2_SYM_L3_HDR_TYPE_FCOE = 6, + ULP_THOR2_SYM_L3_HDR_TYPE_UPAR1 = 7, + ULP_THOR2_SYM_L3_HDR_TYPE_UPAR2 = 8, + ULP_THOR2_SYM_L3_HDR_ISIP_IGNORE = 0, + ULP_THOR2_SYM_L3_HDR_ISIP_NO = 0, + ULP_THOR2_SYM_L3_HDR_ISIP_YES = 1, + ULP_THOR2_SYM_L3_IPV6_CMP_SRC_IGNORE = 0, + ULP_THOR2_SYM_L3_IPV6_CMP_SRC_NO = 0, + ULP_THOR2_SYM_L3_IPV6_CMP_SRC_YES = 1, + ULP_THOR2_SYM_L3_IPV6_CMP_DST_IGNORE = 0, + ULP_THOR2_SYM_L3_IPV6_CMP_DST_NO = 0, + ULP_THOR2_SYM_L3_IPV6_CMP_DST_YES = 1, + ULP_THOR2_SYM_L4_HDR_VALID_IGNORE = 0, + ULP_THOR2_SYM_L4_HDR_VALID_NO = 0, + ULP_THOR2_SYM_L4_HDR_VALID_YES = 1, + ULP_THOR2_SYM_L4_HDR_ERROR_IGNORE = 0, + ULP_THOR2_SYM_L4_HDR_ERROR_NO = 0, + ULP_THOR2_SYM_L4_HDR_ERROR_YES = 1, + ULP_THOR2_SYM_L4_HDR_TYPE_IGNORE = 0, + ULP_THOR2_SYM_L4_HDR_TYPE_TCP = 0, + ULP_THOR2_SYM_L4_HDR_TYPE_UDP = 1, + ULP_THOR2_SYM_L4_HDR_TYPE_ICMP = 2, + ULP_THOR2_SYM_L4_HDR_TYPE_UPAR1 = 3, + ULP_THOR2_SYM_L4_HDR_TYPE_UPAR2 = 4, + 
ULP_THOR2_SYM_L4_HDR_TYPE_BTH_V1 = 0, + ULP_THOR2_SYM_L4_HDR_TYPE_IPSEC_AH = 8, + ULP_THOR2_SYM_L4_HDR_TYPE_IPSEC_ESP = 9, + ULP_THOR2_SYM_L4_HDR_SUBTYPE_NONE = 0, + ULP_THOR2_SYM_L4_HDR_SUBTYPE_PTP = 1, + ULP_THOR2_SYM_L4_HDR_SUBTYPE_ROCE = 2, + ULP_THOR2_SYM_L4_HDR_SUBTYPE_QUIC = 3, + ULP_THOR2_SYM_L4_HDR_SUBTYPE_ESP = 4, + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_IGNORE = 0, + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_NO = 0, + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES = 1, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_NORMAL = 0, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_NORMAL_RFS = 1, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_RFS_FAST = 3, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_FAST = 2, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_RFS_ACT = 0, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_CT_MISS_DEF = 4, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_CT_HIT_DEF = 6, + ULP_THOR2_SYM_EM_WM_OPCODE_OP_RECYCLE = 8, + ULP_THOR2_SYM_POP_VLAN_NO = 0, + ULP_THOR2_SYM_POP_VLAN_YES = 1, + ULP_THOR2_SYM_VLAN_DEL_RPT_DISABLED = 0, + ULP_THOR2_SYM_VLAN_DEL_RPT_STRIP_OUTER = 1, + ULP_THOR2_SYM_VLAN_DEL_RPT_STRIP_BOTH = 2, + ULP_THOR2_SYM_VLAN_DEL_RPT_DYN_STRIP = 3, + ULP_THOR2_SYM_DECAP_FUNC_NONE = 0, + ULP_THOR2_SYM_DECAP_FUNC_THRU_TL2 = 3, + ULP_THOR2_SYM_DECAP_FUNC_THRU_TL3 = 8, + ULP_THOR2_SYM_DECAP_FUNC_THRU_TL4 = 9, + ULP_THOR2_SYM_DECAP_FUNC_THRU_TUN = 10, + ULP_THOR2_SYM_DECAP_FUNC_THRU_L2 = 11, + ULP_THOR2_SYM_DECAP_FUNC_THRU_L3 = 12, + ULP_THOR2_SYM_DECAP_FUNC_THRU_L4 = 13, + ULP_THOR2_SYM_ECV_VALID_NO = 0, + ULP_THOR2_SYM_ECV_VALID_YES = 1, + ULP_THOR2_SYM_ECV_CUSTOM_EN_NO = 0, + ULP_THOR2_SYM_ECV_CUSTOM_EN_YES = 1, + ULP_THOR2_SYM_ECV_L2_EN_NO = 0, + ULP_THOR2_SYM_ECV_L2_EN_YES = 1, + ULP_THOR2_SYM_ECV_VTAG_TYPE_NOP = 0, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI = 1, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_1_IVLAN_PRI = 2, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_1_REMAP_DIFFSERV = 3, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_2_ENCAP_PRI = 4, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_2_REMAP_DIFFSERV = 5, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_ENCAP_PRI = 6, + 
ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_REMAP_DIFFSERV = 7, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_0 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_1 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_2 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_3 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_4 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_5 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_6 = 8, + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_0_PRI_7 = 8, + ULP_THOR2_SYM_ECV_L3_TYPE_NONE = 0, + ULP_THOR2_SYM_ECV_L3_TYPE_IPV4 = 4, + ULP_THOR2_SYM_ECV_L3_TYPE_IPV6 = 5, + ULP_THOR2_SYM_ECV_L3_TYPE_MPLS_8847 = 6, + ULP_THOR2_SYM_ECV_L3_TYPE_MPLS_8848 = 7, + ULP_THOR2_SYM_ECV_L4_TYPE_NONE = 0, + ULP_THOR2_SYM_ECV_L4_TYPE_UDP = 4, + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_CSUM = 5, + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_ENTROPY = 6, + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_ENTROPY_CSUM = 7, + ULP_THOR2_SYM_ECV_TUN_TYPE_NONE = 0, + ULP_THOR2_SYM_ECV_TUN_TYPE_GENERIC = 1, + ULP_THOR2_SYM_ECV_TUN_TYPE_VXLAN = 2, + ULP_THOR2_SYM_ECV_TUN_TYPE_NGE = 3, + ULP_THOR2_SYM_ECV_TUN_TYPE_NVGRE = 4, + ULP_THOR2_SYM_ECV_TUN_TYPE_GRE = 5, + ULP_THOR2_SYM_EEM_ACT_REC_INT = 0, + ULP_THOR2_SYM_EEM_EXT_FLOW_CNTR = 0, + ULP_THOR2_SYM_UC_ACT_REC = 0, + ULP_THOR2_SYM_MC_ACT_REC = 1, + ULP_THOR2_SYM_ACT_REC_DROP_YES = 1, + ULP_THOR2_SYM_ACT_REC_DROP_NO = 0, + ULP_THOR2_SYM_ACT_REC_POP_VLAN_YES = 1, + ULP_THOR2_SYM_ACT_REC_POP_VLAN_NO = 0, + ULP_THOR2_SYM_ACT_REC_METER_EN_YES = 1, + ULP_THOR2_SYM_ACT_REC_METER_EN_NO = 0, + ULP_THOR2_SYM_LOOPBACK_PORT = 16, + ULP_THOR2_SYM_LOOPBACK_PARIF = 15, + ULP_THOR2_SYM_EXT_EM_MAX_KEY_SIZE = 0, + ULP_THOR2_SYM_MATCH_TYPE_EM = 0, + ULP_THOR2_SYM_MATCH_TYPE_WM = 1, + ULP_THOR2_SYM_IP_PROTO_ICMP = 1, + ULP_THOR2_SYM_IP_PROTO_IGMP = 2, + ULP_THOR2_SYM_IP_PROTO_IP_IN_IP = 4, + ULP_THOR2_SYM_IP_PROTO_TCP = 6, + ULP_THOR2_SYM_IP_PROTO_UDP = 17, + ULP_THOR2_SYM_VF_FUNC_PARIF = 15, + ULP_THOR2_SYM_NO = 0, + ULP_THOR2_SYM_YES = 1, + ULP_THOR2_SYM_RECYCLE_DST = 0x800, + ULP_THOR2_SYM_VF_2_VFR_META_VAL = 536870912, + 
ULP_THOR2_SYM_VF_2_VF_META_VAL = 536870912, + ULP_THOR2_SYM_VF_2_VFR_META_MASK = 4026531840, + ULP_THOR2_SYM_META_PROFILE_0 = 0, + ULP_THOR2_SYM_CHAIN_META_VAL = 12288, + ULP_THOR2_SYM_CHAIN_META_VAL_MASK = 61440, + ULP_THOR2_SYM_CHAIN_META_TYPE = 3, + ULP_THOR2_SYM_L2_ECPRI_ETYPE = 44798, + ULP_THOR2_SYM_L4_ECPRI_ETYPE = 2048, + ULP_THOR2_SYM_L2_ROE_ETYPE = 64573 +}; + +enum bnxt_ulp_df_tpl { + BNXT_ULP_DF_TPL_DEFAULT_UPLINK_PORT = 3, + BNXT_ULP_DF_TPL_DEFAULT_VFR = 4 +}; + +#endif + diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_field.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_field.h new file mode 100644 index 000000000000..276da1c479bf --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_field.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#ifndef ULP_HDR_FIELD_ENUMS_H_ +#define ULP_HDR_FIELD_ENUMS_H_ + +enum bnxt_ulp_glb_hf { + BNXT_ULP_GLB_HF_ID_WM, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX, + BNXT_ULP_GLB_HF_ID_O_BTH_OP_CODE, + BNXT_ULP_GLB_HF_ID_I_BTH_OP_CODE, + BNXT_ULP_GLB_HF_ID_O_BTH_DST_QPN, + BNXT_ULP_GLB_HF_ID_I_BTH_DST_QPN, + BNXT_ULP_GLB_HF_ID_O_ECPRI_TYPE, + BNXT_ULP_GLB_HF_ID_I_ECPRI_TYPE, + BNXT_ULP_GLB_HF_ID_O_ECPRI_ID, + BNXT_ULP_GLB_HF_ID_I_ECPRI_ID, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE, + BNXT_ULP_GLB_HF_ID_O_GENEVE_VER_OPT_LEN_O_C_RSVD0, + BNXT_ULP_GLB_HF_ID_I_GENEVE_VER_OPT_LEN_O_C_RSVD0, + BNXT_ULP_GLB_HF_ID_O_GENEVE_PROTO_TYPE, + BNXT_ULP_GLB_HF_ID_I_GENEVE_PROTO_TYPE, + BNXT_ULP_GLB_HF_ID_O_GENEVE_VNI, + BNXT_ULP_GLB_HF_ID_I_GENEVE_VNI, + BNXT_ULP_GLB_HF_ID_O_GENEVE_RSVD1, + BNXT_ULP_GLB_HF_ID_I_GENEVE_RSVD1, + BNXT_ULP_GLB_HF_ID_T_GRE_VER, + 
BNXT_ULP_GLB_HF_ID_T_GRE_PROTO_TYPE, + BNXT_ULP_GLB_HF_ID_O_ICMP_TYPE, + BNXT_ULP_GLB_HF_ID_I_ICMP_TYPE, + BNXT_ULP_GLB_HF_ID_O_ICMP_CODE, + BNXT_ULP_GLB_HF_ID_I_ICMP_CODE, + BNXT_ULP_GLB_HF_ID_O_ICMP_CSUM, + BNXT_ULP_GLB_HF_ID_I_ICMP_CSUM, + BNXT_ULP_GLB_HF_ID_O_ICMP_IDENT, + BNXT_ULP_GLB_HF_ID_I_ICMP_IDENT, + BNXT_ULP_GLB_HF_ID_O_ICMP_SEQ_NUM, + BNXT_ULP_GLB_HF_ID_I_ICMP_SEQ_NUM, + BNXT_ULP_GLB_HF_ID_O_IPV4_VER, + BNXT_ULP_GLB_HF_ID_I_IPV4_VER, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS, + BNXT_ULP_GLB_HF_ID_O_IPV4_LEN, + BNXT_ULP_GLB_HF_ID_I_IPV4_LEN, + BNXT_ULP_GLB_HF_ID_O_IPV4_FRAG_ID, + BNXT_ULP_GLB_HF_ID_I_IPV4_FRAG_ID, + BNXT_ULP_GLB_HF_ID_O_IPV4_FRAG_OFF, + BNXT_ULP_GLB_HF_ID_I_IPV4_FRAG_OFF, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID, + BNXT_ULP_GLB_HF_ID_O_IPV4_CSUM, + BNXT_ULP_GLB_HF_ID_I_IPV4_CSUM, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR, + BNXT_ULP_GLB_HF_ID_O_IPV6_VER, + BNXT_ULP_GLB_HF_ID_I_IPV6_VER, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS, + BNXT_ULP_GLB_HF_ID_O_IPV6_FLOW_LABEL, + BNXT_ULP_GLB_HF_ID_I_IPV6_FLOW_LABEL, + BNXT_ULP_GLB_HF_ID_O_IPV6_PAYLOAD_LEN, + BNXT_ULP_GLB_HF_ID_I_IPV6_PAYLOAD_LEN, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR, + BNXT_ULP_GLB_HF_ID_O_L2_FILTER_L2_FILTER_ID, + BNXT_ULP_GLB_HF_ID_I_L2_FILTER_L2_FILTER_ID, + BNXT_ULP_GLB_HF_ID_O_SRV6_NEXT_HDR, + BNXT_ULP_GLB_HF_ID_I_SRV6_NEXT_HDR, + BNXT_ULP_GLB_HF_ID_O_SRV6_HDR_LEN, + BNXT_ULP_GLB_HF_ID_I_SRV6_HDR_LEN, + BNXT_ULP_GLB_HF_ID_O_SRV6_ROUTING_TYPE, + 
BNXT_ULP_GLB_HF_ID_I_SRV6_ROUTING_TYPE, + BNXT_ULP_GLB_HF_ID_O_SRV6_SEG_LEFT, + BNXT_ULP_GLB_HF_ID_I_SRV6_SEG_LEFT, + BNXT_ULP_GLB_HF_ID_O_SRV6_LAST_ENTRY, + BNXT_ULP_GLB_HF_ID_I_SRV6_LAST_ENTRY, + BNXT_ULP_GLB_HF_ID_O_SRV6_FLAGS, + BNXT_ULP_GLB_HF_ID_I_SRV6_FLAGS, + BNXT_ULP_GLB_HF_ID_O_SRV6_TAG, + BNXT_ULP_GLB_HF_ID_I_SRV6_TAG, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT, + BNXT_ULP_GLB_HF_ID_O_TCP_SENT_SEQ, + BNXT_ULP_GLB_HF_ID_I_TCP_SENT_SEQ, + BNXT_ULP_GLB_HF_ID_O_TCP_RECV_ACK, + BNXT_ULP_GLB_HF_ID_I_TCP_RECV_ACK, + BNXT_ULP_GLB_HF_ID_O_TCP_DATA_OFF, + BNXT_ULP_GLB_HF_ID_I_TCP_DATA_OFF, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS, + BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS, + BNXT_ULP_GLB_HF_ID_O_TCP_RX_WIN, + BNXT_ULP_GLB_HF_ID_I_TCP_RX_WIN, + BNXT_ULP_GLB_HF_ID_O_TCP_CSUM, + BNXT_ULP_GLB_HF_ID_I_TCP_CSUM, + BNXT_ULP_GLB_HF_ID_O_TCP_URP, + BNXT_ULP_GLB_HF_ID_I_TCP_URP, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT, + BNXT_ULP_GLB_HF_ID_O_UDP_LENGTH, + BNXT_ULP_GLB_HF_ID_I_UDP_LENGTH, + BNXT_ULP_GLB_HF_ID_O_UDP_CSUM, + BNXT_ULP_GLB_HF_ID_I_UDP_CSUM, + BNXT_ULP_GLB_HF_ID_OO_VLAN_CFI_PRI, + BNXT_ULP_GLB_HF_ID_OI_VLAN_CFI_PRI, + BNXT_ULP_GLB_HF_ID_IO_VLAN_CFI_PRI, + BNXT_ULP_GLB_HF_ID_II_VLAN_CFI_PRI, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID, + BNXT_ULP_GLB_HF_ID_OO_VLAN_TYPE, + BNXT_ULP_GLB_HF_ID_OI_VLAN_TYPE, + BNXT_ULP_GLB_HF_ID_IO_VLAN_TYPE, + BNXT_ULP_GLB_HF_ID_II_VLAN_TYPE, + BNXT_ULP_GLB_HF_ID_T_VXLAN_FLAGS, + BNXT_ULP_GLB_HF_ID_T_VXLAN_RSVD0, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI, + BNXT_ULP_GLB_HF_ID_T_VXLAN_RSVD1, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_FLAGS, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_RSVD0, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_NEXT_PROTO, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI, + 
BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_RSVD1 +}; + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c new file mode 100644 index 000000000000..72a98f3644f6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c @@ -0,0 +1,3957 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +const struct bnxt_ulp_generic_tbl_params ulp_wh_plus_generic_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 9, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 9, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 16384, + .result_num_bytes = 16, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + 
[BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 16384, + .result_num_bytes = 16, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 32, + .result_num_bytes = 5, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 9, + .key_num_bytes = 21, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 9, + .key_num_bytes = 21, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE 
+ }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, 
+ [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + 
.hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + 
.result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS 
GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + 
BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + 
[BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + 
.result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROTO_HEADER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROTO_HEADER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + 
.result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + 
.hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + 
.key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + } +}; + +const struct bnxt_ulp_generic_tbl_params ulp_thor_generic_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 9, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = 
BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 9, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 16384, + .result_num_bytes = 18, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 16384, + .result_num_bytes = 18, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 256, + .result_num_bytes = 5, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 256, + .result_num_bytes = 5, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type 
= BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 9, + .key_num_bytes = 22, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 1024, + .result_num_bytes = 25, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 1024, + .result_num_bytes = 25, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 7, + .key_num_bytes = 3, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = 
BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 128, + .result_num_bytes = 6, + .key_num_bytes = 11, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 512, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 6, + .key_num_bytes = 18, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + 
BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 6, + .key_num_bytes = 30, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order 
= BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 2048, + .result_num_bytes = 6, + .key_num_bytes = 22, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 8192, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 1024, + .result_num_bytes = 10, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + 
.partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 6, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + 
.result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + 
.gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS 
GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 8, + .key_num_bytes = 62, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROTO_HEADER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 14, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROTO_HEADER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 14, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 12, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | 
+ BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 12, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 256, + .result_num_bytes = 6, + .key_num_bytes = 2, + .partial_key_num_bytes = 8, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 256, + .result_num_bytes = 6, + .key_num_bytes = 2, + .partial_key_num_bytes = 8, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + 
[BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 32, + .result_num_bytes = 6, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 128, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 32, + .result_num_bytes = 6, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 128, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 16, + .result_num_bytes = 6, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + 
.result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 
0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + } +}; + +const struct bnxt_ulp_generic_tbl_params ulp_thor2_generic_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 12, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_CNTXT_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 2048, + .result_num_bytes = 12, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROFILE_TCAM", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 32, + 
.result_num_bytes = 5, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 32, + .result_num_bytes = 5, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 12, + .key_num_bytes = 25, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MAC_ADDR_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 1024, + .result_num_bytes = 24, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PORT_TABLE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 1024, + 
.result_num_bytes = 24, + .key_num_bytes = 2, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 23, + .key_num_bytes = 3, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 128, + .result_num_bytes = 8, + .key_num_bytes = 11, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 512, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = 
BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 8, + .key_num_bytes = 18, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOCKET_DIRECT_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE << 1 | + 
BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 8, + .key_num_bytes = 30, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 128, + .result_num_bytes = 8, + .key_num_bytes = 23, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 512, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_OUTER_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_OUTER_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = 
BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 1024, + .result_num_bytes = 10, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SHARED_METER_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 512, + .result_num_bytes = 13, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 2048, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_METER_PROFILE_TBL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + 
.partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GLOBAL_REGISTER_TBL", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_CHAIN_ID_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_CHAIN_ID_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + 
.result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_SRV6_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_RSS_PARAMS << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_RSS_PARAMS", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + 
.result_num_entries = 64, + .result_num_bytes = 11, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TABLE_SCOPE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 64, + .result_num_bytes = 11, + .key_num_bytes = 1, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 4096, + .result_num_bytes = 8, + .key_num_bytes = 62, + .partial_key_num_bytes = 0, + .num_buckets = 8, + .hash_tbl_entries = 16384, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_PROTO_HEADER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 14, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_PROTO_HEADER", + 
.gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 14, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 12, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_EM_FLOW_CONFLICT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 256, + .result_num_bytes = 12, + .key_num_bytes = 10, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 1024, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 256, + .result_num_bytes = 6, + .key_num_bytes = 2, + .partial_key_num_bytes = 8, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_HDR_OVERLAP", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 256, + .result_num_bytes = 6, + .key_num_bytes = 2, + .partial_key_num_bytes = 8, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS 
GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_SHARED_MIRROR", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 32, + .result_num_bytes = 8, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 128, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 32, + .result_num_bytes = 8, + .key_num_bytes = 4, + .partial_key_num_bytes = 0, + .num_buckets = 4, + .hash_tbl_entries = 128, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT << 1 | + 
BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_GPARSE_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_TUNNEL_GPARSE_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_MULTI_FLOW_TUNNEL_CACHE", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_HASH_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, 
+ [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_FILTER << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS GENERIC_TABLE_L2_FILTER", + .gen_tbl_type = BNXT_ULP_GEN_TBL_TYPE_KEY_LIST, + .result_num_entries = 0, + .result_num_bytes = 0, + .key_num_bytes = 0, + .partial_key_num_bytes = 0, + .num_buckets = 0, + .hash_tbl_entries = 0, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + } +}; + +const struct bnxt_ulp_allocator_tbl_params ulp_wh_plus_allocator_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 0, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 0, + } +}; + +const struct bnxt_ulp_allocator_tbl_params ulp_thor_allocator_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 32, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = "EGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 32, + } +}; + +const struct bnxt_ulp_allocator_tbl_params ulp_thor2_allocator_tbl_params[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .name = "INGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 32, + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .name = 
"EGRESS ALLOCATOR_TABLE_JUMP_INDEX", + .num_entries = 32, + } +}; + +/* device tables */ +const struct bnxt_ulp_template_device_tbls ulp_template_wh_plus_tbls[] = { + [BNXT_ULP_TEMPLATE_TYPE_CLASS] = { + .tmpl_list = ulp_wh_plus_class_tmpl_list, + .tmpl_list_size = ULP_WH_PLUS_CLASS_TMPL_LIST_SIZE, + .tbl_list = ulp_wh_plus_class_tbl_list, + .tbl_list_size = ULP_WH_PLUS_CLASS_TBL_LIST_SIZE, + .key_info_list = ulp_wh_plus_class_key_info_list, + .key_info_list_size = ULP_WH_PLUS_CLASS_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_wh_plus_class_key_ext_list, + .key_ext_list_size = ULP_WH_PLUS_CLASS_KEY_EXT_LIST_SIZE, + .ident_list = ulp_wh_plus_class_ident_list, + .ident_list_size = ULP_WH_PLUS_CLASS_IDENT_LIST_SIZE, + .cond_list = ulp_wh_plus_class_cond_list, + .cond_list_size = ULP_WH_PLUS_CLASS_COND_LIST_SIZE, + .result_field_list = ulp_wh_plus_class_result_field_list, + .result_field_list_size = ULP_WH_PLUS_CLASS_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_wh_plus_class_cond_oper_list, + .cond_oper_list_size = ULP_WH_PLUS_CLASS_COND_OPER_LIST_SIZE + }, + [BNXT_ULP_TEMPLATE_TYPE_ACTION] = { + .tmpl_list = ulp_wh_plus_act_tmpl_list, + .tmpl_list_size = ULP_WH_PLUS_ACT_TMPL_LIST_SIZE, + .tbl_list = ulp_wh_plus_act_tbl_list, + .tbl_list_size = ULP_WH_PLUS_ACT_TBL_LIST_SIZE, + .key_info_list = ulp_wh_plus_act_key_info_list, + .key_info_list_size = ULP_WH_PLUS_ACT_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_wh_plus_act_key_ext_list, + .key_ext_list_size = ULP_WH_PLUS_ACT_KEY_EXT_LIST_SIZE, + .ident_list = ulp_wh_plus_act_ident_list, + .ident_list_size = ULP_WH_PLUS_ACT_IDENT_LIST_SIZE, + .cond_list = ulp_wh_plus_act_cond_list, + .cond_list_size = ULP_WH_PLUS_ACT_COND_LIST_SIZE, + .result_field_list = ulp_wh_plus_act_result_field_list, + .result_field_list_size = ULP_WH_PLUS_ACT_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_wh_plus_act_cond_oper_list, + .cond_oper_list_size = ULP_WH_PLUS_ACT_COND_OPER_LIST_SIZE + } +}; + +/* device tables */ +const struct 
bnxt_ulp_template_device_tbls ulp_template_thor_tbls[] = { + [BNXT_ULP_TEMPLATE_TYPE_CLASS] = { + .tmpl_list = ulp_thor_class_tmpl_list, + .tmpl_list_size = ULP_THOR_CLASS_TMPL_LIST_SIZE, + .tbl_list = ulp_thor_class_tbl_list, + .tbl_list_size = ULP_THOR_CLASS_TBL_LIST_SIZE, + .key_info_list = ulp_thor_class_key_info_list, + .key_info_list_size = ULP_THOR_CLASS_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_thor_class_key_ext_list, + .key_ext_list_size = ULP_THOR_CLASS_KEY_EXT_LIST_SIZE, + .ident_list = ulp_thor_class_ident_list, + .ident_list_size = ULP_THOR_CLASS_IDENT_LIST_SIZE, + .cond_list = ulp_thor_class_cond_list, + .cond_list_size = ULP_THOR_CLASS_COND_LIST_SIZE, + .result_field_list = ulp_thor_class_result_field_list, + .result_field_list_size = ULP_THOR_CLASS_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_thor_class_cond_oper_list, + .cond_oper_list_size = ULP_THOR_CLASS_COND_OPER_LIST_SIZE + }, + [BNXT_ULP_TEMPLATE_TYPE_ACTION] = { + .tmpl_list = ulp_thor_act_tmpl_list, + .tmpl_list_size = ULP_THOR_ACT_TMPL_LIST_SIZE, + .tbl_list = ulp_thor_act_tbl_list, + .tbl_list_size = ULP_THOR_ACT_TBL_LIST_SIZE, + .key_info_list = ulp_thor_act_key_info_list, + .key_info_list_size = ULP_THOR_ACT_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_thor_act_key_ext_list, + .key_ext_list_size = ULP_THOR_ACT_KEY_EXT_LIST_SIZE, + .ident_list = ulp_thor_act_ident_list, + .ident_list_size = ULP_THOR_ACT_IDENT_LIST_SIZE, + .cond_list = ulp_thor_act_cond_list, + .cond_list_size = ULP_THOR_ACT_COND_LIST_SIZE, + .result_field_list = ulp_thor_act_result_field_list, + .result_field_list_size = ULP_THOR_ACT_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_thor_act_cond_oper_list, + .cond_oper_list_size = ULP_THOR_ACT_COND_OPER_LIST_SIZE + } +}; + +/* device tables */ +const struct bnxt_ulp_template_device_tbls ulp_template_thor2_tbls[] = { + [BNXT_ULP_TEMPLATE_TYPE_CLASS] = { + .tmpl_list = ulp_thor2_class_tmpl_list, + .tmpl_list_size = ULP_THOR2_CLASS_TMPL_LIST_SIZE, + .tbl_list = 
ulp_thor2_class_tbl_list, + .tbl_list_size = ULP_THOR2_CLASS_TBL_LIST_SIZE, + .key_info_list = ulp_thor2_class_key_info_list, + .key_info_list_size = ULP_THOR2_CLASS_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_thor2_class_key_ext_list, + .key_ext_list_size = ULP_THOR2_CLASS_KEY_EXT_LIST_SIZE, + .ident_list = ulp_thor2_class_ident_list, + .ident_list_size = ULP_THOR2_CLASS_IDENT_LIST_SIZE, + .cond_list = ulp_thor2_class_cond_list, + .cond_list_size = ULP_THOR2_CLASS_COND_LIST_SIZE, + .result_field_list = ulp_thor2_class_result_field_list, + .result_field_list_size = ULP_THOR2_CLASS_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_thor2_class_cond_oper_list, + .cond_oper_list_size = ULP_THOR2_CLASS_COND_OPER_LIST_SIZE + }, + [BNXT_ULP_TEMPLATE_TYPE_ACTION] = { + .tmpl_list = ulp_thor2_act_tmpl_list, + .tmpl_list_size = ULP_THOR2_ACT_TMPL_LIST_SIZE, + .tbl_list = ulp_thor2_act_tbl_list, + .tbl_list_size = ULP_THOR2_ACT_TBL_LIST_SIZE, + .key_info_list = ulp_thor2_act_key_info_list, + .key_info_list_size = ULP_THOR2_ACT_KEY_INFO_LIST_SIZE, + .key_ext_list = ulp_thor2_act_key_ext_list, + .key_ext_list_size = ULP_THOR2_ACT_KEY_EXT_LIST_SIZE, + .ident_list = ulp_thor2_act_ident_list, + .ident_list_size = ULP_THOR2_ACT_IDENT_LIST_SIZE, + .cond_list = ulp_thor2_act_cond_list, + .cond_list_size = ULP_THOR2_ACT_COND_LIST_SIZE, + .result_field_list = ulp_thor2_act_result_field_list, + .result_field_list_size = ULP_THOR2_ACT_RESULT_FIELD_LIST_SIZE, + .cond_oper_list = ulp_thor2_act_cond_oper_list, + .cond_oper_list_size = ULP_THOR2_ACT_COND_OPER_LIST_SIZE + } +}; + +/* List of device specific parameters */ +struct bnxt_ulp_device_params ulp_device_params[BNXT_ULP_DEVICE_ID_LAST] = { + [BNXT_ULP_DEVICE_ID_WH_PLUS] = { + .description = "Whitney_Plus", + .key_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .encap_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .wc_key_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .em_byte_order = BNXT_ULP_BYTE_ORDER_LE, + 
.encap_byte_swap = 1, + .int_flow_db_num_entries = 16384, + .ext_flow_db_num_entries = 32768, + .mark_db_lfid_entries = 65536, + .mark_db_gfid_entries = 65536, + .flow_count_db_entries = 16384, + .fdb_parent_flow_entries = 2, + .num_resources_per_flow = 8, + .num_phy_ports = 2, + .ext_cntr_table_type = 0, + .byte_count_mask = 0x0000000fffffffff, + .packet_count_mask = 0xfffffff000000000, + .byte_count_shift = 0, + .packet_count_shift = 36, + .wc_dynamic_pad_en = 1, + .em_dynamic_pad_en = 0, + .dynamic_sram_en = 0, + .wc_slice_width = 80, + .wc_max_slices = 4, + .wc_mode_list = {0x00000000, 0x00000002, + 0x00000003, 0x00000003}, + .wc_mod_list_max_size = 4, + .wc_ctl_size_bits = 16, + .gen_tbl_params = ulp_wh_plus_generic_tbl_params, + .allocator_tbl_params = ulp_wh_plus_allocator_tbl_params, + .dev_tbls = ulp_template_wh_plus_tbls, + .dev_features = BNXT_ULP_DEV_FT_STAT_SW_AGG | BNXT_ULP_DEV_FT_STAT_PARENT_AGG + }, + [BNXT_ULP_DEVICE_ID_THOR] = { + .description = "Thor", + .key_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .encap_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .wc_key_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .em_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .encap_byte_swap = 1, + .int_flow_db_num_entries = 16384, + .ext_flow_db_num_entries = 32768, + .mark_db_lfid_entries = 65536, + .mark_db_gfid_entries = 65536, + .flow_count_db_entries = 16384, + .fdb_parent_flow_entries = 2, + .num_resources_per_flow = 8, + .num_phy_ports = 2, + .ext_cntr_table_type = 0, + .byte_count_mask = 0x00000007ffffffff, + .packet_count_mask = 0xfffffff800000000, + .byte_count_shift = 0, + .packet_count_shift = 35, + .wc_dynamic_pad_en = 1, + .em_dynamic_pad_en = 1, + .dynamic_sram_en = 1, + .dyn_encap_list_size = 5, + .dyn_encap_sizes = {{64, TF_TBL_TYPE_ACT_ENCAP_8B}, + {128, TF_TBL_TYPE_ACT_ENCAP_16B}, + {256, TF_TBL_TYPE_ACT_ENCAP_32B}, + {512, TF_TBL_TYPE_ACT_ENCAP_64B}, + {1024, TF_TBL_TYPE_ACT_ENCAP_128B}}, + .dyn_modify_list_size = 4, + 
.dyn_modify_sizes = {{64, TF_TBL_TYPE_ACT_MODIFY_8B}, + {128, TF_TBL_TYPE_ACT_MODIFY_16B}, + {256, TF_TBL_TYPE_ACT_MODIFY_32B}, + {512, TF_TBL_TYPE_ACT_MODIFY_64B}}, + .em_blk_size_bits = 100, + .em_blk_align_bits = 128, + .em_key_align_bytes = 80, + .wc_slice_width = 160, + .wc_max_slices = 4, + .wc_mode_list = {0x0000000c, 0x0000000e, + 0x0000000f, 0x0000000f}, + .wc_mod_list_max_size = 4, + .wc_ctl_size_bits = 32, + .gen_tbl_params = ulp_thor_generic_tbl_params, + .allocator_tbl_params = ulp_thor_allocator_tbl_params, + .dev_tbls = ulp_template_thor_tbls, + .dev_features = BNXT_ULP_DEV_FT_STAT_SW_AGG | BNXT_ULP_DEV_FT_STAT_PARENT_AGG + }, + [BNXT_ULP_DEVICE_ID_THOR2] = { + .description = "Thor2", + .key_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .result_byte_order = BNXT_ULP_BYTE_ORDER_LE, + .encap_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .wc_key_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .em_byte_order = BNXT_ULP_BYTE_ORDER_BE, + .encap_byte_swap = 1, + .int_flow_db_num_entries = 16384, + .ext_flow_db_num_entries = 32768, + .mark_db_lfid_entries = 65536, + .mark_db_gfid_entries = 65536, + .flow_count_db_entries = 16384, + .fdb_parent_flow_entries = 2, + .num_resources_per_flow = 8, + .num_phy_ports = 2, + .ext_cntr_table_type = 0, + .byte_count_mask = 0x00000007ffffffff, + .packet_count_mask = 0xfffffff800000000, + .byte_count_shift = 0, + .packet_count_shift = 35, + .wc_dynamic_pad_en = 1, + .em_dynamic_pad_en = 1, + .dynamic_sram_en = 1, + .dyn_encap_list_size = 4, + .dyn_encap_sizes = {{64, TF_TBL_TYPE_ACT_ENCAP_8B}, + {128, TF_TBL_TYPE_ACT_ENCAP_16B}, + {256, TF_TBL_TYPE_ACT_ENCAP_32B}, + {512, TF_TBL_TYPE_ACT_ENCAP_64B}}, + .dyn_modify_list_size = 4, + .dyn_modify_sizes = {{64, TF_TBL_TYPE_ACT_MODIFY_8B}, + {128, TF_TBL_TYPE_ACT_MODIFY_16B}, + {256, TF_TBL_TYPE_ACT_MODIFY_32B}, + {512, TF_TBL_TYPE_ACT_MODIFY_64B}}, + .em_blk_size_bits = 256, + .em_blk_align_bits = 128, + .em_key_align_bytes = 16, + .wc_slice_width = 172, + .wc_max_slices = 4, + .wc_mode_list = 
{0x00000004, 0x00000005, + 0x00000000, 0x00000006}, + .wc_mod_list_max_size = 4, + .wc_ctl_size_bits = 3, + .gen_tbl_params = ulp_thor2_generic_tbl_params, + .allocator_tbl_params = ulp_thor2_allocator_tbl_params, + .dev_tbls = ulp_template_thor2_tbls + } +}; + +/* Provides act_bitmask */ +struct bnxt_ulp_shared_act_info ulp_shared_act_info[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_INGRESS] = { + .act_bitmask = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR << 1 | + BNXT_ULP_DIRECTION_EGRESS] = { + .act_bitmask = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + } +}; + +/* List of device specific parameters */ +struct bnxt_ulp_app_capabilities_info ulp_app_cap_info_list[] = { + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .flags = BNXT_ULP_APP_CAP_L2_ETYPE, + .default_priority = 0, + .max_def_priority = 3, + .min_flow_priority = 49152, + .max_flow_priority = 0, + .vxlan_port = 4789, + .vxlan_ip_port = 0, + .default_class_bits = 0, + .default_act_bits = BNXT_ULP_ACT_BIT_NON_GENERIC + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .flags = 0, + .default_priority = 0, + .max_def_priority = 3, + .min_flow_priority = 49152, + .max_flow_priority = 0, + .vxlan_port = 0, + .vxlan_ip_port = 0, + .num_key_recipes_per_dir = 256, + .default_class_bits = 0, + .default_act_bits = BNXT_ULP_ACT_BIT_GENERIC + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .flags = 0, + .default_priority = 0, + .max_def_priority = 3, + .min_flow_priority = 49152, + .max_flow_priority = 0, + .vxlan_port = 0, + .vxlan_ip_port = 0, + .max_pools = 1, + .em_multiplier = 4, + .num_rx_flows = 524288, + .num_tx_flows = 524288, + .act_rx_max_sz = 256, + .act_tx_max_sz = 256, + .em_rx_key_max_sz = 112, + .em_tx_key_max_sz = 112, + .pbl_page_sz_in_bytes = 4096, + .num_key_recipes_per_dir = 256, + .default_class_bits = 0, + .default_act_bits = BNXT_ULP_ACT_BIT_GENERIC + } +}; + 
+/* List of unnamed app tf resources required to be reserved per app/device */ +struct bnxt_ulp_resource_resv_info ulp_app_resource_resv_list[] = {}; + +/* List of global app tf resources required to be reserved per app/device */ +struct bnxt_ulp_glb_resource_info ulp_app_glb_resource_tbl[] = {}; + +/* List of global tf resources required to be reserved per app/device */ +struct bnxt_ulp_glb_resource_info ulp_glb_resource_tbl[] = { + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = 
BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_VXLAN_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = 
BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_VXLAN_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GRE_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id 
= BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_8B, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_MODIFY_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = 
BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_1, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_2, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_3, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_4, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_5, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_1, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_2, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_3, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_4, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_EM_KEY_ID_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_1, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = 
BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_2, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_3, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_EM_PROFILE_ID_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_1, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_2, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_PROFILE_ID_3, + .direction = TF_DIR_TX + }, + 
{ + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_WC_KEY_ID_1, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METADATA, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_PROF_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_1, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_PROF, + 
.glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_PROF_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_LKUP_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_ACT, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_ACT_0, + .direction = TF_DIR_RX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_PROF, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_PROF_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_LKUP_0, + .direction = TF_DIR_TX + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR2, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_ACT, + .glb_regfile_index = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0, + .direction = TF_DIR_TX + } +}; + +/* List of tf resources required to be reserved per app/device */ +struct bnxt_ulp_resource_resv_info ulp_resource_resv_list[] = { + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, 
+ .resource_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .count = 422 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .count = 6 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .count = 191 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .count = 63 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .count = 192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .count = 8192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .count = 6912 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .count = 1023 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, 
+ .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_8B, + .count = 511 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .count = 15 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC, + .count = 255 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .count = 1 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .count = 422 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .count = 6 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .count = 960 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_WC_TCAM, + .count = 88 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + 
.direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_EM_RECORD, + .count = 13168 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_TBL_SCOPE, + .count = 1 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .count = 292 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .count = 148 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .count = 191 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .count = 63 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .count = 192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .count = 8192 + }, + { + .app_id = 0, 
+ .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .count = 6912 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .count = 1023 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .count = 511 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .count = 223 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_8B, + .count = 255 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + .count = 488 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV6, + .count = 511 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + 
.resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .count = 1 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .count = 292 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .count = 144 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .count = 960 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_WC_TCAM, + .count = 928 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_EM_RECORD, + .count = 15232 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_WH_PLUS, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_TBL_SCOPE, + .count = 1 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .count = 272 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = 
BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .count = 6 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .count = 8192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .count = 8192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .count = 14 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_PROF, + .count = 256 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = 
TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_INST, + .count = 1023 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .count = 31 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .count = 2048 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + .count = 64 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .count = 272 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .count = 6 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .count = 128 + }, + { + .app_id = 0, + .device_id = 
BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_WC_TCAM, + .count = 4096 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_RX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_EM_RECORD, + .count = 16384 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .count = 272 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_WC_PROF, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_PROF_FUNC, + .count = 63 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .resource_type = TF_IDENT_TYPE_EM_PROF, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .count = 8192 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .count = 8192 + }, + { + 
.app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .count = 14 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .count = 32 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .count = 2048 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + .count = 100 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .count = 272 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .count = 128 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = 
TF_TCAM_TBL_TYPE_WC_TCAM, + .count = 4096 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_EM_TBL_TYPE_EM_RECORD, + .count = 16384 + }, + { + .app_id = 0, + .device_id = BNXT_ULP_DEVICE_ID_THOR, + .direction = TF_DIR_TX, + .session_type = BNXT_ULP_SESSION_TYPE_DEFAULT, + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METADATA, + .count = 1 + } +}; + +uint32_t ulp_act_prop_map_table[] = { + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_SZ, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_TYPE, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_NUM, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE, + [BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM] = + BNXT_ULP_ACT_PROP_SZ_MPLS_POP_NUM, + [BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM] = + BNXT_ULP_ACT_PROP_SZ_MPLS_PUSH_NUM, + [BNXT_ULP_ACT_PROP_IDX_PORT_ID] = + BNXT_ULP_ACT_PROP_SZ_PORT_ID, + [BNXT_ULP_ACT_PROP_IDX_VNIC] = + BNXT_ULP_ACT_PROP_SZ_VNIC, + [BNXT_ULP_ACT_PROP_IDX_VPORT] = + BNXT_ULP_ACT_PROP_SZ_VPORT, + [BNXT_ULP_ACT_PROP_IDX_MIRR_VNIC] = + BNXT_ULP_ACT_PROP_SZ_MIRR_VNIC, + [BNXT_ULP_ACT_PROP_IDX_MIRR_VPORT] = + BNXT_ULP_ACT_PROP_SZ_MIRR_VPORT, + [BNXT_ULP_ACT_PROP_IDX_MARK] = + BNXT_ULP_ACT_PROP_SZ_MARK, + [BNXT_ULP_ACT_PROP_IDX_COUNT] = + BNXT_ULP_ACT_PROP_SZ_COUNT, + [BNXT_ULP_ACT_PROP_IDX_METER] = + BNXT_ULP_ACT_PROP_SZ_METER, + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST, + [BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN] = + BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN, + 
[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP] = + BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP, + [BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID] = + BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_IPV6_DST, + [BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC] = + BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC, + [BNXT_ULP_ACT_PROP_IDX_SET_TP_DST] = + BNXT_ULP_ACT_PROP_SZ_SET_TP_DST, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_0, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_1, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_2, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_3, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_4, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_5, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_6, + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7] = + BNXT_ULP_ACT_PROP_SZ_OF_PUSH_MPLS_7, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_UDP, + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN] = + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN, + [BNXT_ULP_ACT_PROP_IDX_JUMP] = + BNXT_ULP_ACT_PROP_SZ_JUMP, + [BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE] = + BNXT_ULP_ACT_PROP_SZ_SHARED_HANDLE, + [BNXT_ULP_ACT_PROP_IDX_RSS_FUNC] = + 
BNXT_ULP_ACT_PROP_SZ_RSS_FUNC, + [BNXT_ULP_ACT_PROP_IDX_RSS_TYPES] = + BNXT_ULP_ACT_PROP_SZ_RSS_TYPES, + [BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL] = + BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL, + [BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN] = + BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN, + [BNXT_ULP_ACT_PROP_IDX_RSS_KEY] = + BNXT_ULP_ACT_PROP_SZ_RSS_KEY, + [BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM] = + BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE_NUM, + [BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE] = + BNXT_ULP_ACT_PROP_SZ_RSS_QUEUE, + [BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX] = + BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID_UPDATE] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID_UPDATE, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CIR, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EIR, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBS, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBS, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_RFC2698, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_PM, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBND, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBND, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBSM, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBSM, + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF] = + BNXT_ULP_ACT_PROP_SZ_METER_PROF_CF, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_ID] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_ID, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_ECN_RMP_EN_UPDATE, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_ECN_RMP_EN, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE] = + 
BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL_UPDATE, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL, + [BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN] = + BNXT_ULP_ACT_PROP_SZ_GOTO_CHAIN, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_CIR] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_CIR, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_EIR] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_EIR, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_CBS] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBS, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_EBS] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBS, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_RFC2698] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_RFC2698, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_PM] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_PM, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_EBND] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBND, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_CBND] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBND, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_EBSM] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_EBSM, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_CBSM] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_CBSM, + [BNXT_ULP_ACT_PROP_IDX_METER_INST_CF] = + BNXT_ULP_ACT_PROP_SZ_METER_INST_CF, + [BNXT_ULP_ACT_PROP_IDX_SET_TTL] = + BNXT_ULP_ACT_PROP_SZ_SET_TTL, + [BNXT_ULP_ACT_PROP_IDX_LAST] = + BNXT_ULP_ACT_PROP_SZ_LAST +}; + diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.h new file mode 100644 index 000000000000..f6fbf561bea8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.h @@ -0,0 +1,184 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2021 Broadcom + * All rights reserved. 
+ */ + +/* date: Mon Sep 21 14:21:33 2020 */ + +#ifndef ULP_TEMPLATE_DB_TBL_H_ +#define ULP_TEMPLATE_DB_TBL_H_ + +#include "ulp_template_struct.h" + +/* WH_PLUS template table declarations */ +extern struct bnxt_ulp_mapper_tmpl_info ulp_wh_plus_class_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[]; + +extern struct +bnxt_ulp_mapper_key_info ulp_wh_plus_class_key_info_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_wh_plus_class_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_wh_plus_class_ident_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_wh_plus_class_cond_oper_list[]; + +extern struct bnxt_ulp_mapper_tmpl_info ulp_wh_plus_act_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_act_tbl_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_wh_plus_act_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_wh_plus_act_result_field_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_wh_plus_class_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_wh_plus_act_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_wh_plus_act_cond_oper_list[]; + +extern struct bnxt_ulp_mapper_key_info ulp_wh_plus_act_key_info_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_wh_plus_act_ident_list[]; + +/* STINGRAY template table declarations */ +extern struct bnxt_ulp_mapper_tmpl_info ulp_stingray_class_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_stingray_class_tbl_list[]; + +extern struct +bnxt_ulp_mapper_key_info ulp_stingray_class_key_info_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_stingray_class_result_field_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_stingray_class_ident_list[]; + +extern struct bnxt_ulp_mapper_tmpl_info ulp_stingray_act_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_stingray_act_tbl_list[]; + 
+extern struct bnxt_ulp_mapper_key_info ulp_stingray_act_key_info_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_stingray_act_ident_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_stingray_act_result_field_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_stingray_class_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_stingray_act_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_stingray_act_cond_oper_list[]; + +/* Thor template table declarations */ +extern struct bnxt_ulp_mapper_tmpl_info ulp_thor_class_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_thor_class_tbl_list[]; + +extern struct +bnxt_ulp_mapper_key_info ulp_thor_class_key_info_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor_class_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor_class_result_field_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_thor_class_ident_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_thor_class_cond_oper_list[]; + +extern struct bnxt_ulp_mapper_tmpl_info ulp_thor_act_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_thor_act_tbl_list[]; + +extern struct bnxt_ulp_mapper_key_info ulp_thor_act_key_info_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_thor_act_ident_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor_act_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor_act_result_field_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_thor_class_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_thor_act_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_thor_act_cond_oper_list[]; + +extern struct bnxt_ulp_mapper_key_info ulp_wh_plus_act_key_info_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_wh_plus_act_ident_list[]; + +/* Global declarations */ +extern u8 ulp_glb_field_tbl[]; +extern u32 ulp_glb_app_sig_tbl[]; + +extern struct +bnxt_ulp_shared_act_info ulp_shared_act_info[]; + 
+/* Thor2 template table declarations */ +extern struct bnxt_ulp_mapper_tmpl_info ulp_thor2_class_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_thor2_class_tbl_list[]; + +extern struct +bnxt_ulp_mapper_key_info ulp_thor2_class_key_info_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor2_class_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor2_class_result_field_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_thor2_class_ident_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_thor2_class_cond_oper_list[]; + +extern struct bnxt_ulp_mapper_tmpl_info ulp_thor2_act_tmpl_list[]; + +extern struct bnxt_ulp_mapper_tbl_info ulp_thor2_act_tbl_list[]; + +extern struct +bnxt_ulp_mapper_key_info ulp_thor2_act_key_info_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor2_act_key_ext_list[]; + +extern struct +bnxt_ulp_mapper_field_info ulp_thor2_act_result_field_list[]; + +extern struct bnxt_ulp_mapper_ident_info ulp_thor2_act_ident_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_thor2_class_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_info ulp_thor2_act_cond_list[]; + +extern struct +bnxt_ulp_mapper_cond_list_info ulp_thor2_act_cond_oper_list[]; + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_act.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_act.c new file mode 100644 index 000000000000..d975940c7803 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_act.c @@ -0,0 +1,9864 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. 
+ */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header act list */ +struct bnxt_ulp_mapper_tmpl_info ulp_thor2_act_tmpl_list[] = { + /* act_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 24, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 0, + .cond_nums = 0 } + }, + /* act_tid: 2, ingress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 24, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 23, + .cond_nums = 0 } + }, + /* act_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 12, + .start_tbl_idx = 25, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 23, + .cond_nums = 0 } + }, + /* act_tid: 4, ingress */ + [4] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 37, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 27, + .cond_nums = 0 } + }, + /* act_tid: 5, ingress */ + [5] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 38, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 27, + .cond_nums = 0 } + }, + /* act_tid: 6, ingress */ + [6] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 19, + .start_tbl_idx = 39, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 27, + .cond_nums = 0 } + }, + /* act_tid: 7, egress */ + [7] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 47, + .start_tbl_idx = 58, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 42, + .cond_nums = 1 } + }, + /* act_tid: 8, egress */ + [8] = { + .device_name = 
BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 105, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 91, + .cond_nums = 0 } + }, + /* act_tid: 9, egress */ + [9] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 106, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 91, + .cond_nums = 0 } + }, + /* act_tid: 10, egress */ + [10] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 107, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 91, + .cond_nums = 0 } + }, + /* act_tid: 11, egress */ + [11] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 1, + .start_tbl_idx = 108, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 91, + .cond_nums = 0 } + }, + /* act_tid: 12, egress */ + [12] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 6, + .start_tbl_idx = 109, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 91, + .cond_nums = 4 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_thor2_act_tbl_list[] = { + { /* act_tid: 1, , table: flow_chain_cache.rd */ + .description = "flow_chain_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 0, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 0, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 1 + }, + { /* act_tid: 1, , 
table: control.flow_chain */ + .description = "control.flow_chain", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 1, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 0, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_oper_size = 32, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* act_tid: 1, , table: flow_chain_cache.write */ + .description = 
"flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 1, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 0, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + .description = "shared_meter_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 2, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 1, + .ident_nums = 1 + }, + { /* act_tid: 1, , table: control.meter_chk */ + .description = "control.meter_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to unknown meter.", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 
1, , table: shared_mirror_record.rd */ + .description = "shared_mirror_record.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 4, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 3, + .blob_key_bit_size = 5, + .key_bit_size = 5, + .key_num_fields = 1, + .ident_start_idx = 2, + .ident_nums = 1 + }, + { /* act_tid: 1, , table: control.mirror */ + .description = "control.mirror", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to non-existent handle", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 5, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 1, , table: control.do_mod */ + .description = "control.do_mod", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 6, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_AND, + .func_src1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_opr1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = BNXT_ULP_ACT_BIT_DEC_TTL | + BNXT_ULP_ACT_BIT_SET_MAC_SRC | + BNXT_ULP_ACT_BIT_SET_MAC_DST | + 
BNXT_ULP_ACT_BIT_SET_IPV4_SRC | + BNXT_ULP_ACT_BIT_SET_IPV4_DST | + BNXT_ULP_ACT_BIT_SET_IPV6_SRC | + BNXT_ULP_ACT_BIT_SET_IPV6_DST | + BNXT_ULP_ACT_BIT_SET_TP_SRC | + BNXT_ULP_ACT_BIT_SET_TP_DST | + BNXT_ULP_ACT_BIT_GOTO_CHAIN, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 } + }, + { /* act_tid: 1, , table: mod_record.ttl_0 */ + .description = "mod_record.ttl_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 7, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 2, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 36 + }, + { /* act_tid: 1, , table: mod_record.non_ttl_0 */ + .description = "mod_record.non_ttl_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 8, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 38, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 28 + }, + { /* act_tid: 1, , table: control.mod_handle_to_offset */ + .description = "control.mod_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 9, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* act_tid: 1, , table: tunnel_cache.f1_f2_act_rd */ + .description = "tunnel_cache.f1_f2_act_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 9, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 4, + .blob_key_bit_size = 19, + .key_bit_size = 19, + .key_num_fields = 2, + .ident_start_idx = 3, + .ident_nums = 2 + }, + { /* act_tid: 1, , table: control.tunnel_cache_check_act */ + .description = "control.tunnel_cache_check_act", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 11, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 1, , table: cmm_stat_record.f1_flow */ + .description = "cmm_stat_record.f1_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 12, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 66, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* act_tid: 1, , table: control.stat_handle_to_offset_ptr_1 */ + .description = "control.stat_handle_to_offset_ptr_1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 12, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 } + }, + { /* act_tid: 1, , table: cmm_stat_record.0 */ + .description = "cmm_stat_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 12, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 68, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* act_tid: 1, , table: 
control.stat_handle_to_offset */ + .description = "control.stat_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 13, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 } + }, + { /* act_tid: 1, , table: control.queue_and_rss_test */ + .description = "control.queue_and_rss_test", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to both queue and rss set", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 13, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 1, , table: vnic_interface_rss_config.0 */ + .description = "vnic_interface_rss_config.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 15, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 70, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: vnic_interface_queue_config.0 */ + .description = "vnic_interface_queue_config.0", + 
.resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 16, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 70, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: cmm_full_act_record.0 */ + .description = "cmm_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 17, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 70, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* act_tid: 1, , table: control.act_handle_to_offset */ + .description = "control.act_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 23, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + 
.func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR } + }, + { /* act_tid: 2, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 23, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: control.delete_chk */ + .description = "control.delete_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 23, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: shared_mirror_record.del_chk */ + .description = "shared_mirror_record.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_MIRROR, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 24, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 6, + .blob_key_bit_size = 5, + .key_bit_size = 5, + .key_num_fields = 1, + .ident_start_idx = 5, + .ident_nums = 1 + }, + { /* act_tid: 3, , table: control.mirror_del_exist_chk */ + .description = "control.mirror_del_exist_chk", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 24, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: control.mirror_ref_cnt_chk */ + .description = "control.mirror_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 25, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 3, , table: control.create */ + .description = "control.create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 26, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 3, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_MIRROR, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 26, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 88, + .result_bit_size = 128, + .result_num_fields = 12 + }, + { /* act_tid: 3, , table: cmm_stat_record.0 */ + .description = "cmm_stat_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 26, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 100, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* act_tid: 3, , table: control.stat_handle_to_offset */ + .description = "control.stat_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 } + }, + { /* act_tid: 3, , table: cmm_full_act_record.0 */ + .description = 
"cmm_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 102, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* act_tid: 3, , table: control.act_handle_to_offset */ + .description = "control.act_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR } + }, + { /* act_tid: 3, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_MIRROR, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, 
+ .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 120, + .result_bit_size = 128, + .result_num_fields = 12 + }, + { /* act_tid: 3, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_MIRROR, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_INC, + .key_start_idx = 7, + .blob_key_bit_size = 5, + .key_bit_size = 5, + .key_num_fields = 1, + .result_start_idx = 132, + .result_bit_size = 37, + .result_num_fields = 2 + }, + { /* act_tid: 4, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 5, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: control.create_check */ + .description = "control.create_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 10, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 27, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.rd */ + .description = "meter_profile_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 29, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 8, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 6, + .ident_nums = 0 + }, + { /* act_tid: 6, , table: control.shared_meter_profile_0 */ + .description = "control.shared_meter_profile_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 30, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + .description = 
"meter_profile_tbl_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_METER_PROF, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 31, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 9, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 134, + .result_bit_size = 97, + .result_num_fields = 12 + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.rd */ + .description = "shared_meter_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 31, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 10, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 6, + .ident_nums = 0 + }, + { /* act_tid: 6, , table: control.meter_created_chk */ + .description = "control.meter_created_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 32, + .cond_nums 
= 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + .description = "meter_profile_tbl_cache.rd2", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 33, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 11, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 6, + .ident_nums = 11 + }, + { /* act_tid: 6, , table: control.shared_meter_profile_chk */ + .description = "control.shared_meter_profile_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 33, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_tbl.0 */ + .description = "meter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 34, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 146, + .result_bit_size = 128, + .result_num_fields = 18 + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + .description = "shared_meter_tbl_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 34, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 12, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 164, + .result_bit_size = 74, + .result_num_fields = 3 + }, + { /* act_tid: 6, , table: control.delete_check */ + .description = "control.delete_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 34, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.del_chk */ + .description = "meter_profile_tbl_cache.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 35, + .cond_nums = 1 
}, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 13, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 17, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: control.mtr_prof_ref_cnt_chk */ + .description = "control.mtr_prof_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 36, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + .description = "shared_meter_tbl_cache.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 37, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 14, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 18, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: 
control.shared_mtr_ref_cnt_chk */ + .description = "control.shared_mtr_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 38, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 6, , table: control.update_check */ + .description = "control.update_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 39, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + .description = "shared_meter_tbl_cache.rd_update", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 39, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 15, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 19, + .ident_nums = 1 + }, + { /* act_tid: 6, 
, table: meter_tbl.update_rd */ + .description = "meter_tbl.update_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 40, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 20, + .ident_nums = 13, + .result_bit_size = 128 + }, + { /* act_tid: 6, , table: meter_tbl.update_wr */ + .description = "meter_tbl.update_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 167, + .result_bit_size = 128, + .result_num_fields = 18 + }, + { /* act_tid: 7, , table: flow_chain_cache.rd */ + .description = "flow_chain_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 43, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = 
BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 16, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 33, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.flow_chain */ + .description = "control.flow_chain", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 44, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 45, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 185, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 7, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 45, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_oper_size = 32, + 
.func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* act_tid: 7, , table: flow_chain_cache.write */ + .description = "flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 45, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 17, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 185, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: cmm_stat_record.0 */ + .description = "cmm_stat_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 45, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 187, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: control.stat_handle_to_offset */ + .description = "control.stat_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { 
+ .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 46, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 } + }, + { /* act_tid: 7, , table: shared_mirror_record.rd */ + .description = "shared_mirror_record.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 46, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 18, + .blob_key_bit_size = 5, + .key_bit_size = 5, + .key_num_fields = 1, + .ident_start_idx = 34, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.mirror */ + .description = "control.mirror", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Reject due to non-existent handle", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 47, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.do_mod */ + .description = "control.do_mod", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 48, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_AND, + .func_src1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_opr1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = BNXT_ULP_ACT_BIT_DEC_TTL | + BNXT_ULP_ACT_BIT_SET_MAC_SRC | + BNXT_ULP_ACT_BIT_SET_MAC_DST | + BNXT_ULP_ACT_BIT_SET_IPV4_SRC | + BNXT_ULP_ACT_BIT_SET_IPV4_DST | + BNXT_ULP_ACT_BIT_SET_IPV6_SRC | + BNXT_ULP_ACT_BIT_SET_IPV6_DST | + BNXT_ULP_ACT_BIT_SET_TP_SRC | + BNXT_ULP_ACT_BIT_SET_TP_DST | + BNXT_ULP_ACT_BIT_VF_TO_VF | + BNXT_ULP_ACT_BIT_GOTO_CHAIN, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 } + }, + { /* act_tid: 7, , table: control.vf_to_vf_calc */ + .description = "control.vf_to_vf_calc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 49, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_VNIC, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_VF_2_VF_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_1 } + }, + { /* act_tid: 7, , table: mod_record.ttl_0 */ + .description = "mod_record.ttl_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 50, + .cond_nums = 1 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 189, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 36 + }, + { /* act_tid: 7, , table: mod_record.non_ttl_0 */ + .description = "mod_record.non_ttl_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 61, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 225, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 28 + }, + { /* act_tid: 7, , table: control.mod_handle_to_offset */ + .description = "control.mod_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 72, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* act_tid: 7, , table: control.do_tunnel_check */ + .description = "control.do_tunnel_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 29, + 
.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 72, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.do_tunnel_vlan_exclusion */ + .description = "control.do_tunnel_vlan_exclusion", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Tunnel Encap + Push VLAN unsupported.", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 74, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: source_property_cache.rd */ + .description = "source_property_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 75, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 19, + .blob_key_bit_size = 85, + .key_bit_size = 85, + .key_num_fields = 3, + .ident_start_idx = 35, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.sp_rec_v4 */ + .description = "control.sp_rec_v4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 9, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 76, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: sp_smac_ipv4.0 */ + 
.description = "sp_smac_ipv4.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 77, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_SRP_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 253, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 3 + }, + { /* act_tid: 7, , table: control.srp_v4_handle_to_offset */ + .description = "control.srp_v4_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 77, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_SRP_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_SP_PTR } + }, + { /* act_tid: 7, , table: source_property_cache.wr */ + .description = "source_property_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 6, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 77, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 22, + .blob_key_bit_size = 85, + .key_bit_size = 85, + .key_num_fields = 3, + .result_start_idx = 256, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + .description = "source_property_ipv6_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 77, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 25, + .blob_key_bit_size = 181, + .key_bit_size = 181, + .key_num_fields = 3, + .ident_start_idx = 36, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.sp_rec_v6 */ + .description = "control.sp_rec_v6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 78, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: sp_smac_ipv6.0 */ + .description = "sp_smac_ipv6.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 79, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_SRP_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 258, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 3 + }, + { /* act_tid: 7, , table: control.srp_v6_handle_to_offset */ + .description = "control.srp_v6_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 79, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_SRP_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_SP_PTR } + }, + { /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + .description = "source_property_ipv6_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 79, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 28, + .blob_key_bit_size = 181, + .key_bit_size = 181, + .key_num_fields = 3, + .result_start_idx = 261, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: control.do_vxlan_check */ + .description = "control.do_vxlan_check", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 11, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 79, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + .description = "vxlan_encap_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 80, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 31, + .blob_key_bit_size = 141, + .key_bit_size = 141, + .key_num_fields = 6, + .ident_start_idx = 37, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.vxlan_v4_encap */ + .description = "control.vxlan_v4_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 17, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 81, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: ext_tun_vxlan_encap_record.ipv4_vxlan */ + .description = "ext_tun_vxlan_encap_record.ipv4_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 82, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 263, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 25 + }, + { /* act_tid: 7, , table: control.enc_handle_to_offset */ + .description = "control.enc_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 82, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_ENCAP_PTR_0 } + }, + { /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + .description = "vxlan_encap_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 14, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 82, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 37, + .blob_key_bit_size = 141, + .key_bit_size = 141, + .key_num_fields = 6, + .result_start_idx = 288, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: 
vxlan_encap_ipv6_rec_cache.rd */ + .description = "vxlan_encap_ipv6_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 82, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 43, + .blob_key_bit_size = 237, + .key_bit_size = 237, + .key_num_fields = 6, + .ident_start_idx = 38, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.vxlan_v6_encap */ + .description = "control.vxlan_v6_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 12, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 83, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: ext_tun_vxlan_encap_record.ipv6_vxlan */ + .description = "ext_tun_vxlan_encap_record.ipv6_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 84, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 
290, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 23 + }, + { /* act_tid: 7, , table: control.v6_vxlan_enc_handle_to_offset */ + .description = "control.v6_vxlan_enc_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 84, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_ENCAP_PTR_0 } + }, + { /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + .description = "vxlan_encap_ipv6_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 9, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 84, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 49, + .blob_key_bit_size = 237, + .key_bit_size = 237, + .key_num_fields = 6, + .result_start_idx = 313, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + .description = "geneve_encap_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + 
.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 84, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 55, + .blob_key_bit_size = 493, + .key_bit_size = 493, + .key_num_fields = 15, + .ident_start_idx = 39, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.geneve_encap */ + .description = "control.geneve_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 7, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 85, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: ext_tun_geneve_encap_record.ipv4_vxlan */ + .description = "ext_tun_geneve_encap_record.ipv4_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 86, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 315, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 31 + }, + { /* act_tid: 7, , table: ext_tun_geneve_encap_record.ipv6_geneve */ + .description = "ext_tun_geneve_encap_record.ipv6_geneve", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 87, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 346, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 29 + }, + { /* act_tid: 7, , table: control.geneve_enc_handle_to_offset */ + .description = "control.geneve_enc_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_ENCAP_PTR_0 } + }, + { /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + .description = "geneve_encap_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 70, + .blob_key_bit_size = 493, + 
.key_bit_size = 493, + .key_num_fields = 15, + .result_start_idx = 375, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: ext_vtag_encap_record.0 */ + .description = "ext_vtag_encap_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 88, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 377, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 7, , table: control.vtag_enc_handle_to_offset */ + .description = "control.vtag_enc_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 89, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ENC_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_ENCAP_PTR_0 } + }, + { /* act_tid: 7, , table: cmm_full_act_record.0 */ + .description = "cmm_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + 
.cond_start_idx = 89, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 388, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* act_tid: 7, , table: control.act_handle_to_offset */ + .description = "control.act_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR } + }, + { /* act_tid: 8, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 9, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 10, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 11, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor 2 not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 12, , table: mod_record.meta */ + .description = "mod_record.meta", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 95, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_VNIC, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_VF_2_VF_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 }, + .result_start_idx = 406, + .result_bit_size = 0, + 
.result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* act_tid: 12, , table: control.mod_handle_to_offset */ + .description = "control.mod_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 95, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* act_tid: 12, , table: cmm_stat_record.0 */ + .description = "cmm_stat_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 95, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 426, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* act_tid: 12, , table: control.stat_handle_to_offset */ + .description = "control.stat_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 96, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = 
BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_STAT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 } + }, + { /* act_tid: 12, , table: cmm_full_act_record.0 */ + .description = "cmm_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 96, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 428, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* act_tid: 12, , table: control.act_handle_to_offset */ + .description = "control.act_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 96, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR } + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_thor2_act_cond_oper_list[] = { +}; + +struct bnxt_ulp_mapper_cond_info ulp_thor2_act_cond_list[] = { + /* cond_execute: act_tid: 1, flow_chain_cache.rd:0*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand 
= BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 1, control.flow_chain:1*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, shared_meter_tbl_cache.rd:2*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER + }, + /* cond_execute: act_tid: 1, control.meter_chk:3*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, shared_mirror_record.rd:4*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 1, control.mirror:5*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, control.do_mod:6*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_RF_0 + }, + /* cond_execute: act_tid: 1, mod_record.ttl_0:7*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* cond_execute: act_tid: 1, mod_record.non_ttl_0:8*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* cond_execute: act_tid: 1, tunnel_cache.f1_f2_act_rd:9*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* cond_execute: act_tid: 1, control.tunnel_cache_check_act:11*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, cmm_stat_record.0:12*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 1, control.queue_and_rss_test:13*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 1, vnic_interface_rss_config.0:15*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 1, vnic_interface_queue_config.0:16*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* field_cond: act_tid: 1, cmm_full_act_record.0:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DROP + }, + /* field_cond: act_tid: 1, cmm_full_act_record.0:18*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* field_cond: act_tid: 1, cmm_full_act_record.0:20*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 1, cmm_full_act_record.0:21*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_DECAP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GENEVE_DECAP + }, + /* cond_execute: act_tid: 3, control.delete_chk:23*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 3, control.mirror_del_exist_chk:24*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 3, control.mirror_ref_cnt_chk:25*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 3, cmm_stat_record.0:26*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* 
cond_execute: act_tid: 6, control.create_check:27*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_UPDATE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 6, meter_profile_tbl_cache.rd:29*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER_PROFILE + }, + /* cond_execute: act_tid: 6, control.shared_meter_profile_0:30*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, shared_meter_tbl_cache.rd:31*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, control.meter_created_chk:32*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, control.shared_meter_profile_chk:33*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, control.delete_check:34*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 6, meter_profile_tbl_cache.del_chk:35*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER_PROFILE + }, + /* cond_execute: act_tid: 6, control.mtr_prof_ref_cnt_chk:36*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 6, shared_meter_tbl_cache.del_chk:37*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, control.shared_mtr_ref_cnt_chk:38*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 6, 
shared_meter_tbl_cache.rd_update:39*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, meter_tbl.update_rd:40*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_PROP_NOT_SET, + .cond_operand = BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID_UPDATE + }, + /* cond_reject: thor2, act_tid: 7 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 7, flow_chain_cache.rd:43*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 7, control.flow_chain:44*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, cmm_stat_record.0:45*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 7, shared_mirror_record.rd:46*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 7, control.mirror:47*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, control.do_mod:48*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_RF_0 + }, + /* cond_execute: act_tid: 7, control.vf_to_vf_calc:49*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* cond_execute: act_tid: 7, mod_record.ttl_0:50*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:51*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:53*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:55*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:57*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:59*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ttl_0:60*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 7, mod_record.non_ttl_0:61*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:62*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:64*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:66*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:68*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:70*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.non_ttl_0:71*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 7, control.do_tunnel_check:72*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_ENCAP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GENEVE_ENCAP + }, + /* cond_execute: act_tid: 7, control.do_tunnel_vlan_exclusion:74*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 7, source_property_cache.rd:75*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG + }, + /* cond_execute: act_tid: 7, control.sp_rec_v4:76*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, source_property_ipv6_cache.rd:77*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG + }, + /* cond_execute: act_tid: 7, control.sp_rec_v6:78*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, control.do_vxlan_check:79*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_ENCAP + }, + /* cond_execute: act_tid: 7, vxlan_encap_rec_cache.rd:80*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: act_tid: 7, control.vxlan_v4_encap:81*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, vxlan_encap_ipv6_rec_cache.rd:82*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* cond_execute: act_tid: 7, control.vxlan_v6_encap:83*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, geneve_encap_rec_cache.rd:84*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* cond_execute: act_tid: 7, control.geneve_encap:85*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, ext_tun_geneve_encap_record.ipv4_vxlan:86*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: act_tid: 7, ext_tun_geneve_encap_record.ipv6_geneve:87*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* cond_execute: act_tid: 7, ext_vtag_encap_record.0:88*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* field_cond: act_tid: 7, cmm_full_act_record.0:89*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, cmm_full_act_record.0:90*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* cond_reject: thor2, act_tid: 12 */ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SAMPLE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 12, cmm_stat_record.0:95*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + } +}; + +struct bnxt_ulp_mapper_key_info ulp_thor2_act_key_info_list[] = { + /* act_tid: 1, , table: flow_chain_cache.rd */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 1, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + 
.field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER & 0xff} + } + }, + /* act_tid: 1, , table: shared_mirror_record.rd */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 1, , table: tunnel_cache.f1_f2_act_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_TUNNEL_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_TUNNEL_ID & 0xff} + } + }, + /* act_tid: 3, , table: shared_mirror_record.del_chk */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 5, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + 
(BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.del_chk */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 7, , table: flow_chain_cache.rd */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 
= { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 7, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 7, , table: shared_mirror_record.rd */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 7, , table: source_property_cache.rd */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + 
.description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: source_property_cache.wr */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.rd */ + { 
+ .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + 
BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + } + 
}, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 
0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + 
.field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + 
(BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor2_act_key_ext_list[] = { + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (18 >> 8) & 0xff, + 18 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RSS_VNIC >> 8) & 0xff, + BNXT_ULP_RF_IDX_RSS_VNIC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (1 >> 8) & 0xff, + 1 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (20 >> 8) & 0xff, + 20 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR2_SYM_RECYCLE_DST >> 8) & 0xff, + ULP_THOR2_SYM_RECYCLE_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr3 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (60 >> 8) & 0xff, + 60 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (71 >> 8) & 0xff, + 71 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (90 >> 8) & 0xff, + 90 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR2_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR2_SYM_LOOPBACK_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr3 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor2_act_result_field_list[] = { + /* act_tid: 1, , table: jump_index_table.alloc */ + /* act_tid: 1, , table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* act_tid: 1, , table: mod_record.ttl_0 */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 
0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + 
.field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_METADATA_OP_NORMAL}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_META_PROFILE_0}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "alt_pfid", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "alt_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "ttl_il3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "ttl_otl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_il3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 
8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 1, , table: mod_record.non_ttl_0 */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_METADATA_OP_NORMAL}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_META_PROFILE_0}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + 
.field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + 
.field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 1, , table: cmm_stat_record.f1_flow */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 1, , table: cmm_stat_record.0 */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 1, , table: vnic_interface_rss_config.0 */ + /* act_tid: 1, , table: vnic_interface_queue_config.0 */ + /* act_tid: 1, , table: cmm_full_act_record.0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_VLAN_DEL_RPT_STRIP_OUTER}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (17 >> 8) & 0xff, + 17 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (21 >> 8) & 0xff, + 21 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_DECAP_FUNC_THRU_TUN}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_ID_0 & 0xff} + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PTR_0 & 0xff} + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 & 0xff} + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: mirror_tbl.alloc */ + { + .description = "reserved1", + .field_bit_size = 21, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "arp_relative", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "action_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sample_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "trunc_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirr_cond", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved2", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "samp_cfg", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: cmm_stat_record.0 */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: cmm_full_act_record.0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 
28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: mirror_tbl.wr */ + { + .description = "reserved1", + .field_bit_size = 21, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "arp_relative", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "action_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sample_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "trunc_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_mode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "mirr_cond", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = 
"reserved2", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "samp_cfg", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + { + .description = "padding1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "cf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF & 0xff} + }, + { + .description = "pm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM & 0xff} + }, + { + .description = "rfc2698", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698 >> 8) & 0xff, + 
BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698 & 0xff} + }, + { + .description = "cbsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM & 0xff} + }, + { + .description = "ebsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM & 0xff} + }, + { + .description = "cbnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND & 0xff} + }, + { + .description = "ebnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND & 0xff} + }, + { + .description = "cbs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS & 0xff} + }, + { + .description = "ebs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS & 0xff} + }, + { + .description = "cir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR & 0xff} + }, + { + .description = "eir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR & 0xff} + }, + /* act_tid: 6, , table: meter_tbl.0 */ + { + .description = "bkt_c", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "bkt_e", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "mtr_val", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL & 0xff} + }, + { + .description = "ecn_rmp_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN & 0xff} + }, + { + .description = "cf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CF_0 & 0xff} + }, + { + .description = "pm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PM_0 & 0xff} + }, + { + .description = "rfc2698", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RFC2698_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RFC2698_0 & 0xff} + }, + { + .description = "cbsm", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBSM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBSM_0 & 0xff} + }, + { + .description = "ebsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBSM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBSM_0 & 0xff} + }, + { + .description = "cbnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBND_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBND_0 & 0xff} + }, + { + .description = "ebnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBND_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBND_0 & 0xff} + }, + { + .description = "cbs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBS_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBS_0 & 0xff} + }, + { + .description = "ebs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBS_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBS_0 & 0xff} + }, + { + .description = "cir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CIR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CIR_0 & 0xff} + }, + { + .description = "eir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EIR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EIR_0 & 0xff} + }, + { + .description = "scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, 
+ (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "rsvd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prot_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PTR_0 & 0xff} + }, + { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + }, + /* act_tid: 6, , table: meter_tbl.update_wr */ + { + .description = "bkt_c", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "bkt_e", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "mtr_val", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE >> 8) 
& 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_RF, + .field_opr3 = { + (BNXT_ULP_RF_IDX_RF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_0 & 0xff} + }, + { + .description = "ecn_rmp_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_RF, + .field_opr3 = { + (BNXT_ULP_RF_IDX_RF_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_1 & 0xff} + }, + { + .description = "cf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CF_0 & 0xff} + }, + { + .description = "pm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PM_0 & 0xff} + }, + { + .description = "rfc2698", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RFC2698_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RFC2698_0 & 0xff} + }, + { + .description = "cbsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBSM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBSM_0 & 0xff} + }, + { + .description = "ebsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBSM_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBSM_0 & 0xff} + }, + { + .description = "cbnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBND_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBND_0 & 0xff} + }, + { + .description = "ebnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBND_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBND_0 & 0xff} + }, + { + .description = "cbs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CBS_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CBS_0 & 0xff} + }, + { + .description = "ebs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EBS_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EBS_0 & 0xff} + }, + { + .description = "cir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CIR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CIR_0 & 0xff} + }, + { + .description = "eir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EIR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EIR_0 & 0xff} + }, + { + .description = "scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prot_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 7, , table: jump_index_table.alloc */ + /* act_tid: 7, , 
table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* act_tid: 7, , table: cmm_stat_record.0 */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: mod_record.ttl_0 */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (51 >> 8) & 0xff, + 51 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "tun_md_en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (53 >> 8) & 0xff, + 53 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (55 >> 8) & 0xff, + 55 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (57 >> 8) & 0xff, + 57 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (59 >> 8) & 0xff, + 59 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RF_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (2 >> 8) & 0xff, + 2 & 0xff} + }, + { + .description = "alt_pfid", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "alt_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"ttl_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "ttl_il3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "ttl_otl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_il3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + 
(BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 7, , table: mod_record.non_ttl_0 */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = 
{ + (62 >> 8) & 0xff, + 62 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 
0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (64 >> 8) & 0xff, + 64 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (66 >> 8) & 0xff, + 66 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (68 >> 8) & 
0xff, + 68 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (70 >> 8) & 0xff, + 70 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RF_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (3 >> 8) & 0xff, + 3 & 0xff} + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 
0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 7, , table: sp_smac_ipv4.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, 
, table: source_property_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + /* act_tid: 7, , table: sp_smac_ipv6.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + /* act_tid: 7, , table: ext_tun_vxlan_encap_record.ipv4_vxlan */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_VALID_YES} + }, + { + .description = 
"ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff} + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff} + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff} + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff} + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + 
BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff} + }, + { + .description = "enc_ipv4_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff} + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff} + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff} + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + }, + { + .description = "enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff} + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: ext_tun_vxlan_encap_record.ipv6_vxlan */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = 
"ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff} + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff} + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff} + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff} + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff} + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + }, + { + .description = "enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff} + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: ext_tun_geneve_encap_record.ipv4_vxlan */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 
8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_TUN_TYPE_NGE} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff} + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff} + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff} + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff} + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff} + }, + { + .description = "enc_ipv4_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff} + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 
0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_geneve_ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + }, + { + .description = "enc_geneve_proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + }, + { + .description = "enc_geneve_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + }, + { + .description = "enc_geneve_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 & 0xff} + }, + { + .description = "enc_geneve_opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + }, + { + .description = "enc_geneve_opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + }, + { + .description = "enc_geneve_opt_w2", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + }, + { + .description = "enc_geneve_opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + }, + { + .description = "enc_geneve_opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + }, + { + .description = "enc_geneve_opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + }, + /* act_tid: 7, , table: ext_tun_geneve_encap_record.ipv6_geneve */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_TUN_TYPE_NGE} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 
32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff} + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff} + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff} + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_geneve_ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + 
BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + }, + { + .description = "enc_geneve_proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + }, + { + .description = "enc_geneve_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + }, + { + .description = "enc_geneve_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 & 0xff} + }, + { + .description = "enc_geneve_opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + }, + { + .description = "enc_geneve_opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + }, + { + .description = "enc_geneve_opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + }, + { + .description = "enc_geneve_opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + }, + { + .description = "enc_geneve_opt_w4", + .field_bit_size = 
32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + }, + { + .description = "enc_geneve_opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: ext_vtag_encap_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN & 0xff} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP & 0xff} + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID & 0xff} + }, + /* act_tid: 7, , table: cmm_full_act_record.0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (89 >> 8) & 0xff, + 89 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR2_SYM_RECYCLE_DST >> 8) & 0xff, + ULP_THOR2_SYM_RECYCLE_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (4 >> 8) & 0xff, + 4 & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_ID_0 & 0xff} + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 12, , table: mod_record.meta */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 & 0xff} + }, + { + .description = "metadata_data", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_0 & 0xff} + }, + /* act_tid: 12, , table: cmm_stat_record.0 */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 12, , table: cmm_full_act_record.0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR2_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR2_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_thor2_act_ident_list[] = 
{ + /* act_tid: 1, , table: flow_chain_cache.rd */ + { + .description = "metadata", + .regfile_idx = BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + { + .description = "meter_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PTR_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* act_tid: 1, , table: shared_mirror_record.rd */ + { + .description = "mirror_id", + .regfile_idx = BNXT_ULP_RF_IDX_MIRROR_ID_0, + .ident_bit_size = 5, + .ident_bit_pos = 32 + }, + /* act_tid: 1, , table: tunnel_cache.f1_f2_act_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + { + .description = "stat_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1, + .ident_bit_size = 64, + .ident_bit_pos = 54 + }, + /* act_tid: 3, , table: shared_mirror_record.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + { + .description = "cbnd", + .regfile_idx = BNXT_ULP_RF_IDX_CBND_0, + .ident_bit_size = 1, + .ident_bit_pos = 37 + }, + { + .description = "cbs", + .regfile_idx = BNXT_ULP_RF_IDX_CBS_0, + .ident_bit_size = 12, + .ident_bit_pos = 39 + }, + { + .description = "cbsm", + .regfile_idx = BNXT_ULP_RF_IDX_CBSM_0, + .ident_bit_size = 1, + .ident_bit_pos = 35 + }, + { + .description = "cf", + .regfile_idx = BNXT_ULP_RF_IDX_CF_0, + .ident_bit_size = 1, + .ident_bit_pos = 32 + }, + { + .description = "cir", + .regfile_idx = BNXT_ULP_RF_IDX_CIR_0, + .ident_bit_size = 17, + .ident_bit_pos = 63 + }, + { + .description = "ebnd", + .regfile_idx = BNXT_ULP_RF_IDX_EBND_0, + .ident_bit_size = 1, + .ident_bit_pos = 38 + }, + { + .description = "ebs", + .regfile_idx = BNXT_ULP_RF_IDX_EBS_0, + .ident_bit_size = 12, + .ident_bit_pos = 51 + }, + { + .description = "ebsm", + .regfile_idx = 
BNXT_ULP_RF_IDX_EBSM_0, + .ident_bit_size = 1, + .ident_bit_pos = 36 + }, + { + .description = "eir", + .regfile_idx = BNXT_ULP_RF_IDX_EIR_0, + .ident_bit_size = 17, + .ident_bit_pos = 80 + }, + { + .description = "pm", + .regfile_idx = BNXT_ULP_RF_IDX_PM_0, + .ident_bit_size = 1, + .ident_bit_pos = 33 + }, + { + .description = "rfc2698", + .regfile_idx = BNXT_ULP_RF_IDX_RFC2698_0, + .ident_bit_size = 1, + .ident_bit_pos = 34 + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + { + .description = "meter_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PTR_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* act_tid: 6, , table: meter_tbl.update_rd */ + { + .description = "cbnd", + .regfile_idx = BNXT_ULP_RF_IDX_CBND_0, + .ident_bit_size = 1, + .ident_bit_pos = 61 + }, + { + .description = "cbs", + .regfile_idx = BNXT_ULP_RF_IDX_CBS_0, + .ident_bit_size = 12, + .ident_bit_pos = 63 + }, + { + .description = "cbsm", + .regfile_idx = BNXT_ULP_RF_IDX_CBSM_0, + .ident_bit_size = 1, + .ident_bit_pos = 59 + }, + { + .description = "cf", + .regfile_idx = BNXT_ULP_RF_IDX_CF_0, + .ident_bit_size = 1, + .ident_bit_pos = 56 + }, + { + .description = "cir", + .regfile_idx = BNXT_ULP_RF_IDX_CIR_0, + .ident_bit_size = 17, + .ident_bit_pos = 87 + }, + { + .description = "ebnd", + .regfile_idx = BNXT_ULP_RF_IDX_EBND_0, + .ident_bit_size = 1, + .ident_bit_pos = 62 + }, + { + .description = "ebs", + .regfile_idx = BNXT_ULP_RF_IDX_EBS_0, + .ident_bit_size = 12, + .ident_bit_pos = 75 + }, + { + .description = "ebsm", + .regfile_idx = BNXT_ULP_RF_IDX_EBSM_0, + .ident_bit_size = 1, + .ident_bit_pos = 60 + }, + { + 
.description = "ecn_rmp_en", + .regfile_idx = BNXT_ULP_RF_IDX_RF_1, + .ident_bit_size = 1, + .ident_bit_pos = 55 + }, + { + .description = "eir", + .regfile_idx = BNXT_ULP_RF_IDX_EIR_0, + .ident_bit_size = 17, + .ident_bit_pos = 104 + }, + { + .description = "mtr_val", + .regfile_idx = BNXT_ULP_RF_IDX_RF_0, + .ident_bit_size = 1, + .ident_bit_pos = 54 + }, + { + .description = "pm", + .regfile_idx = BNXT_ULP_RF_IDX_PM_0, + .ident_bit_size = 1, + .ident_bit_pos = 57 + }, + { + .description = "rfc2698", + .regfile_idx = BNXT_ULP_RF_IDX_RFC2698_0, + .ident_bit_size = 1, + .ident_bit_pos = 58 + }, + /* act_tid: 7, , table: flow_chain_cache.rd */ + { + .description = "metadata", + .regfile_idx = BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: shared_mirror_record.rd */ + { + .description = "mirror_id", + .regfile_idx = BNXT_ULP_RF_IDX_MIRROR_ID_0, + .ident_bit_size = 5, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: source_property_cache.rd */ + { + .description = "sp_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + { + .description = "sp_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 32, + .ident_bit_pos = 32 + } +}; diff --git 
a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_class.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_class.c new file mode 100644 index 000000000000..bc051898f6d4 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor2_class.c @@ -0,0 +1,53192 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header class list */ +struct bnxt_ulp_mapper_tmpl_info ulp_thor2_class_tmpl_list[] = { + /* class_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 43, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 0, + .cond_nums = 0 } + }, + /* class_tid: 2, egress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 28, + .start_tbl_idx = 43, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 1662, + .cond_nums = 0 } + }, + /* class_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 35, + .start_tbl_idx = 71, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 3306, + .cond_nums = 0 } + }, + /* class_tid: 4, egress */ + [4] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR2, + .num_tbls = 19, + .start_tbl_idx = 106, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 3310, + .cond_nums = 0 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_thor2_class_tbl_list[] = { + { /* class_tid: 1, , table: port_table.get_def_rd */ + .description = "port_table.get_def_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 0, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 0, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 2 + }, + { /* class_tid: 1, , table: l2_cntxt_tcam_cache.def_rd */ + .description = "l2_cntxt_tcam_cache.def_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 0, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 1, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 2, + .ident_nums = 3 + }, + { /* class_tid: 1, , table: control.check_f1_f2_flow */ + .description = "control.check_f1_f2_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 7, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 0, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + .description = "tunnel_cache.f1_f2_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, 
+ .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 2, + .blob_key_bit_size = 19, + .key_bit_size = 19, + .key_num_fields = 2, + .ident_start_idx = 5, + .ident_nums = 3 + }, + { /* class_tid: 1, , table: control.tunnel_cache_check */ + .description = "control.tunnel_cache_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.f1_f2_alloc_l2_cntxt */ + .description = "l2_cntxt_tcam.f1_f2_alloc_l2_cntxt", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 8, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: cmm_stat_record.add_stat_tunnel_cache */ + .description = 
"cmm_stat_record.add_stat_tunnel_cache", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_NOP_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 0, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + .description = "tunnel_cache.f1_f2_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 4, + .blob_key_bit_size = 19, + .key_bit_size = 19, + .key_num_fields = 2, + .result_start_idx = 2, + .result_bit_size = 182, + .result_num_fields = 5 + }, + { /* class_tid: 1, , table: control.check_f2_flow */ + .description = "control.check_f2_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 9, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* 
class_tid: 1, , table: control.dmac_calculation */ + .description = "control.dmac_calculation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 4, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 48, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 0, + .func_dst_opr = BNXT_ULP_RF_IDX_O_DMAC } + }, + { /* class_tid: 1, , table: mac_addr_cache.l2_table_rd */ + .description = "mac_addr_cache.l2_table_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 8, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 6, + .blob_key_bit_size = 195, + .key_bit_size = 195, + .key_num_fields = 10, + .ident_start_idx = 9, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.mac_addr_cache_check */ + .description = "control.mac_addr_cache_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 11, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.allocate_l2_context */ + .description = 
"l2_cntxt_tcam.allocate_l2_context", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 12, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 140, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 10, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.l2_table_create */ + .description = "l2_cntxt_tcam.l2_table_create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 14, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 140, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 16, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 24, + .result_start_idx = 7, + .result_bit_size = 127, + .result_num_fields = 17, + .ident_start_idx = 11, + .ident_nums = 0 + }, + { /* class_tid: 1, , table: mac_addr_cache.l2_table_wr */ + .description = "mac_addr_cache.l2_table_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto 
= 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 18, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 40, + .blob_key_bit_size = 195, + .key_bit_size = 195, + .key_num_fields = 10, + .result_start_idx = 24, + .result_bit_size = 94, + .result_num_fields = 5 + }, + { /* class_tid: 1, , table: control.check_f1_flow */ + .description = "control.check_f1_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 20, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: cmm_stat_record.f1_flow */ + .description = "cmm_stat_record.f1_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_STAT, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 21, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_NOP_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID_SW_ONLY, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 29, + .result_bit_size = 128, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: control.tunnel_ipv6_sip_check */ + .description = "control.tunnel_ipv6_sip_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "reject ipv6 tunnel flow with tunnel source ip", + .execute_info = 
{ + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 0, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.l2_only_check */ + .description = "control.l2_only_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to missing Ethertype for L2 flows", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 1, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.terminating_flow */ + .description = "control.terminating_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 34, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 3, + .func_dst_opr = BNXT_ULP_RF_IDX_TERM_FLOW } + }, + { /* class_tid: 1, , table: proto_header_cache.rd */ + .description = "proto_header_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = 
BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 50, + .blob_key_bit_size = 76, + .key_bit_size = 76, + .key_num_fields = 3, + .ident_start_idx = 11, + .ident_nums = 7 + }, + { /* class_tid: 1, , table: control.proto_header_cache_miss */ + .description = "control.proto_header_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 13, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 42, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + .description = "hdr_overlap_cache.overlap_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 53, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .partial_key_start_idx = 55, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .ident_start_idx = 18, + .ident_nums = 2 + }, + { /* class_tid: 1, , table: control.overlap_miss */ + .description = "control.overlap_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 44, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, 
, table: profile_tcam.allocate_wc_profile */ + .description = "profile_tcam.allocate_wc_profile", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 45, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_PROF_TCAM_PRI_APP, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 20, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: fkb_select.wc_gen_template */ + .description = "fkb_select.wc_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_WC_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 45, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 31, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + .description = "hdr_overlap_cache.overlap_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 326, + 
.cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 56, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .partial_key_start_idx = 58, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .result_start_idx = 203, + .result_bit_size = 48, + .result_num_fields = 3 + }, + { /* class_tid: 1, , table: control.proto_header_rid_alloc */ + .description = "control.proto_header_rid_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 327, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: fkb_select.em_gen_template_alloc */ + .description = "fkb_select.em_gen_template_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 327, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 206, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 1, , table: em_key_recipe.alloc_only */ + .description = "em_key_recipe.alloc_only", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 328, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 378, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: control.profile_tcam_priority */ + .description = "control.profile_tcam_priority", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 328, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 50, + .func_dst_opr = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY } + }, + { /* class_tid: 1, , table: profile_tcam.gen_template */ + .description = "profile_tcam.gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 344, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_REGFILE, + .pri_operand = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY, + .mark_db_opcode = 
BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 59, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 66, + .result_start_idx = 378, + .result_bit_size = 64, + .result_num_fields = 10, + .ident_start_idx = 21, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: wm_key_recipe.0 */ + .description = "wm_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 575, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 125, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 34, + .result_start_idx = 388, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: proto_header_cache.wr */ + .description = "proto_header_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 993, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 159, + .blob_key_bit_size = 76, + .key_bit_size = 76, + .key_num_fields = 3, + .result_start_idx = 388, + .result_bit_size = 106, + .result_num_fields = 8 + }, + { /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + .description 
= "em_flow_conflict_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 993, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 162, + .blob_key_bit_size = 78, + .key_bit_size = 78, + .key_num_fields = 4, + .ident_start_idx = 22, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.em_flow_conflict_cache_miss */ + .description = "control.em_flow_conflict_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 995, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: fkb_select.em_gen_template */ + .description = "fkb_select.em_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 996, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 396, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 1, , table: em_key_recipe.0 */ + .description = 
"em_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1253, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 166, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 33, + .result_start_idx = 568, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + .description = "em_flow_conflict_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1657, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 199, + .blob_key_bit_size = 78, + .key_bit_size = 78, + .key_num_fields = 4, + .result_start_idx = 568, + .result_bit_size = 96, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: control.field_sig_validation */ + .description = "control.field_sig_validation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1657, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + 
.func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 1, , table: em_normal.ingress_generic_template */ + .description = "em_normal.ingress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1659, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_EM_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .result_start_idx = 570, + .result_bit_size = 0, + .result_num_fields = 17 + }, + { /* class_tid: 1, , table: control.em_add_check */ + .description = "control.em_add_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1661, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: wm_normal.ingress_generic_template */ + .description = "wm_normal.ingress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_WC, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1662, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_TCAM_INDEX_0, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 587, + .result_bit_size = 128, + .result_num_fields = 15 + }, + { /* class_tid: 2, , table: port_table.get_def_rd */ + .description = "port_table.get_def_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1662, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 203, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .ident_start_idx = 23, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: l2_cntxt_tcam_cache.def_rd */ + .description = "l2_cntxt_tcam_cache.def_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1662, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 204, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + 
.ident_start_idx = 24, + .ident_nums = 2 + }, + { /* class_tid: 2, , table: control.l2_only_check */ + .description = "control.l2_only_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Reject due to missing Ethertype for L2 flows", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 3, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: control.tunnel_ipv6_sip_check */ + .description = "control.tunnel_ipv6_sip_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "reject ipv6 tunnel flow with tunnel source ip or source mac", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 5, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: control.terminating_flow */ + .description = "control.terminating_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1680, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 318, + .func_dst_opr = BNXT_ULP_RF_IDX_TERM_FLOW } + }, + { /* class_tid: 2, , table: proto_header_cache.rd */ + .description = "proto_header_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_TX, + .execute_info 
= { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1688, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 205, + .blob_key_bit_size = 76, + .key_bit_size = 76, + .key_num_fields = 3, + .ident_start_idx = 26, + .ident_nums = 7 + }, + { /* class_tid: 2, , table: control.proto_header_cache_miss */ + .description = "control.proto_header_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 13, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1688, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + .description = "hdr_overlap_cache.overlap_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1689, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 208, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .partial_key_start_idx = 210, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .ident_start_idx = 33, + .ident_nums = 2 + }, + { /* class_tid: 2, , table: control.overlap_miss */ + .description = "control.overlap_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = 
TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1690, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: profile_tcam.allocate_wc_profile */ + .description = "profile_tcam.allocate_wc_profile", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1691, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 35, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: fkb_select.wc_gen_template */ + .description = "fkb_select.wc_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_WC_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1691, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 602, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + .description = "hdr_overlap_cache.overlap_wr", + 
.resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1974, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 211, + .blob_key_bit_size = 12, + .key_bit_size = 12, + .key_num_fields = 2, + .partial_key_start_idx = 213, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .result_start_idx = 774, + .result_bit_size = 48, + .result_num_fields = 3 + }, + { /* class_tid: 2, , table: control.proto_header_rid_alloc */ + .description = "control.proto_header_rid_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1975, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: fkb_select.em_gen_template_alloc */ + .description = "fkb_select.em_gen_template_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_EM_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1975, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + 
.result_start_idx = 777, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 2, , table: em_key_recipe.alloc_only */ + .description = "em_key_recipe.alloc_only", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1976, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 949, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: control.profile_tcam_priority */ + .description = "control.profile_tcam_priority", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1976, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 366, + .func_dst_opr = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY } + }, + { /* class_tid: 2, , table: profile_tcam.gen_template */ + .description = "profile_tcam.gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1992, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = 
BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_REGFILE, + .pri_operand = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 214, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 66, + .result_start_idx = 949, + .result_bit_size = 64, + .result_num_fields = 10, + .ident_start_idx = 36, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: wm_key_recipe.0 */ + .description = "wm_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2215, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 280, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 34, + .result_start_idx = 959, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: proto_header_cache.wr */ + .description = "proto_header_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2637, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = 
BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 314, + .blob_key_bit_size = 76, + .key_bit_size = 76, + .key_num_fields = 3, + .result_start_idx = 959, + .result_bit_size = 106, + .result_num_fields = 8 + }, + { /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + .description = "em_flow_conflict_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2637, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 317, + .blob_key_bit_size = 78, + .key_bit_size = 78, + .key_num_fields = 4, + .ident_start_idx = 37, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: control.em_flow_conflict_cache_miss */ + .description = "control.em_flow_conflict_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2639, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: fkb_select.em_gen_template */ + .description = "fkb_select.em_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_EM_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2640, + .cond_nums 
= 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 967, + .result_bit_size = 256, + .result_num_fields = 172 + }, + { /* class_tid: 2, , table: em_key_recipe.0 */ + .description = "em_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2897, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 321, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 33, + .result_start_idx = 1139, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + .description = "em_flow_conflict_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3301, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 354, + .blob_key_bit_size = 78, + .key_bit_size = 78, + .key_num_fields = 4, + .result_start_idx = 1139, + .result_bit_size = 96, + .result_num_fields = 2 + }, + { /* class_tid: 2, , table: control.field_sig_validation */ + .description = 
"control.field_sig_validation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3301, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 2, , table: em_normal.egress_generic_template */ + .description = "em_normal.egress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3303, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_EM_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .result_start_idx = 1141, + .result_bit_size = 0, + .result_num_fields = 17 + }, + { /* class_tid: 2, , table: control.em_add_check */ + .description = "control.em_add_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3305, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: wm_normal.egress_generic_template */ + .description = "wm_normal.egress_generic_template", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_WC, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1158, + .result_bit_size = 128, + .result_num_fields = 15 + }, + { /* class_tid: 3, , table: metadata_record.act_rx_wr */ + .description = "metadata_record.act_rx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_ACT_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1173, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: metadata_record.prof_rx_wr */ + .description = "metadata_record.prof_rx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_PROF, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_RX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_PROF_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1174, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: metadata_record.lkup_rx_wr */ + .description = "metadata_record.lkup_rx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_LKUP_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1175, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: metadata_record.act_tx_wr */ + .description = "metadata_record.act_tx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1176, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: metadata_record.prof_tx_wr */ + .description = "metadata_record.prof_tx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_PROF, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_PROF_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1177, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: metadata_record.lkup_tx_wr */ + .description = "metadata_record.lkup_tx_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_CFA_TBLS, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_LKUP_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1178, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: table_scope_cache.tsid_ing_rd */ 
+ .description = "table_scope_cache.tsid_ing_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3306, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 358, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .ident_start_idx = 38, + .ident_nums = 2 + }, + { /* class_tid: 3, , table: control.ts_ing_rd_check */ + .description = "control.ts_ing_rd_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3306, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: cmm_full_act_record.ing_default_0 */ + .description = "cmm_full_act_record.ing_default_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx 
= 1179, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* class_tid: 3, , table: cmm_full_act_record.ing_default_1 */ + .description = "cmm_full_act_record.ing_default_1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1197, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* class_tid: 3, , table: control.act_handle_to_offset */ + .description = "control.act_handle_to_offset", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR } + }, + { /* class_tid: 3, , table: profile_tcam_bypass.ing_catch_all */ + .description = "profile_tcam_bypass.ing_catch_all", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + 
.cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_PROF_TCAM_PRI_CATCHALL, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 360, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 66, + .result_start_idx = 1215, + .result_bit_size = 65, + .result_num_fields = 7, + .ident_start_idx = 40, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: table_scope_cache.tsid_ing_wr */ + .description = "table_scope_cache.tsid_ing_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 426, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .result_start_idx = 1222, + .result_bit_size = 88, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: port_table.ing_wr */ + .description = "port_table.ing_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + 
.gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 428, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .result_start_idx = 1227, + .result_bit_size = 185, + .result_num_fields = 7 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + .description = "l2_cntxt_tcam_cache.ing_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3307, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 429, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 41, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: control.ing_rd_check */ + .description = "control.ing_rd_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3307, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.svif_ing */ + .description = "l2_cntxt_tcam.svif_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + 
.tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_L2_CTXT_PRI_CATCHALL, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 430, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 24, + .result_start_idx = 1234, + .result_bit_size = 127, + .result_num_fields = 17, + .ident_start_idx = 42, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + .description = "l2_cntxt_tcam_cache.ing_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 454, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 1251, + .result_bit_size = 94, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: cmm_full_act_record.throw_away_egr */ + .description = "cmm_full_act_record.throw_away_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = 
BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1256, + .result_bit_size = 192, + .result_num_fields = 18, + .encap_num_fields = 0 + }, + { /* class_tid: 3, , table: cmm_full_act_record.egr_default_0 */ + .description = "cmm_full_act_record.egr_default_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1274, + .result_bit_size = 192, + .result_num_fields = 18, + .encap_num_fields = 0 + }, + { /* class_tid: 3, , table: port_table.egr_wr_0 */ + .description = "port_table.egr_wr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR }, + 
.key_start_idx = 455, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .result_start_idx = 1292, + .result_bit_size = 185, + .result_num_fields = 7 + }, + { /* class_tid: 3, , table: ilt_tbl.egr */ + .description = "ilt_tbl.egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = CFA_RSUBTYPE_IF_TBL_ILT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_SVIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 1299, + .result_bit_size = 128, + .result_num_fields = 14 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.no_vfr_egr_rd */ + .description = "l2_cntxt_tcam_cache.no_vfr_egr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3308, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 456, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 43, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.non_vfr_egr_rd_check */ + .description = "control.non_vfr_egr_rd_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3308, + .cond_nums = 1 }, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.non_vfr_svif_egr */ + .description = "l2_cntxt_tcam.non_vfr_svif_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3309, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_L2_CTXT_PRI_APP, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 457, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 24, + .result_start_idx = 1313, + .result_bit_size = 127, + .result_num_fields = 17, + .ident_start_idx = 43, + .ident_nums = 2 + }, + { /* class_tid: 3, , table: profile_tcam_bypass.non_vfr_egr_catch_all */ + .description = "profile_tcam_bypass.non_vfr_egr_catch_all", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3309, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_PROF_TCAM_PRI_CATCHALL, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = 
CFA_TRACK_TYPE_SID, + .key_start_idx = 481, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 66, + .result_start_idx = 1330, + .result_bit_size = 65, + .result_num_fields = 7, + .ident_start_idx = 45, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.non_vfr_egr_wr */ + .description = "l2_cntxt_tcam_cache.non_vfr_egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3309, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 547, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 1337, + .result_bit_size = 94, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: table_scope_cache.tsid_vfr_rd */ + .description = "table_scope_cache.tsid_vfr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3309, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 548, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .ident_start_idx = 45, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: control.tsid_vfr_rd_check */ + .description = "control.tsid_vfr_rd_check", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3309, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: mod_record.svif2meta */ + .description = "mod_record.svif2meta", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 1342, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* class_tid: 3, , table: control.mod_handle_to_offset_svif2meta */ + .description = "control.mod_handle_to_offset_svif2meta", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* class_tid: 3, , table: cmm_full_act_record.ing_vf2vf */ + .description = 
"cmm_full_act_record.ing_vf2vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1362, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* class_tid: 3, , table: control.act_handle_to_offset_ing_vf2vf */ + .description = "control.act_handle_to_offset_ing_vf2vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR } + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.vf2vf_ing */ + .description = "l2_cntxt_tcam.vf2vf_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_L2_CTXT_PRI_APP, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 550, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 24, + .result_start_idx = 1380, + .result_bit_size = 127, + .result_num_fields = 17, + .ident_start_idx = 46, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: table_scope_cache.tsid_vfr_wr */ + .description = "table_scope_cache.tsid_vfr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 574, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .result_start_idx = 1397, + .result_bit_size = 88, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_rd */ + .description = "table_scope_cache.tsid_vfr_egr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3310, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + 
.key_start_idx = 576, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .ident_start_idx = 47, + .ident_nums = 3 + }, + { /* class_tid: 4, , table: control.tsid_vfr_egr_check */ + .description = "control.tsid_vfr_egr_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 7, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3310, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: mod_record.meta2uplink */ + .description = "mod_record.meta2uplink", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_DRV_FUNC_VNIC, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_VF_2_VF_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 }, + .result_start_idx = 1402, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* class_tid: 4, , table: control.mod_handle_to_offset_meta2uplink */ + .description = "control.mod_handle_to_offset_meta2uplink", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + 
.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* class_tid: 4, , table: cmm_full_act_record.endpoint_def_act */ + .description = "cmm_full_act_record.endpoint_def_act", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1422, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* class_tid: 4, , table: control.act_handle_to_offset_endpoint_def_act */ + .description = "control.act_handle_to_offset_endpoint_def_act", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + 
.func_dst_opr = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR } + }, + { /* class_tid: 4, , table: profile_tcam_bypass.tsid_vfr_egr_catch_all */ + .description = "profile_tcam_bypass.tsid_vfr_egr_catch_all", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_PROF_TCAM_PRI_CATCHALL, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 578, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 66, + .result_start_idx = 1440, + .result_bit_size = 65, + .result_num_fields = 7, + .ident_start_idx = 50, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_wr */ + .description = "table_scope_cache.tsid_vfr_egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 644, + .blob_key_bit_size = 6, + .key_bit_size = 6, + .key_num_fields = 2, + .result_start_idx = 1447, + .result_bit_size = 88, + .result_num_fields = 5 + }, + { /* 
class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_rd */ + .description = "l2_cntxt_tcam_cache.endpoint_def_egr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3311, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 646, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 51, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: control.endpoint_def_egr_rd_check */ + .description = "control.endpoint_def_egr_rd_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3311, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: l2_cntxt_tcam.vf2vf_egr */ + .description = "l2_cntxt_tcam.vf2vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = CFA_RSUBTYPE_TCAM_L2CTX, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = 
BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST, + .pri_operand = ULP_THOR2_SYM_L2_CTXT_PRI_CATCHALL, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 647, + .blob_key_bit_size = 256, + .key_bit_size = 256, + .key_num_fields = 24, + .result_start_idx = 1452, + .result_bit_size = 127, + .result_num_fields = 17, + .ident_start_idx = 52, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_wr */ + .description = "l2_cntxt_tcam_cache.endpoint_def_egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 671, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 1469, + .result_bit_size = 94, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: port_table.egr_wr_0 */ + .description = "port_table.egr_wr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 672, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields 
= 1, + .result_start_idx = 1474, + .result_bit_size = 185, + .result_num_fields = 7 + }, + { /* class_tid: 4, , table: mod_record.vfr2vf */ + .description = "mod_record.vfr2vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_VF_FUNC_VNIC, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR2_SYM_VF_2_VF_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 }, + .result_start_idx = 1481, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* class_tid: 4, , table: control.mod_handle_to_offset_vfr2vf */ + .description = "control.mod_handle_to_offset_vfr2vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_MOD_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 8, + .func_dst_opr = BNXT_ULP_RF_IDX_MODIFY_PTR } + }, + { /* class_tid: 4, , table: cmm_full_act_record.vfr2vf_act */ + .description = "cmm_full_act_record.vfr2vf_act", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_CMM_TABLE, + .resource_type = CFA_RSUBTYPE_CMM_ACT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_CMM_TABLE_ACT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .result_start_idx = 1501, + .result_bit_size = 192, + .result_num_fields = 18 + }, + { /* class_tid: 4, , table: control.act_handle_to_offset_vfr2vf_act */ + .description = "control.act_handle_to_offset_vfr2vf_act", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_CMM_ACT_HNDL, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 32, + .func_dst_opr = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR } + }, + { /* class_tid: 4, , table: control.bd_act_set */ + .description = "control.bd_act_set", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BD_ACT_SET, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_DEV_PORT_ID, + .func_src2 = 
BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr2 = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 4, , table: control.vfr_mark_set */ + .description = "control.vfr_mark_set", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3312, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_VFR_MARK_SET, + .func_src1 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr1 = BNXT_ULP_CF_IDX_VF_FUNC_SVIF, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_DEV_PORT_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC }, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_AND_SET_VFR_FLAG + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_thor2_class_cond_oper_list[] = { + /* cond_execute: class_tid: 1, control.tunnel_ipv6_sip_check:21*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 21, + .cond_nums = 3 + }, + /* cond_execute: class_tid: 1, control.l2_only_check:24*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 24, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 1, control.l2_only_check:24*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 29, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1662*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1662, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1662*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1667, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1672*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1672, + .cond_nums = 4 + }, + /* cond_execute: class_tid: 2, 
control.tunnel_ipv6_sip_check:1672*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1676, + .cond_nums = 4 + } +}; + +struct bnxt_ulp_mapper_cond_info ulp_thor2_class_cond_list[] = { + /* cond_execute: class_tid: 1, control.check_f1_f2_flow:0*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* cond_execute: class_tid: 1, control.tunnel_cache_check:2*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, control.check_f2_flow:3*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, control.dmac_calculation:4*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, control.dmac_calculation:6*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET, + .cond_operand = BNXT_ULP_FEATURE_BIT_PORT_DMAC + }, + /* field_cond: class_tid: 1, control.dmac_calculation:7*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET, + .cond_operand = BNXT_ULP_FEATURE_BIT_PARENT_DMAC + }, + /* cond_execute: class_tid: 1, mac_addr_cache.l2_table_rd:8*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_O_DMAC + }, + /* field_cond: class_tid: 1, mac_addr_cache.l2_table_rd:9*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.l2_table_rd:10*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* cond_execute: class_tid: 1, control.mac_addr_cache_check:11*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, l2_cntxt_tcam.allocate_l2_context:12*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.l2_table_create:14*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.l2_table_create:15*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.l2_table_create:16*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.l2_table_create:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.l2_table_wr:18*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.l2_table_wr:19*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* cond_execute: class_tid: 1, control.check_f1_flow:20*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + /* cond_execute: class_tid: 1, control.tunnel_ipv6_sip_check:21*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* cond_execute: class_tid: 1, control.l2_only_check:24*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* cond_execute: class_tid: 1, control.l2_only_check:24*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, control.terminating_flow:34*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, control.terminating_flow:36*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, control.terminating_flow:38*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, control.terminating_flow:40*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* cond_execute: class_tid: 1, control.proto_header_cache_miss:42*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, hdr_overlap_cache.overlap_check:43*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, control.overlap_miss:44*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:45*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:46*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:47*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:48*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:49*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:52*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:55*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:59*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:63*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:67*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:71*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:74*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:77*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:80*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:83*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, 
fkb_select.wc_gen_template:86*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:89*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:92*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:95*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:98*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:101*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:104*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:107*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:110*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:116*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:119*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:122*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:125*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:128*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:131*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:133*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:135*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:138*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:141*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:144*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:147*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:151*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:155*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:159*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:163*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:167*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:171*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:175*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:179*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:183*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:187*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:191*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:195*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL 
+ }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:199*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:203*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:207*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:211*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:215*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:219*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:223*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:227*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:231*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:235*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:239*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:243*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:247*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:251*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:255*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:259*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:263*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:267*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:271*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:275*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:279*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:283*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:287*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:291*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* 
field_cond: class_tid: 1, fkb_select.wc_gen_template:295*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:299*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:303*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:307*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:308*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* 
field_cond: class_tid: 1, fkb_select.wc_gen_template:311*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:314*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:315*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:318*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:321*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:322*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:324*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, 
+ /* field_cond: class_tid: 1, hdr_overlap_cache.overlap_wr:326*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, fkb_select.em_gen_template_alloc:327*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:328*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:330*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:332*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:334*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:336*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:338*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, 
control.profile_tcam_priority:340*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:342*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:344*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:346*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:348*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:350*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:352*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:354*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:356*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:358*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:360*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:362*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:364*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:366*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:368*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP 
+ }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:370*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:372*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:374*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:376*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:378*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:380*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:382*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:384*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:386*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:388*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:390*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:392*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:394*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:396*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:398*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:400*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:402*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:404*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:406*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:408*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:410*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:412*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:414*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:416*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:418*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:420*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:422*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:424*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:426*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:428*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:430*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:432*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:434*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:436*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:438*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:440*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:442*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:444*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:446*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:448*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:450*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:452*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:454*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:456*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:459*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:462*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:465*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:468*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:469*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:470*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:472*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:474*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:476*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:478*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:480*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:482*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:484*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:486*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:488*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:490*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:492*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:494*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:496*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:498*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:500*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:502*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:504*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:506*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:507*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:508*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:509*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:511*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:513*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:515*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:517*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:519*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:521*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:523*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:525*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:527*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:529*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:531*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + 
}, + /* field_cond: class_tid: 1, profile_tcam.gen_template:533*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:535*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:537*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:538*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:540*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:542*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:543*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:545*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:547*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:549*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:551*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:553*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:555*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:557*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:559*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:561*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:563*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:566*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:569*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:570*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:571*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:573*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:575*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:576*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:577*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:578*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:579*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:580*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:581*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:584*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:587*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:590*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:593*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:597*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:601*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:605*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:609*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:613*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:617*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:621*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + 
}, + /* field_cond: class_tid: 1, wm_key_recipe.0:625*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:628*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:631*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:634*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:637*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:640*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:643*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:646*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:649*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:652*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:655*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:658*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:661*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:664*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:667*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:670*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:673*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:676*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:679*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:682*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:685*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:688*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:691*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:694*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:697*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:700*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:703*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:706*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:709*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:711*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:713*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:715*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:717*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand 
= BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:720*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:723*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:726*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:729*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:732*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:735*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:738*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:741*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:745*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:749*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:753*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:757*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:761*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:765*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, 
+ /* field_cond: class_tid: 1, wm_key_recipe.0:769*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:773*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:777*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:781*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:785*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:789*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:793*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:797*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:801*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:805*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:808*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:811*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:814*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:817*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:820*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:823*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:826*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:829*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:832*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:835*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:838*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:841*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:844*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:847*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: 
class_tid: 1, wm_key_recipe.0:850*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:853*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:856*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:859*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:862*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:865*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:868*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:871*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:874*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:877*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:881*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL 
+ }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:883*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:885*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:888*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:891*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:894*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:897*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:899*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:901*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:903*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:905*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:908*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:911*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:914*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:917*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:920*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:923*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:926*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:929*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:932*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:935*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:938*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:941*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:944*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:947*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:950*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:953*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:954*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:957*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:960*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:963*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:966*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:967*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:970*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:973*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:976*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:979*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:980*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:983*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:986*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:989*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:992*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* cond_execute: class_tid: 1, em_flow_conflict_cache.rd:993*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* cond_execute: class_tid: 1, control.em_flow_conflict_cache_miss:995*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:996*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:997*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:998*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:999*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1000*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1004*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1007*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1011*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1015*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1019*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1023*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1026*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1029*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1032*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1035*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, 
fkb_select.em_gen_template:1038*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1041*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1044*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1047*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1050*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1053*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1056*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1059*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1062*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1065*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1068*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1071*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1074*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1077*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1080*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1083*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1085*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1087*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1091*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1095*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1098*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1101*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1105*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1109*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1117*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1121*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1125*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1129*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 
1, fkb_select.em_gen_template:1133*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1136*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1139*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1142*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1145*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1148*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1151*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1154*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1157*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1160*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1163*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1166*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1169*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1172*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1175*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1178*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1181*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1184*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1187*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1190*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1193*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1196*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1199*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1202*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1205*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1208*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1211*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1214*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1217*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1220*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1223*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, 
fkb_select.em_gen_template:1226*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1229*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1232*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1235*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1238*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1241*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1244*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1247*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1250*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1253*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1254*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1255*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1256*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1257*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1258*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1259*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1263*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1267*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1270*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1273*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1277*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1281*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1285*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1289*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1293*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1297*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1301*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1305*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1308*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1311*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1314*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1317*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1320*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1323*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1326*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1329*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1332*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1335*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1338*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1341*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1344*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1347*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1350*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1353*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1356*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1359*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1362*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1365*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1368*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1371*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1374*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1377*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1380*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1383*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1386*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1389*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1391*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1393*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1395*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1397*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* 
field_cond: class_tid: 1, em_key_recipe.0:1401*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1405*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1409*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1413*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1416*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1419*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1422*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1425*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1429*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1433*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, 
+ .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1437*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1441*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1445*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1449*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1453*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1457*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1461*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1465*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1469*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1473*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1477*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1481*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1485*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1489*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1492*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1495*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1498*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1501*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1504*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1507*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1510*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1513*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, 
em_key_recipe.0:1516*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1519*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1522*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1525*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1528*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1531*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1534*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1537*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1540*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1543*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1546*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1549*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1552*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1555*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1558*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1561*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1564*/ + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1567*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1570*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1573*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1576*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1579*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1582*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1585*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1588*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1591*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1594*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1597*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1600*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1603*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1606*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1609*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1612*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1615*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1618*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1621*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1624*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1627*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, 
+ .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1630*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1633*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1636*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1639*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1642*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1645*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1648*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1651*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1654*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* cond_execute: class_tid: 1, control.field_sig_validation:1657*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_FLOW_SIG_ID + }, + /* cond_execute: class_tid: 1, em_normal.ingress_generic_template:1659*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* cond_execute: class_tid: 1, control.em_add_check:1661*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1662*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1662*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1672*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1672*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, control.terminating_flow:1680*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1682*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1684*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1686*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* cond_execute: class_tid: 2, control.proto_header_cache_miss:1688*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, hdr_overlap_cache.overlap_check:1689*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, control.overlap_miss:1690*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: 
class_tid: 2, fkb_select.wc_gen_template:1691*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1692*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1693*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1694*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1695*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1698*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1701*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1703*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1707*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1711*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1715*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1719*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1722*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1725*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1728*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1731*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1734*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + 
/* field_cond: class_tid: 2, fkb_select.wc_gen_template:1737*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1740*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1743*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1746*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1749*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1752*/ + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1755*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1758*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1761*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1764*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1767*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1770*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1773*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1776*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1779*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1781*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: 
class_tid: 2, fkb_select.wc_gen_template:1783*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1786*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1789*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1792*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1795*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + 
}, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1799*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1803*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1807*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1811*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, 
fkb_select.wc_gen_template:1815*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1819*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1823*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1827*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1831*/ + { + .cond_opcode 
= BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1835*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1839*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1843*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1847*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1851*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1855*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1859*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1863*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1867*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1871*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1875*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1883*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1887*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1891*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1895*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1899*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1903*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1907*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1911*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1915*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1919*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1923*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1927*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1931*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1935*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1939*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1943*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1947*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1951*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1955*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1956*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1959*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1962*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1963*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1966*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1969*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1970*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1972*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, hdr_overlap_cache.overlap_wr:1974*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, fkb_select.em_gen_template_alloc:1975*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1976*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1978*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1980*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1982*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1984*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1986*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1988*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* 
field_cond: class_tid: 2, control.profile_tcam_priority:1990*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1992*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1994*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1996*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1998*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2000*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2002*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2004*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2006*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2008*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2010*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2012*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2014*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2016*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2018*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2020*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2022*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2024*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2026*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2028*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2030*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2032*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2034*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2036*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2038*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2040*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2042*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2044*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2046*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2048*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2050*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2052*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2054*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2056*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2058*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2060*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2062*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, 
profile_tcam.gen_template:2064*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2066*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2068*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2070*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2072*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2074*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2076*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2078*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2080*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2082*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2084*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2086*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2088*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2090*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2092*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2094*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2096*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2098*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2100*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2102*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2104*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2107*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2110*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2116*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2117*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2118*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2120*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2122*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2124*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2126*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2128*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2130*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2132*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2134*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2136*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2138*/ + { + .cond_opcode 
= BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2140*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2142*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2144*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2146*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2148*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2150*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2152*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2154*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2155*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2156*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2157*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2159*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2161*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2163*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2165*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2167*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2169*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2171*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2173*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2175*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2177*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2179*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2181*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2183*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2185*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2187*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2189*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2191*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2193*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2195*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, 
profile_tcam.gen_template:2197*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2199*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2201*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2203*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2205*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2208*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2211*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, 
profile_tcam.gen_template:2212*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2213*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2215*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2216*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2217*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2218*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2219*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2220*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2221*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2224*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2227*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2230*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2232*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2235*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2237*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2241*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2245*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2249*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2253*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2257*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2261*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2265*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2269*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2272*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2275*/ 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2278*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2281*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2284*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2287*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2290*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2293*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2296*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2299*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2302*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2305*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2308*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2311*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2314*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2317*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2320*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 
2, wm_key_recipe.0:2323*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2326*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2329*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2332*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2335*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2338*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2341*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2344*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2347*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2350*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2353*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* 
field_cond: class_tid: 2, wm_key_recipe.0:2355*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2357*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2359*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2361*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2364*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2367*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2370*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2373*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2376*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2379*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2382*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2385*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2389*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2393*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2397*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2401*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2405*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2409*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2413*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2417*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2421*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2425*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2429*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2433*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2437*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2441*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2445*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2449*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2452*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL 
+ }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2455*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2458*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2461*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2464*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2467*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2470*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2473*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2476*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2479*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2482*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, 
+ /* field_cond: class_tid: 2, wm_key_recipe.0:2485*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2488*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2491*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2494*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2497*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2500*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2503*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2506*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2509*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2512*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2515*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2518*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2521*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2523*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2525*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2527*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2529*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2532*/ + { + .cond_opcode 
= BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2535*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2538*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2541*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2543*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2545*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2547*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2549*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2552*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2555*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2558*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2561*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: 
class_tid: 2, wm_key_recipe.0:2564*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2567*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2570*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2573*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2576*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2579*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2582*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2585*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2588*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2591*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2594*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2597*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2598*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2601*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2604*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2607*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2610*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2611*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2614*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2617*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2620*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2623*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2624*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, 
wm_key_recipe.0:2627*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2630*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2633*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2636*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* cond_execute: class_tid: 2, em_flow_conflict_cache.rd:2637*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* cond_execute: class_tid: 2, control.em_flow_conflict_cache_miss:2639*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2640*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2641*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2642*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2643*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2644*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2648*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2651*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2655*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2659*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2663*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2667*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2670*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: 
class_tid: 2, fkb_select.em_gen_template:2673*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2676*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2679*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2682*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2685*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2688*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2691*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2694*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2697*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2700*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2703*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2706*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2709*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2712*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2715*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2718*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2721*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2724*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2727*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2729*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2731*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 
2, fkb_select.em_gen_template:2735*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2739*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2742*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2745*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2749*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2753*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2757*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2761*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2765*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2769*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2773*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2777*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2780*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2783*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2786*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2789*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2792*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2795*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2798*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2801*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2804*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2807*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2810*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2813*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2816*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2819*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2822*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2825*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2828*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2831*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2834*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2837*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2840*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2843*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2846*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2849*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2852*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2855*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2858*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2861*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2864*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2867*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2870*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2873*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, 
fkb_select.em_gen_template:2876*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2882*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2885*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2888*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2891*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2894*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2897*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2898*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2899*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2900*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2901*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2902*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2903*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2907*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2911*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2914*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2917*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2921*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2925*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2929*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2933*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2937*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2941*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2945*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2949*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2952*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2955*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2958*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2961*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2964*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2967*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2970*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2973*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2976*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2979*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2982*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2985*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* 
field_cond: class_tid: 2, em_key_recipe.0:2988*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2991*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2994*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2997*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3000*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3003*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3006*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3009*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3012*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3015*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3018*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3021*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3024*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3027*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3030*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3033*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3035*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3037*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3039*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3041*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3045*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3049*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand 
= BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3053*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3057*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3060*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3063*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3066*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3069*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3073*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3077*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3081*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3085*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3089*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3093*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3097*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3101*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3105*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3109*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3117*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3121*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3125*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3129*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3133*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3136*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3139*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3142*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3145*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3148*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, 
em_key_recipe.0:3151*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3154*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3157*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3160*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3163*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3166*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3169*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3172*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3175*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3178*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3181*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3184*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3187*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3190*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3193*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3196*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3199*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3202*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3205*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3208*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3211*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3214*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3217*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3220*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3223*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3226*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3229*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3232*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3235*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3238*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3241*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3244*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3247*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3250*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3253*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3256*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3259*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3262*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3265*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3268*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3271*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3274*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3277*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, 
+ .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3280*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3283*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3286*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3289*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3292*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3295*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3298*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* cond_execute: class_tid: 2, control.field_sig_validation:3301*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_FLOW_SIG_ID + }, + /* cond_execute: class_tid: 2, em_normal.egress_generic_template:3303*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* cond_execute: class_tid: 2, control.em_add_check:3305*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL + }, + /* cond_execute: class_tid: 3, control.ts_ing_rd_check:3306*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.ing_rd_check:3307*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.non_vfr_egr_rd_check:3308*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.tsid_vfr_rd_check:3309*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, 
+ .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.tsid_vfr_egr_check:3310*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.endpoint_def_egr_rd_check:3311*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + } +}; + +struct bnxt_ulp_mapper_key_info ulp_thor2_class_key_info_list[] = { + /* class_tid: 1, , table: port_table.get_def_rd */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam_cache.def_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_TUNNEL_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_TUNNEL_ID & 0xff} + } + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_TUNNEL_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_TUNNEL_ID & 0xff} + } + }, + /* class_tid: 1, , table: mac_addr_cache.l2_table_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + 
.field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "etype", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (9 >> 8) & 0xff, + 9 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (10 >> 8) & 0xff, + 10 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam.l2_table_create */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (14 >> 8) & 0xff, + 14 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (15 >> 8) & 0xff, + 15 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { 
+ .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (16 >> 8) & 0xff, + 16 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (17 >> 8) & 0xff, + 17 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: mac_addr_cache.l2_table_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (18 >> 8) & 0xff, + 18 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (19 >> 8) & 0xff, + 19 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: proto_header_cache.rd */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (43 >> 8) & 0xff, + 43 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (326 >> 8) & 0xff, + 326 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .field_info_mask = { + .description = 
"l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (344 >> 8) & 0xff, + 344 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (58 >> 8) & 0xff, + 58 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (352 >> 8) & 0xff, + 352 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (61 >> 8) & 0xff, + 61 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (360 >> 8) & 0xff, + 360 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (64 >> 8) & 0xff, + 64 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (368 >> 8) & 0xff, + 368 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (67 >> 8) & 0xff, + 67 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (376 >> 8) & 0xff, + 376 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (70 >> 8) & 0xff, + 70 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (384 >> 8) & 0xff, + 384 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (73 >> 8) & 0xff, + 73 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (392 >> 8) & 0xff, + 392 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (76 >> 8) & 0xff, + 76 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (400 >> 8) & 0xff, + 400 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (79 >> 8) & 0xff, + 79 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (408 >> 8) & 0xff, + 408 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (82 >> 8) & 0xff, + 82 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (416 >> 8) & 0xff, + 416 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (85 >> 8) & 0xff, + 85 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (424 >> 8) & 0xff, + 424 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (88 >> 8) & 0xff, + 88 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (432 >> 8) & 0xff, + 432 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (91 >> 8) & 0xff, + 91 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (440 >> 8) & 0xff, + 440 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (94 >> 8) & 0xff, + 94 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (448 >> 8) & 0xff, + 448 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (97 >> 8) & 0xff, + 97 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (456 >> 8) & 0xff, + 456 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + ULP_THOR2_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (100 >> 8) & 0xff, + 100 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (462 >> 8) & 0xff, + 462 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (101 >> 8) & 0xff, + 101 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (468 >> 8) & 0xff, + 468 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (469 >> 8) & 0xff, + 469 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (470 >> 8) & 0xff, + 470 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (102 >> 8) & 0xff, + 102 & 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (474 >> 8) & 0xff, + 474 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (103 >> 8) & 0xff, + 103 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (478 >> 8) & 0xff, + 478 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (104 >> 8) & 0xff, + 104 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (482 >> 8) & 0xff, + 482 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 
= { + (105 >> 8) & 0xff, + 105 & 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (494 >> 8) & 0xff, + 494 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (110 >> 8) & 0xff, + 110 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (506 >> 8) & 0xff, + 506 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (507 >> 8) & 0xff, + 507 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (508 >> 8) & 0xff, + 508 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (509 >> 8) & 0xff, + 509 & 0xff, + (2 >> 8) & 0xff, + 2 & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (115 >> 8) & 0xff, + 115 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (513 >> 8) & 0xff, + 513 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (116 >> 8) & 0xff, + 116 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (517 >> 8) & 0xff, + 517 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (117 >> 8) & 0xff, + 117 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (521 >> 8) & 0xff, + 521 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (118 >> 8) & 0xff, + 118 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (525 >> 8) & 0xff, + 525 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (119 >> 8) & 0xff, + 119 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (529 >> 8) & 0xff, + 529 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (120 >> 8) & 0xff, + 120 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (533 >> 8) & 0xff, + 533 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (121 >> 8) & 0xff, + 121 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (538 >> 8) & 0xff, + 538 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (123 >> 8) & 0xff, + 123 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (543 >> 8) & 0xff, + 543 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (125 >> 8) & 0xff, + 125 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (547 >> 8) & 0xff, + 547 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (126 >> 8) & 0xff, + 126 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (551 >> 8) & 0xff, + 551 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (127 >> 8) & 0xff, + 127 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (555 >> 8) & 0xff, + 555 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (128 >> 8) & 0xff, + 128 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (559 >> 8) & 0xff, + 559 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (129 >> 8) & 0xff, + 129 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_two_vtags", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (563 >> 8) & 0xff, + 563 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (566 >> 8) & 0xff, + 566 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (569 >> 8) & 0xff, + 569 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (570 >> 8) & 0xff, + 570 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (571 >> 8) & 0xff, + 571 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (130 >> 8) & 0xff, + 130 & 0xff} + } + }, + { + .field_info_mask = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_type", + .field_bit_size = 2, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + 
.description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: wm_key_recipe.0 */ + { + .field_info_mask = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (575 >> 8) & 
0xff, + 575 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (576 >> 8) & 0xff, + 576 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (577 >> 8) & 0xff, + 577 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (578 >> 8) & 0xff, + 578 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_VF_META_FID >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_META_FID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (579 >> 8) & 0xff, + 579 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (580 >> 8) & 0xff, + 580 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (581 >> 8) & 0xff, + 581 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (584 >> 8) & 0xff, + 584 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (587 >> 8) & 0xff, + 587 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (590 >> 8) & 0xff, + 590 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + 
.description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (593 >> 8) & 0xff, + 593 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (597 >> 8) & 0xff, + 597 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (601 >> 8) & 0xff, + 601 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (131 >> 8) & 0xff, + 131 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (609 >> 8) & 0xff, + 609 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (132 >> 8) & 0xff, + 132 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (617 >> 8) & 0xff, + 617 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (621 >> 8) & 0xff, + 621 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (625 >> 8) & 0xff, + 625 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (628 >> 8) & 0xff, + 628 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (631 >> 8) & 0xff, + 631 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (634 >> 8) & 0xff, + 634 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (637 >> 8) & 0xff, + 637 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (640 >> 8) & 0xff, + 640 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (643 >> 8) & 0xff, + 643 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (646 >> 8) & 0xff, + 646 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (649 >> 8) & 0xff, + 649 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (133 >> 8) & 0xff, + 133 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (655 >> 8) & 0xff, + 655 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (134 >> 8) & 0xff, + 134 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (661 >> 8) & 0xff, + 661 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (135 >> 8) & 0xff, + 135 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (667 >> 8) & 
0xff, + 667 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (136 >> 8) & 0xff, + 136 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (673 >> 8) & 0xff, + 673 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (137 >> 8) & 0xff, + 137 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (679 >> 8) & 0xff, + 679 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (138 >> 8) & 0xff, + 138 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (685 >> 8) & 0xff, + 685 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (139 >> 8) & 0xff, + 139 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (691 >> 8) & 0xff, + 691 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (140 >> 8) & 0xff, + 140 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (697 >> 8) & 0xff, + 697 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (141 >> 8) & 0xff, + 141 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (703 >> 8) & 0xff, + 703 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (142 >> 8) & 0xff, + 142 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (709 >> 8) & 0xff, + 709 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (143 >> 8) & 0xff, + 143 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (713 >> 8) & 0xff, + 713 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (144 >> 8) & 0xff, + 144 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (717 >> 8) & 0xff, + 717 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (145 >> 8) & 0xff, + 145 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (723 >> 8) & 0xff, + 723 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (146 >> 8) & 0xff, + 146 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (729 >> 8) & 0xff, + 729 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (147 >> 8) & 0xff, + 147 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (735 >> 8) & 0xff, + 735 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (148 >> 8) & 0xff, + 148 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (741 >> 8) & 0xff, + 741 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (149 >> 8) & 0xff, + 149 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (749 >> 8) & 0xff, + 749 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (150 >> 8) & 0xff, + 150 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (757 >> 8) & 0xff, + 757 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (151 >> 8) & 0xff, + 151 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (773 >> 8) & 0xff, + 773 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (154 >> 8) & 0xff, + 154 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (789 >> 8) & 0xff, + 789 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (157 >> 8) & 0xff, + 157 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (797 >> 8) & 0xff, + 797 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (158 >> 8) & 0xff, + 158 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (805 >> 8) & 0xff, + 805 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (159 >> 8) & 0xff, + 159 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (811 >> 8) & 0xff, + 811 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (160 >> 8) & 0xff, + 160 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (817 >> 8) & 0xff, + 817 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (161 >> 8) & 0xff, + 161 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (823 >> 8) & 0xff, + 823 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (162 >> 8) & 0xff, + 162 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (829 >> 8) & 0xff, + 829 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (163 >> 8) & 0xff, + 163 & 0xff} 
+ }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (835 >> 8) & 0xff, + 835 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (164 >> 8) & 0xff, + 164 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (841 >> 8) & 0xff, + 841 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (165 >> 8) & 0xff, + 165 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (847 >> 8) & 0xff, + 847 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (166 >> 8) & 0xff, + 166 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (853 >> 8) & 0xff, + 853 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (167 >> 8) & 0xff, + 167 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (865 >> 8) & 0xff, + 865 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (170 >> 8) & 0xff, + 170 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (877 >> 8) & 0xff, + 877 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (173 >> 8) & 0xff, + 173 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (897 >> 8) & 0xff, + 897 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (180 >> 8) & 0xff, + 180 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (917 >> 8) & 0xff, + 917 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (187 >> 8) & 0xff, + 187 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (929 >> 8) & 0xff, + 929 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (190 >> 8) & 0xff, + 190 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (941 >> 8) & 0xff, + 941 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (193 >> 8) & 0xff, + 193 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (954 >> 8) & 0xff, + 954 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (197 >> 8) & 0xff, + 197 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (967 >> 8) & 0xff, + 967 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (201 >> 8) & 0xff, + 201 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (980 >> 8) & 0xff, + 980 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (205 >> 8) & 0xff, + 205 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + /* class_tid: 1, , table: proto_header_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 
0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: em_key_recipe.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1253 >> 8) & 0xff, + 1253 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1254 >> 8) & 0xff, + 1254 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1255 >> 8) & 0xff, + 1255 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1256 >> 8) & 0xff, + 1256 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_VF_META_FID >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_META_FID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1257 >> 8) & 0xff, + 1257 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1258 >> 8) & 0xff, + 1258 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1259 >> 8) & 0xff, + 1259 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1263 >> 8) & 0xff, + 1263 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1267 >> 8) & 0xff, + 1267 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1270 >> 8) & 0xff, + 1270 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1273 >> 8) & 0xff, + 1273 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1277 >> 8) & 0xff, + 1277 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1281 >> 8) & 0xff, + 1281 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (252 >> 8) & 0xff, + 252 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1289 >> 8) & 0xff, + 1289 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (253 >> 8) & 0xff, + 253 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1297 >> 8) & 0xff, + 1297 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1301 >> 8) & 0xff, + 1301 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1305 >> 8) & 0xff, + 1305 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 
32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1308 >> 8) & 0xff, + 1308 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1311 >> 8) & 0xff, + 1311 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1314 >> 8) & 0xff, + 1314 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1317 >> 8) & 0xff, + 1317 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1320 >> 8) & 0xff, + 1320 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1323 >> 8) & 0xff, + 1323 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1326 >> 8) & 0xff, + 1326 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1329 >> 8) & 0xff, + 1329 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (254 >> 8) & 0xff, + 254 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1335 >> 8) & 0xff, + 1335 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (255 >> 8) & 0xff, + 255 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1341 >> 8) & 0xff, + 1341 & 0xff, + (3 >> 8) & 0xff, + 3 
& 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (256 >> 8) & 0xff, + 256 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1347 >> 8) & 0xff, + 1347 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (257 >> 8) & 0xff, + 257 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1353 >> 8) & 0xff, + 1353 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (258 >> 8) & 0xff, + 258 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1359 >> 8) & 0xff, + 1359 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (259 >> 8) & 0xff, + 259 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1365 >> 8) & 0xff, + 1365 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (260 >> 8) & 0xff, + 260 & 0xff} + }, + .field_info_spec = { + .description 
= "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1371 >> 8) & 0xff, + 1371 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (261 >> 8) & 0xff, + 261 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1377 >> 8) & 0xff, + 1377 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (262 >> 8) & 0xff, + 262 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1383 >> 8) & 0xff, + 1383 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (263 >> 8) & 0xff, + 263 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1389 >> 8) & 0xff, + 1389 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (264 >> 8) & 0xff, + 264 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1393 >> 8) & 0xff, + 1393 & 0xff, 
+ (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (265 >> 8) & 0xff, + 265 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1397 >> 8) & 0xff, + 1397 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (266 >> 8) & 0xff, + 266 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1405 >> 8) & 0xff, + 1405 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (267 >> 8) & 0xff, + 267 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1413 >> 8) & 0xff, + 1413 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (268 >> 8) & 0xff, + 268 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1419 >> 8) & 0xff, + 1419 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (269 >> 8) & 0xff, + 269 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1425 >> 8) & 0xff, + 1425 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (270 >> 8) & 0xff, + 270 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1433 >> 8) & 0xff, + 1433 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (271 >> 8) & 0xff, + 271 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1441 >> 8) & 0xff, + 1441 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (272 >> 8) & 0xff, + 272 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1457 >> 8) & 0xff, + 1457 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (275 >> 8) & 0xff, + 275 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1473 >> 8) & 0xff, + 1473 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (278 >> 8) & 0xff, + 278 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1481 >> 8) & 0xff, + 1481 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (279 >> 8) & 0xff, + 279 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1489 >> 8) & 0xff, + 1489 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (280 >> 8) & 0xff, + 280 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1495 >> 8) & 0xff, + 1495 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (281 >> 8) & 0xff, + 281 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1501 >> 8) & 0xff, + 1501 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (282 >> 8) & 0xff, + 282 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1507 >> 8) & 0xff, + 1507 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (283 >> 8) & 0xff, + 283 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1513 >> 8) & 0xff, + 1513 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (284 >> 8) & 0xff, + 284 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1519 >> 8) & 0xff, + 1519 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (285 >> 8) & 0xff, + 285 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1525 >> 8) & 0xff, + 1525 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (286 >> 8) & 0xff, + 286 & 0xff} + }, + .field_info_spec 
= { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1531 >> 8) & 0xff, + 1531 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (287 >> 8) & 0xff, + 287 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1537 >> 8) & 0xff, + 1537 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (288 >> 8) & 0xff, + 288 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1549 >> 8) & 0xff, + 1549 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (291 >> 8) & 0xff, + 291 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1561 >> 8) & 0xff, + 1561 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (294 >> 8) & 0xff, + 294 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1573 >> 8) & 
0xff, + 1573 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (297 >> 8) & 0xff, + 297 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1585 >> 8) & 0xff, + 1585 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (300 >> 8) & 0xff, + 300 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1597 >> 8) & 0xff, + 1597 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (303 >> 8) & 0xff, + 303 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1609 >> 8) & 0xff, + 1609 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (306 >> 8) & 0xff, + 306 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1621 >> 8) & 0xff, + 1621 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (309 >> 8) & 0xff, + 309 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1633 >> 8) & 0xff, + 1633 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (312 >> 8) & 0xff, + 312 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1645 >> 8) & 0xff, + 1645 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (315 >> 8) & 0xff, + 315 & 0xff} + } + }, + /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec 
= { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: port_table.get_def_rd */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 2, , table: l2_cntxt_tcam_cache.def_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 2, , table: proto_header_cache.rd */ + { + .field_info_mask = { + .description = 
"group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1689 >> 8) & 0xff, + 1689 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1974 >> 8) & 0xff, + 1974 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .field_info_mask = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(1992 >> 8) & 0xff, + 1992 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (374 >> 8) & 0xff, + 374 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2000 >> 8) & 0xff, + 2000 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (377 >> 8) & 0xff, + 377 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2008 >> 8) & 0xff, + 2008 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (380 >> 8) & 0xff, + 380 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2016 >> 8) & 0xff, + 2016 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (383 >> 8) & 0xff, + 383 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2024 >> 8) & 0xff, + 2024 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (386 >> 8) & 0xff, + 386 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2032 >> 8) & 0xff, + 2032 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (389 >> 8) & 0xff, + 389 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2040 >> 8) & 0xff, + 2040 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (392 >> 8) & 0xff, + 392 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2048 >> 8) & 0xff, + 2048 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (395 >> 8) & 0xff, + 395 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2056 >> 8) & 0xff, + 2056 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (398 >> 8) & 0xff, + 398 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2064 >> 8) & 0xff, + 2064 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (401 >> 8) & 0xff, + 401 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2072 >> 8) & 0xff, + 2072 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (404 >> 8) & 0xff, + 404 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2080 >> 8) & 0xff, + 2080 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (407 >> 8) & 0xff, + 407 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2088 >> 8) & 0xff, + 2088 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (410 >> 8) & 0xff, + 410 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2096 >> 8) & 0xff, + 2096 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (413 >> 8) & 0xff, + 413 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2104 >> 8) & 0xff, + 2104 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (416 >> 8) & 0xff, + 416 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2110 >> 8) & 0xff, + 2110 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (417 >> 8) & 0xff, + 417 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2116 >> 8) & 0xff, + 2116 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2117 >> 8) & 0xff, + 2117 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2118 >> 8) & 0xff, + 2118 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (418 >> 8) & 0xff, + 418 & 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2122 >> 8) & 0xff, + 2122 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (419 >> 8) & 0xff, + 419 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2126 >> 8) & 0xff, + 2126 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (420 >> 8) & 0xff, + 420 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2130 >> 8) & 0xff, + 2130 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (421 >> 8) & 0xff, + 421 & 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2142 >> 8) & 0xff, + 2142 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (426 >> 8) & 0xff, + 426 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2154 >> 8) & 0xff, + 2154 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2155 >> 8) & 0xff, + 2155 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + 
.field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2156 >> 8) & 0xff, + 2156 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2157 >> 8) & 0xff, + 2157 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (431 >> 8) & 0xff, + 431 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2161 >> 8) & 0xff, + 2161 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (432 >> 8) & 0xff, + 432 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2165 >> 8) & 0xff, + 2165 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (433 >> 8) & 0xff, + 433 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_type", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2169 >> 8) & 0xff, + 2169 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (434 >> 8) & 0xff, + 434 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2173 >> 8) & 0xff, + 2173 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (435 >> 8) & 0xff, + 435 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2177 >> 8) & 0xff, + 2177 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (436 >> 8) & 0xff, + 436 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2181 >> 8) & 0xff, + 2181 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (437 >> 8) & 0xff, + 437 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { 
+ .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2185 >> 8) & 0xff, + 2185 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (438 >> 8) & 0xff, + 438 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2189 >> 8) & 0xff, + 2189 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (439 >> 8) & 0xff, + 439 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2193 >> 8) & 0xff, + 2193 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (440 >> 8) & 0xff, + 440 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2197 >> 8) & 0xff, + 2197 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (441 >> 8) & 0xff, + 441 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 
0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2201 >> 8) & 0xff, + 2201 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (442 >> 8) & 0xff, + 442 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2205 >> 8) & 0xff, + 2205 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2208 >> 8) & 0xff, + 2208 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2211 >> 8) & 0xff, + 2211 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2212 >> 8) & 0xff, + 2212 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2213 >> 8) & 0xff, + 2213 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + 
}, + { + .field_info_mask = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_vtag_present", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_count", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 2, , table: wm_key_recipe.0 */ 
+ { + .field_info_mask = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2215 >> 8) & 0xff, + 2215 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2216 >> 8) & 0xff, + 2216 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2217 >> 8) & 0xff, + 2217 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2218 >> 8) & 0xff, + 2218 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_VF_META_FID >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_VF_META_FID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2219 >> 8) & 0xff, + 2219 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2220 >> 8) & 0xff, + 2220 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2221 >> 8) & 0xff, + 2221 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2224 >> 8) & 0xff, + 2224 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2227 >> 8) & 0xff, + 2227 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (443 >> 8) & 0xff, + 443 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2232 >> 8) & 0xff, + 2232 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (444 >> 8) & 0xff, + 444 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2237 >> 8) & 0xff, + 2237 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2241 >> 8) & 0xff, + 2241 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2245 >> 8) & 0xff, + 2245 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (445 >> 8) & 0xff, + 445 & 0xff} + }, + 
.field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2253 >> 8) & 0xff, + 2253 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (446 >> 8) & 0xff, + 446 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2261 >> 8) & 0xff, + 2261 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2265 >> 8) & 0xff, + 2265 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2269 >> 8) & 0xff, + 2269 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2272 >> 8) & 0xff, + 2272 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2275 >> 8) & 0xff, + 2275 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2278 >> 8) & 0xff, + 2278 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2281 >> 8) & 0xff, + 2281 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2284 >> 8) & 0xff, + 2284 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2287 >> 8) & 0xff, + 2287 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2290 >> 8) & 0xff, + 2290 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2293 >> 8) & 0xff, + 2293 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (447 >> 8) & 0xff, + 447 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2299 >> 8) & 0xff, + 2299 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (448 >> 8) & 0xff, + 448 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2305 >> 8) & 0xff, + 2305 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (449 >> 8) & 0xff, + 449 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2311 >> 8) & 0xff, + 2311 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (450 >> 8) & 0xff, + 450 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2317 >> 8) & 0xff, + 2317 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (451 >> 8) & 0xff, + 451 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2323 >> 8) & 0xff, + 2323 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (452 >> 8) & 0xff, + 452 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2329 >> 8) & 0xff, + 2329 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (453 >> 8) & 0xff, + 453 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2335 >> 8) & 0xff, + 2335 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (454 >> 8) & 0xff, + 454 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2341 >> 8) & 0xff, + 2341 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (455 >> 8) & 0xff, + 455 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2347 >> 8) & 0xff, + 2347 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (456 >> 8) & 0xff, + 456 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2353 >> 8) & 0xff, + 2353 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (457 >> 8) & 0xff, + 457 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2357 >> 8) & 0xff, + 2357 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (458 >> 8) & 0xff, + 458 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2361 >> 8) & 0xff, + 2361 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (459 >> 8) & 0xff, + 459 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2367 >> 8) & 0xff, + 2367 & 0xff, + (3 >> 8) & 0xff, + 3 & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (460 >> 8) & 0xff, + 460 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2373 >> 8) & 0xff, + 2373 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (461 >> 8) & 0xff, + 461 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2379 >> 8) & 0xff, + 2379 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (462 >> 8) & 0xff, + 462 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2385 >> 8) & 0xff, + 2385 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (463 >> 8) & 0xff, + 463 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2393 >> 8) & 0xff, + 2393 & 0xff, + (4 >> 8) & 
0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (464 >> 8) & 0xff, + 464 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2401 >> 8) & 0xff, + 2401 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (465 >> 8) & 0xff, + 465 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2417 >> 8) & 0xff, + 2417 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (468 >> 8) & 0xff, + 468 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2433 >> 8) & 0xff, + 2433 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (471 >> 8) & 0xff, + 471 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2441 >> 8) & 0xff, + 2441 & 
0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (472 >> 8) & 0xff, + 472 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2449 >> 8) & 0xff, + 2449 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (473 >> 8) & 0xff, + 473 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2455 >> 8) & 0xff, + 2455 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (474 >> 8) & 0xff, + 474 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2461 >> 8) & 0xff, + 2461 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (475 >> 8) & 0xff, + 475 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2467 >> 8) & 0xff, + 2467 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (476 >> 8) & 0xff, + 476 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2473 >> 8) & 0xff, + 2473 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (477 >> 8) & 0xff, + 477 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2479 >> 8) & 0xff, + 2479 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (478 >> 8) & 0xff, + 478 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2485 >> 8) & 0xff, + 2485 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (479 >> 8) & 0xff, + 479 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size 
= 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2491 >> 8) & 0xff, + 2491 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (480 >> 8) & 0xff, + 480 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2497 >> 8) & 0xff, + 2497 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (481 >> 8) & 0xff, + 481 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2509 >> 8) & 0xff, + 2509 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (484 >> 8) & 0xff, + 484 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2521 >> 8) & 0xff, + 2521 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (487 >> 8) & 0xff, + 487 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2541 >> 8) & 0xff, + 2541 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (494 >> 8) & 0xff, + 494 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2561 >> 8) & 0xff, + 2561 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (501 >> 8) & 0xff, + 501 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2573 >> 8) & 0xff, + 2573 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (504 >> 8) & 0xff, + 504 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2585 >> 8) & 0xff, + 2585 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (507 >> 8) & 0xff, + 507 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (2598 >> 8) & 0xff, + 2598 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (511 >> 8) & 0xff, + 511 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2611 >> 8) & 0xff, + 2611 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (515 >> 8) & 0xff, + 515 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2624 >> 8) & 0xff, + 2624 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (519 >> 8) & 0xff, + 519 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 2, , table: proto_header_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: em_key_recipe.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2897 >> 8) & 0xff, + 2897 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2898 >> 8) & 0xff, + 2898 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2899 >> 8) & 0xff, + 2899 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2900 >> 8) & 0xff, + 2900 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_VF_META_FID >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_META_FID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2901 >> 8) & 0xff, + 2901 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 4, 
+ .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2902 >> 8) & 0xff, + 2902 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2903 >> 8) & 0xff, + 2903 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2907 >> 8) & 0xff, + 2907 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2911 >> 8) & 0xff, + 2911 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2914 >> 8) & 0xff, + 2914 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + 
.description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2917 >> 8) & 0xff, + 2917 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2921 >> 8) & 0xff, + 2921 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2925 >> 8) & 0xff, + 2925 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (566 >> 8) & 0xff, + 566 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2933 >> 8) & 0xff, + 2933 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (567 >> 8) & 0xff, + 567 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2941 >> 8) & 0xff, + 2941 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2945 >> 8) & 0xff, + 2945 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2949 >> 8) & 0xff, + 2949 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2952 >> 8) & 0xff, + 2952 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2955 >> 8) & 0xff, + 2955 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2958 >> 8) & 0xff, + 2958 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2961 >> 8) & 0xff, + 2961 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2964 >> 8) & 0xff, + 2964 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2967 >> 8) & 0xff, + 2967 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2970 >> 8) & 0xff, + 2970 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2973 >> 8) & 0xff, + 2973 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (568 >> 8) & 0xff, + 568 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2979 >> 8) & 0xff, + 2979 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (569 >> 8) & 0xff, + 569 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2985 >> 8) & 0xff, + 2985 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (570 >> 8) & 0xff, + 570 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2991 >> 8) & 0xff, + 2991 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (571 >> 8) & 0xff, + 571 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2997 >> 8) & 0xff, + 2997 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (572 >> 8) & 0xff, + 572 & 0xff} + }, + 
.field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3003 >> 8) & 0xff, + 3003 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (573 >> 8) & 0xff, + 573 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3009 >> 8) & 0xff, + 3009 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (574 >> 8) & 0xff, + 574 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3015 >> 8) & 0xff, + 3015 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (575 >> 8) & 0xff, + 575 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3021 >> 8) & 0xff, + 3021 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (576 >> 8) & 0xff, + 576 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(3027 >> 8) & 0xff, + 3027 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (577 >> 8) & 0xff, + 577 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3033 >> 8) & 0xff, + 3033 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (578 >> 8) & 0xff, + 578 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3037 >> 8) & 0xff, + 3037 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (579 >> 8) & 0xff, + 579 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3041 >> 8) & 0xff, + 3041 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (580 >> 8) & 0xff, + 580 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3049 >> 8) & 0xff, + 3049 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (581 >> 8) & 0xff, + 581 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3057 >> 8) & 0xff, + 3057 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (582 >> 8) & 0xff, + 582 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3063 >> 8) & 0xff, + 3063 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (583 >> 8) & 0xff, + 583 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3069 >> 8) & 0xff, + 3069 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (584 >> 8) & 0xff, + 584 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3077 >> 8) & 0xff, + 3077 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (585 >> 8) & 0xff, + 585 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", 
+ .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3085 >> 8) & 0xff, + 3085 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (586 >> 8) & 0xff, + 586 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3101 >> 8) & 0xff, + 3101 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (589 >> 8) & 0xff, + 589 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3117 >> 8) & 0xff, + 3117 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (592 >> 8) & 0xff, + 592 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3125 >> 8) & 0xff, + 3125 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (593 >> 8) & 0xff, + 593 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3133 >> 8) & 0xff, + 3133 & 0xff, + (3 >> 8) & 
0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (594 >> 8) & 0xff, + 594 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3139 >> 8) & 0xff, + 3139 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (595 >> 8) & 0xff, + 595 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3145 >> 8) & 0xff, + 3145 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (596 >> 8) & 0xff, + 596 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3151 >> 8) & 0xff, + 3151 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (597 >> 8) & 0xff, + 597 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3157 >> 8) & 0xff, + 3157 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (598 >> 8) & 0xff, + 598 & 0xff} + 
}, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3163 >> 8) & 0xff, + 3163 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (599 >> 8) & 0xff, + 599 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3169 >> 8) & 0xff, + 3169 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (600 >> 8) & 0xff, + 600 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3175 >> 8) & 0xff, + 3175 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (601 >> 8) & 0xff, + 601 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3181 >> 8) & 0xff, + 3181 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (602 >> 8) & 0xff, + 602 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3193 >> 8) & 0xff, + 3193 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (605 >> 8) & 0xff, + 605 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3205 >> 8) & 0xff, + 3205 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (608 >> 8) & 0xff, + 608 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3217 >> 8) & 0xff, + 3217 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (611 >> 8) & 0xff, + 611 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3229 >> 8) & 0xff, + 3229 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (614 >> 8) & 0xff, + 614 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3241 >> 8) & 0xff, + 3241 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (617 >> 8) & 0xff, + 617 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3253 >> 8) & 0xff, + 3253 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (620 >> 8) & 0xff, + 620 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3265 >> 8) & 0xff, + 3265 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (623 >> 8) & 0xff, + 623 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3277 >> 8) & 0xff, + 3277 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (626 >> 8) & 0xff, + 626 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3289 >> 8) & 0xff, + 3289 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (629 >> 8) & 0xff, + 629 & 0xff} + } 
+ }, + /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 3, , table: table_scope_cache.tsid_ing_rd */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec 
= { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 3, , table: profile_tcam_bypass.ing_catch_all */ + { + .field_info_mask = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description 
= "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { 
+ .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { 
+ .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc 
= BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = 
"prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 3, , table: table_scope_cache.tsid_ing_wr */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 3, , table: port_table.ing_wr */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.svif_ing */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc 
= BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, 
+ .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: port_table.egr_wr_0 */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.no_vfr_egr_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.non_vfr_svif_egr */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 
12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr0", + 
.field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: profile_tcam_bypass.non_vfr_egr_catch_all */ + { + .field_info_mask = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + 
}, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ .field_info_spec = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.non_vfr_egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: table_scope_cache.tsid_vfr_rd */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.vf2vf_ing */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { 
+ .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR2_SYM_VF_2_VFR_META_MASK >> 24) & 0xff, + (ULP_THOR2_SYM_VF_2_VFR_META_MASK >> 16) & 0xff, + (ULP_THOR2_SYM_VF_2_VFR_META_MASK >> 8) & 0xff, + ULP_THOR2_SYM_VF_2_VFR_META_MASK & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR2_SYM_VF_2_VF_META_VAL >> 24) & 0xff, + (ULP_THOR2_SYM_VF_2_VF_META_VAL >> 16) & 0xff, + (ULP_THOR2_SYM_VF_2_VF_META_VAL >> 8) & 0xff, + ULP_THOR2_SYM_VF_2_VF_META_VAL & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: table_scope_cache.tsid_vfr_wr */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_rd */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: profile_tcam_bypass.tsid_vfr_egr_catch_all */ + { + .field_info_mask = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_dcn_present", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_subtype", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_protocol", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ot_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_is_tcp_udp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_type", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl3_hdr_valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "otl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec 
= { + .description = "int_ifa_tail", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_group", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "int_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description 
= "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_count", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "padding", + .field_bit_size = 72, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_wr */ + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf2vf_egr */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr1", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "addr0", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "out_tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + 
}, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: port_table.egr_wr_0 */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor2_class_key_ext_list[] = { + /* class_tid: 1, , table: control.dmac_calculation */ + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (4 >> 8) & 0xff, + 4 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (1 >> 8) & 
0xff, + 1 & 0xff} + }, + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (6 >> 8) & 0xff, + 6 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr2 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (2 >> 8) & 0xff, + 2 & 0xff} + }, + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (7 >> 8) & 0xff, + 7 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: control.terminating_flow */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (34 >> 8) & 0xff, + 34 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (4 >> 8) & 0xff, + 4 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (36 >> 8) & 0xff, + 36 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (5 >> 8) & 0xff, + 5 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (38 
>> 8) & 0xff, + 38 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (6 >> 8) & 0xff, + 6 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (40 >> 8) & 0xff, + 40 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (63 >> 8) & 0xff, + 63 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (83 >> 8) & 0xff, + 83 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (98 >> 8) & 0xff, + 98 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (104 >> 8) & 0xff, + 104 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (110 >> 8) & 0xff, + 110 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (116 >> 8) & 0xff, + 116 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (122 >> 8) & 0xff, + 122 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (128 >> 8) & 0xff, + 128 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (133 >> 8) & 0xff, + 133 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (138 >> 8) & 0xff, + 138 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (144 >> 8) & 0xff, + 144 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (151 >> 8) & 0xff, + 151 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (159 >> 8) & 0xff, + 159 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (20 >> 8) & 0xff, + 20 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (163 >> 8) & 0xff, + 163 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (21 >> 8) & 0xff, + 21 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (167 >> 8) & 0xff, + 167 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (175 >> 8) & 0xff, + 175 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (183 >> 8) & 0xff, + 183 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (191 >> 8) & 0xff, + 191 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (199 >> 8) & 0xff, + 199 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (207 >> 8) & 0xff, + 207 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (27 >> 8) & 0xff, + 27 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (211 >> 8) & 0xff, + 211 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (28 >> 8) & 0xff, + 28 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (215 >> 8) & 0xff, + 215 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (223 >> 8) & 0xff, + 223 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (231 >> 8) & 0xff, + 231 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (239 >> 8) & 0xff, + 239 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (247 >> 8) & 0xff, + 247 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (33 >> 8) & 0xff, + 33 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (251 >> 8) & 0xff, + 251 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (34 >> 8) & 0xff, + 34 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (255 >> 8) & 0xff, + 255 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (263 >> 8) & 0xff, + 263 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (36 >> 8) & 0xff, + 36 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (267 >> 8) & 0xff, + 267 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (37 >> 8) & 0xff, + 37 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (271 >> 8) & 0xff, + 271 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (279 >> 8) & 0xff, + 279 & 0xff, + (4 >> 8) & 0xff, + 4 & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (39 >> 8) & 0xff, + 39 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (283 >> 8) & 0xff, + 283 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (40 >> 8) & 0xff, + 40 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (287 >> 8) & 0xff, + 287 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (295 >> 8) & 0xff, + 295 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (42 >> 8) & 0xff, + 42 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (299 >> 8) & 0xff, + 299 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (43 >> 8) & 0xff, + 43 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (303 >> 8) & 0xff, + 303 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (308 >> 8) & 0xff, + 308 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (311 >> 8) & 0xff, + 311 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (315 >> 8) & 0xff, + 315 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (318 >> 8) & 0xff, + 318 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (322 >> 8) & 0xff, + 322 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (324 >> 8) & 0xff, + 324 & 0xff, 
+ (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: control.profile_tcam_priority */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (328 >> 8) & 0xff, + 328 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (51 >> 8) & 0xff, + 51 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (330 >> 8) & 0xff, + 330 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (52 >> 8) & 0xff, + 52 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (332 >> 8) & 0xff, + 332 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (53 >> 8) & 0xff, + 53 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (334 >> 8) & 0xff, + 334 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (54 >> 8) & 0xff, + 54 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (336 >> 8) & 0xff, + 336 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (55 >> 8) & 0xff, + 55 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (338 >> 8) & 0xff, + 338 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (56 >> 8) & 0xff, + 56 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (340 >> 8) & 0xff, + 340 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (57 >> 8) & 0xff, + 57 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (342 >> 8) & 0xff, + 342 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L2} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (346 >> 8) & 0xff, + 346 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (59 >> 8) & 0xff, + 59 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (348 >> 8) & 0xff, + 348 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (60 >> 8) & 0xff, + 60 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (350 >> 8) & 0xff, + 350 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (354 >> 8) & 0xff, + 354 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (62 >> 8) & 0xff, + 62 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (356 >> 8) & 0xff, + 356 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (63 >> 8) & 0xff, + 63 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (358 >> 8) & 0xff, + 358 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (362 >> 8) & 0xff, + 362 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (65 >> 8) & 0xff, + 65 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (364 >> 8) & 0xff, + 364 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (66 >> 8) & 0xff, + 66 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (366 >> 8) & 0xff, + 366 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (370 >> 8) & 0xff, + 370 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (68 >> 8) & 0xff, + 68 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (372 >> 8) & 0xff, + 372 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (69 >> 8) & 0xff, + 69 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (374 >> 8) & 
0xff, + 374 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (378 >> 8) & 0xff, + 378 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (71 >> 8) & 0xff, + 71 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (380 >> 8) & 0xff, + 380 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (72 >> 8) & 0xff, + 72 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (382 >> 8) & 0xff, + 382 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (386 >> 8) & 0xff, + 386 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (74 >> 8) & 0xff, + 74 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (388 >> 8) & 0xff, + 388 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (75 >> 8) & 
0xff, + 75 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (390 >> 8) & 0xff, + 390 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (394 >> 8) & 0xff, + 394 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (77 >> 8) & 0xff, + 77 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (396 >> 8) & 0xff, + 396 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (78 >> 8) & 0xff, + 78 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (398 >> 8) & 0xff, + 398 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (402 >> 8) & 0xff, + 402 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (80 >> 8) & 0xff, + 80 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (404 >> 8) & 0xff, + 404 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (81 >> 8) & 0xff, + 81 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (406 >> 8) & 0xff, + 406 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (410 >> 8) & 0xff, + 410 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (83 >> 8) & 0xff, + 83 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (412 >> 8) & 0xff, + 412 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (84 >> 8) & 0xff, + 84 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (414 >> 8) & 0xff, + 414 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (418 >> 8) & 0xff, + 418 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (86 >> 8) & 0xff, + 86 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (420 >> 8) & 0xff, + 420 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (87 >> 8) & 0xff, + 87 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (422 >> 8) & 0xff, + 422 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (426 >> 8) & 0xff, + 426 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (89 >> 8) & 0xff, + 89 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (428 >> 8) & 0xff, + 428 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (90 >> 8) & 0xff, + 90 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (430 >> 8) & 0xff, + 430 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (434 >> 8) & 0xff, + 434 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (92 >> 8) & 0xff, + 92 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (436 >> 8) & 0xff, + 436 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (93 >> 8) & 0xff, + 93 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (438 >> 8) & 0xff, + 438 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (442 >> 8) & 0xff, + 442 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (95 >> 8) & 0xff, + 95 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (444 >> 8) & 0xff, + 444 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (96 >> 8) & 0xff, + 96 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (446 >> 8) & 0xff, 
+ 446 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (450 >> 8) & 0xff, + 450 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (98 >> 8) & 0xff, + 98 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (452 >> 8) & 0xff, + 452 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (99 >> 8) & 0xff, + 99 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (454 >> 8) & 0xff, + 454 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (459 >> 8) & 0xff, + 459 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (465 >> 8) & 0xff, + 465 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (472 >> 8) & 0xff, + 472 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (476 >> 8) & 0xff, + 476 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (480 >> 8) & 0xff, + 480 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (484 >> 8) & 0xff, + 484 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (106 >> 8) & 0xff, + 106 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (486 >> 8) & 0xff, + 486 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (107 >> 8) & 0xff, + 107 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (488 >> 8) & 0xff, + 488 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (108 >> 8) & 0xff, + 108 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (490 >> 8) & 0xff, + 490 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (109 >> 8) & 0xff, + 109 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (492 >> 8) & 0xff, + 492 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (496 >> 8) & 0xff, + 496 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_VXLAN_GPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (111 >> 8) & 0xff, + 111 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (498 >> 8) & 0xff, + 498 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_GENEVE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (112 >> 8) & 0xff, + 112 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 
= BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (500 >> 8) & 0xff, + 500 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_GRE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (113 >> 8) & 0xff, + 113 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (502 >> 8) & 0xff, + 502 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (114 >> 8) & 0xff, + 114 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (504 >> 8) & 0xff, + 504 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR2}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (511 >> 8) & 0xff, + 511 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (515 >> 8) & 0xff, + 515 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (519 >> 8) & 0xff, + 519 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (523 >> 8) & 0xff, + 523 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (527 >> 8) & 0xff, + 527 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (531 >> 8) & 0xff, + 531 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (535 >> 8) & 0xff, + 535 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (122 >> 8) & 0xff, + 122 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (537 >> 8) & 0xff, + 537 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (540 >> 8) & 0xff, + 540 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (124 >> 8) & 0xff, + 124 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (542 >> 8) & 0xff, + 542 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (545 >> 8) & 0xff, + 545 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (549 >> 8) & 0xff, + 549 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (553 >> 8) & 0xff, + 553 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (557 >> 8) & 0xff, + 557 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description 
= "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (561 >> 8) & 0xff, + 561 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (573 >> 8) & 0xff, + 573 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (605 >> 8) & 0xff, + 605 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (613 >> 8) & 0xff, + 613 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (652 >> 8) & 0xff, + 652 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (658 >> 8) & 0xff, + 658 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (664 >> 8) & 0xff, + 664 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (670 >> 8) & 0xff, + 670 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (676 >> 8) & 0xff, + 676 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (682 >> 8) & 0xff, + 682 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (688 >> 8) & 0xff, + 688 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (694 >> 8) & 0xff, + 694 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (700 >> 8) & 0xff, + 700 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (706 >> 8) & 0xff, + 706 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (711 >> 8) & 0xff, + 711 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (715 >> 8) & 0xff, + 715 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (720 >> 8) & 0xff, + 720 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (726 >> 8) & 0xff, + 726 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (732 >> 8) & 0xff, + 732 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (738 >> 8) & 0xff, + 738 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (745 >> 8) & 0xff, + 745 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (753 >> 8) & 0xff, + 753 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (761 >> 8) & 0xff, + 761 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (152 >> 8) & 0xff, + 152 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 
= { + (765 >> 8) & 0xff, + 765 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (153 >> 8) & 0xff, + 153 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (769 >> 8) & 0xff, + 769 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (777 >> 8) & 0xff, + 777 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (155 >> 8) & 0xff, + 155 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (781 >> 8) & 0xff, + 781 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (156 >> 8) & 0xff, + 156 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (785 >> 8) & 0xff, + 785 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (793 >> 8) & 0xff, + 793 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (801 >> 8) & 0xff, + 801 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (808 >> 8) & 0xff, + 808 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (814 >> 8) & 0xff, + 814 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (820 >> 8) & 0xff, + 820 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (826 >> 8) & 0xff, + 826 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (832 >> 8) & 0xff, + 832 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (838 >> 8) & 0xff, + 838 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (844 >> 8) & 0xff, + 844 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (850 >> 8) & 0xff, + 850 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (856 >> 8) & 0xff, + 856 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (168 >> 8) & 0xff, + 168 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (859 >> 8) & 0xff, + 859 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (169 >> 8) & 0xff, + 169 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (862 >> 8) & 0xff, + 862 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (868 >> 8) & 0xff, + 868 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (171 >> 8) & 0xff, + 171 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (871 >> 8) & 0xff, + 871 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (172 >> 8) & 0xff, + 172 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (874 >> 8) & 0xff, + 874 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (879 >> 8) & 0xff, + 879 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (174 >> 8) & 0xff, + 174 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (881 >> 8) & 0xff, + 881 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (175 >> 8) & 0xff, + 175 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (883 >> 8) & 0xff, + 883 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (176 >> 8) & 0xff, + 176 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (885 >> 8) & 0xff, + 885 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (177 >> 8) & 0xff, + 177 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (888 >> 8) & 0xff, + 888 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (178 >> 8) & 0xff, + 178 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (891 >> 8) & 0xff, + 891 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (179 >> 8) & 0xff, + 179 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (894 >> 8) & 0xff, + 894 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (899 >> 8) & 0xff, + 899 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (181 >> 8) & 0xff, + 181 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (901 >> 8) & 0xff, + 901 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (182 >> 8) & 0xff, + 182 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (903 >> 8) & 0xff, + 903 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (183 >> 8) & 0xff, + 183 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (905 >> 8) & 0xff, + 905 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 
= BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (184 >> 8) & 0xff, + 184 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (908 >> 8) & 0xff, + 908 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (185 >> 8) & 0xff, + 185 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (911 >> 8) & 0xff, + 911 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (186 >> 8) & 0xff, + 186 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (914 >> 8) & 0xff, + 914 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (920 >> 8) & 0xff, + 920 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (188 >> 8) & 0xff, + 188 & 0xff} + }, + { + .description = "l3.qos", 
+ .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (923 >> 8) & 0xff, + 923 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (189 >> 8) & 0xff, + 189 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (926 >> 8) & 0xff, + 926 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (932 >> 8) & 0xff, + 932 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (191 >> 8) & 0xff, + 191 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (935 >> 8) & 0xff, + 935 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (192 >> 8) & 0xff, + 192 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (938 >> 8) & 
0xff, + 938 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (944 >> 8) & 0xff, + 944 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (194 >> 8) & 0xff, + 194 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (947 >> 8) & 0xff, + 947 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (195 >> 8) & 0xff, + 195 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (950 >> 8) & 0xff, + 950 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (196 >> 8) & 0xff, + 196 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (953 >> 8) & 0xff, + 953 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = 
BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (957 >> 8) & 0xff, + 957 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (198 >> 8) & 0xff, + 198 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (960 >> 8) & 0xff, + 960 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (199 >> 8) & 0xff, + 199 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (963 >> 8) & 0xff, + 963 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (200 >> 8) & 0xff, + 200 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (966 >> 8) & 0xff, + 966 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (970 >> 8) & 0xff, + 970 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (202 >> 8) & 0xff, + 202 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (973 >> 8) & 0xff, + 973 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (203 >> 8) & 0xff, + 203 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (976 >> 8) & 0xff, + 976 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (204 >> 8) & 0xff, + 204 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (979 >> 8) & 0xff, + 979 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (983 >> 8) & 0xff, + 983 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (206 >> 8) & 0xff, + 206 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (986 >> 8) & 0xff, + 986 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (207 >> 8) & 0xff, + 207 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (989 >> 8) & 0xff, + 989 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (208 >> 8) & 0xff, + 208 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (992 >> 8) & 0xff, + 992 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1015 >> 8) & 0xff, + 1015 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1035 >> 8) & 0xff, + 1035 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1050 >> 8) & 0xff, + 1050 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1056 >> 8) & 0xff, + 1056 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1062 >> 8) & 0xff, + 1062 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1068 >> 8) & 0xff, + 1068 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1074 >> 8) & 0xff, + 1074 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1080 >> 8) & 0xff, + 1080 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1085 >> 8) & 0xff, + 1085 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1091 >> 8) & 0xff, + 1091 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1098 >> 8) & 0xff, + 1098 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1105 >> 8) & 0xff, + 1105 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1113 >> 8) & 0xff, + 1113 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (222 >> 8) & 0xff, + 222 & 0xff} + }, + { + 
.description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1117 >> 8) & 0xff, + 1117 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (223 >> 8) & 0xff, + 223 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1121 >> 8) & 0xff, + 1121 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1129 >> 8) & 0xff, + 1129 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1136 >> 8) & 0xff, + 1136 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1142 >> 8) & 0xff, + 1142 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1148 >> 8) & 0xff, + 1148 & 0xff, + 
(3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1154 >> 8) & 0xff, + 1154 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (229 >> 8) & 0xff, + 229 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1157 >> 8) & 0xff, + 1157 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (230 >> 8) & 0xff, + 230 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1160 >> 8) & 0xff, + 1160 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1166 >> 8) & 0xff, + 1166 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1172 >> 8) & 0xff, + 1172 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1178 >> 8) & 0xff, + 1178 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1184 >> 8) & 0xff, + 1184 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (235 >> 8) & 0xff, + 235 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1187 >> 8) & 0xff, + 1187 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (236 >> 8) & 0xff, + 236 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1190 >> 8) & 0xff, + 1190 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1196 >> 8) & 0xff, + 1196 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (238 >> 8) & 0xff, + 238 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1199 >> 8) & 0xff, + 1199 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (239 >> 8) & 0xff, + 239 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1202 >> 8) & 0xff, + 1202 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1208 >> 8) & 0xff, + 1208 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (241 >> 8) & 0xff, + 241 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1211 >> 8) & 0xff, + 1211 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (242 >> 8) & 0xff, + 242 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1214 >> 8) & 0xff, + 1214 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = 
{ + (1220 >> 8) & 0xff, + 1220 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (244 >> 8) & 0xff, + 244 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1223 >> 8) & 0xff, + 1223 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (245 >> 8) & 0xff, + 245 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1226 >> 8) & 0xff, + 1226 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1232 >> 8) & 0xff, + 1232 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (247 >> 8) & 0xff, + 247 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1235 >> 8) & 0xff, + 1235 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (248 >> 8) & 0xff, + 248 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1238 >> 8) & 0xff, + 1238 & 0xff, + (3 
>> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1244 >> 8) & 0xff, + 1244 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (250 >> 8) & 0xff, + 250 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1247 >> 8) & 0xff, + 1247 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (251 >> 8) & 0xff, + 251 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1250 >> 8) & 0xff, + 1250 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1285 >> 8) & 0xff, + 1285 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1293 >> 8) & 0xff, + 1293 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1332 >> 8) & 0xff, + 1332 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1338 >> 8) & 0xff, + 1338 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1344 >> 8) & 0xff, + 1344 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1350 >> 8) & 0xff, + 1350 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1356 >> 8) & 0xff, + 1356 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1362 >> 8) & 0xff, + 1362 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1368 >> 8) & 0xff, + 1368 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1374 >> 8) & 0xff, + 1374 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1380 >> 8) & 0xff, + 1380 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1386 >> 8) & 0xff, + 1386 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1391 >> 8) & 
0xff, + 1391 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1395 >> 8) & 0xff, + 1395 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1401 >> 8) & 0xff, + 1401 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1409 >> 8) & 0xff, + 1409 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1416 >> 8) & 0xff, + 1416 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1422 >> 8) & 0xff, + 1422 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1429 >> 8) & 0xff, + 1429 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1437 >> 8) & 0xff, + 1437 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1445 >> 8) & 0xff, + 1445 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (273 >> 8) & 0xff, + 273 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1449 >> 8) & 0xff, + 1449 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (274 >> 8) & 0xff, + 274 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1453 >> 8) & 0xff, + 1453 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1461 >> 8) & 0xff, + 1461 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (276 >> 8) & 0xff, + 276 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1465 >> 8) & 0xff, + 1465 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (277 >> 8) & 0xff, + 277 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1469 >> 8) & 0xff, + 1469 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1477 >> 8) & 0xff, + 1477 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1485 >> 8) & 0xff, + 1485 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1492 >> 8) & 0xff, + 1492 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1498 >> 8) & 0xff, + 1498 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1504 >> 8) & 0xff, + 1504 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1510 >> 8) & 0xff, + 1510 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1516 >> 8) & 0xff, + 1516 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 
32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1522 >> 8) & 0xff, + 1522 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1528 >> 8) & 0xff, + 1528 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1534 >> 8) & 0xff, + 1534 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1540 >> 8) & 0xff, + 1540 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (289 >> 8) & 0xff, + 289 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1543 >> 8) & 0xff, + 1543 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (290 >> 8) & 0xff, + 290 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1546 >> 8) & 0xff, + 1546 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1552 >> 8) & 0xff, + 1552 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (292 >> 8) & 0xff, + 292 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1555 >> 8) & 0xff, + 1555 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (293 >> 8) & 0xff, + 293 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1558 >> 8) & 0xff, + 1558 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1564 >> 8) & 0xff, + 1564 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (295 >> 8) & 0xff, + 295 & 
0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1567 >> 8) & 0xff, + 1567 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (296 >> 8) & 0xff, + 296 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1570 >> 8) & 0xff, + 1570 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1576 >> 8) & 0xff, + 1576 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (298 >> 8) & 0xff, + 298 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1579 >> 8) & 0xff, + 1579 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (299 >> 8) & 0xff, + 299 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1582 >> 8) & 0xff, + 1582 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1588 >> 8) & 0xff, + 1588 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (301 >> 8) & 0xff, + 301 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1591 >> 8) & 0xff, + 1591 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (302 >> 8) & 0xff, + 302 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1594 >> 8) & 0xff, + 1594 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1600 >> 8) & 0xff, + 1600 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (304 >> 8) & 0xff, + 304 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1603 >> 8) & 0xff, + 1603 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (305 >> 8) & 0xff, + 305 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1606 >> 8) & 0xff, + 1606 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1612 >> 8) & 0xff, + 1612 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (307 >> 8) & 0xff, + 307 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1615 >> 8) & 0xff, + 1615 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (308 >> 8) & 0xff, + 308 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1618 >> 8) & 0xff, + 1618 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1624 >> 8) & 0xff, + 1624 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (310 >> 8) & 0xff, + 310 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1627 >> 8) & 0xff, + 1627 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (311 >> 8) & 0xff, + 311 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1630 >> 8) & 0xff, + 1630 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1636 >> 8) & 0xff, + 1636 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (313 >> 8) & 0xff, + 313 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1639 >> 8) & 0xff, + 1639 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (314 >> 8) & 0xff, + 314 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1642 >> 8) & 0xff, + 1642 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1648 >> 8) & 0xff, + 1648 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (316 >> 8) & 0xff, + 316 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1651 >> 8) & 0xff, + 1651 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (317 >> 8) & 0xff, + 317 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1654 >> 8) & 0xff, + 1654 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* class_tid: 2, , table: control.terminating_flow */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1680 >> 8) & 0xff, + 1680 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, 
+ .field_opr3 = { + (319 >> 8) & 0xff, + 319 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1682 >> 8) & 0xff, + 1682 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (320 >> 8) & 0xff, + 320 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1684 >> 8) & 0xff, + 1684 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (321 >> 8) & 0xff, + 321 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1686 >> 8) & 0xff, + 1686 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1701 >> 8) & 0xff, + 1701 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + 1} + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1711 >> 8) & 0xff, + 1711 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1731 >> 8) & 0xff, + 1731 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1746 >> 8) & 0xff, + 1746 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1752 >> 8) & 0xff, + 1752 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1758 >> 8) & 0xff, + 1758 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1764 >> 8) & 0xff, + 1764 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1770 >> 8) & 0xff, + 1770 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1776 >> 8) & 0xff, + 1776 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1781 >> 8) & 0xff, + 1781 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1786 >> 8) & 0xff, + 1786 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1792 >> 8) & 0xff, + 1792 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1799 >> 8) & 0xff, + 1799 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1807 >> 8) & 0xff, + 1807 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (336 >> 8) & 0xff, + 336 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1811 >> 8) & 0xff, + 1811 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (337 >> 8) & 0xff, + 337 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1815 >> 8) & 0xff, + 1815 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1823 >> 8) & 0xff, + 1823 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1831 >> 8) & 0xff, + 1831 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1839 >> 8) & 0xff, + 1839 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size 
= 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1847 >> 8) & 0xff, + 1847 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1855 >> 8) & 0xff, + 1855 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (343 >> 8) & 0xff, + 343 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1859 >> 8) & 0xff, + 1859 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (344 >> 8) & 0xff, + 344 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1863 >> 8) & 0xff, + 1863 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1871 >> 8) & 0xff, + 1871 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1879 >> 8) & 0xff, + 1879 & 
0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1887 >> 8) & 0xff, + 1887 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1895 >> 8) & 0xff, + 1895 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (349 >> 8) & 0xff, + 349 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1899 >> 8) & 0xff, + 1899 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (350 >> 8) & 0xff, + 350 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1903 >> 8) & 0xff, + 1903 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1911 >> 8) & 0xff, + 1911 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (352 >> 8) & 0xff, + 352 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1915 >> 8) & 0xff, + 1915 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (353 >> 8) & 0xff, + 353 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1919 >> 8) & 0xff, + 1919 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1927 >> 8) & 0xff, + 1927 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (355 >> 8) & 0xff, + 355 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1931 >> 8) & 0xff, + 1931 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (356 >> 8) & 0xff, + 356 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1935 >> 8) & 0xff, + 1935 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1943 >> 8) & 0xff, + 1943 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (358 >> 8) & 0xff, + 358 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1947 >> 8) & 0xff, + 1947 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (359 >> 8) & 0xff, + 359 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1951 >> 8) & 0xff, + 1951 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1956 >> 8) & 0xff, + 1956 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1959 >> 8) & 0xff, + 1959 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1963 >> 8) & 0xff, 
+ 1963 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1966 >> 8) & 0xff, + 1966 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1970 >> 8) & 0xff, + 1970 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1972 >> 8) & 0xff, + 1972 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: control.profile_tcam_priority */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1976 >> 8) & 0xff, + 1976 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (367 >> 8) & 0xff, + 367 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1978 >> 8) & 0xff, + 1978 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 
ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (368 >> 8) & 0xff, + 368 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1980 >> 8) & 0xff, + 1980 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (369 >> 8) & 0xff, + 369 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1982 >> 8) & 0xff, + 1982 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L4}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (370 >> 8) & 0xff, + 370 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1984 >> 8) & 0xff, + 1984 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (371 >> 8) & 0xff, + 371 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1986 >> 8) & 0xff, + 1986 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (372 >> 8) & 0xff, + 372 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1988 >> 8) & 
0xff, + 1988 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (373 >> 8) & 0xff, + 373 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1990 >> 8) & 0xff, + 1990 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L3}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_THOR2_SYM_PROF_TCAM_PRI_L2} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1994 >> 8) & 0xff, + 1994 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (375 >> 8) & 0xff, + 375 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1996 >> 8) & 0xff, + 1996 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (376 >> 8) & 0xff, + 376 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1998 >> 8) & 0xff, + 1998 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2002 >> 8) & 0xff, + 2002 & 0xff, + (2 >> 8) & 
0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (378 >> 8) & 0xff, + 378 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2004 >> 8) & 0xff, + 2004 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (379 >> 8) & 0xff, + 379 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2006 >> 8) & 0xff, + 2006 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2010 >> 8) & 0xff, + 2010 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (381 >> 8) & 0xff, + 381 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2012 >> 8) & 0xff, + 2012 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (382 >> 8) & 0xff, + 382 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2014 >> 8) & 
0xff, + 2014 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2018 >> 8) & 0xff, + 2018 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (384 >> 8) & 0xff, + 384 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2020 >> 8) & 0xff, + 2020 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (385 >> 8) & 0xff, + 385 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2022 >> 8) & 0xff, + 2022 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2026 >> 8) & 0xff, + 2026 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (387 >> 8) & 0xff, + 387 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2028 >> 8) & 0xff, + 2028 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (388 >> 8) & 0xff, + 388 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2030 >> 8) & 0xff, + 2030 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2034 >> 8) & 0xff, + 2034 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (390 >> 8) & 0xff, + 390 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2036 >> 8) & 0xff, + 2036 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (391 >> 8) & 0xff, + 391 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2038 >> 8) & 0xff, + 2038 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2042 >> 8) & 0xff, + 2042 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (393 >> 8) & 0xff, + 393 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2044 >> 8) & 0xff, + 2044 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (394 >> 8) & 0xff, + 394 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2046 >> 8) & 0xff, + 2046 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2050 >> 8) & 0xff, + 2050 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (396 >> 8) & 0xff, + 396 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2052 >> 8) & 0xff, + 2052 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (397 >> 8) & 0xff, + 397 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2054 >> 8) & 0xff, + 2054 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (2058 >> 8) & 0xff, + 2058 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (399 >> 8) & 0xff, + 399 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2060 >> 8) & 0xff, + 2060 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (400 >> 8) & 0xff, + 400 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2062 >> 8) & 0xff, + 2062 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2066 >> 8) & 0xff, + 2066 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (402 >> 8) & 0xff, + 402 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2068 >> 8) & 0xff, + 2068 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (403 >> 8) & 0xff, + 403 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2070 >> 8) & 0xff, + 2070 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2074 >> 8) & 0xff, + 2074 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (405 >> 8) & 0xff, + 405 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2076 >> 8) & 0xff, + 2076 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (406 >> 8) & 0xff, + 406 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2078 >> 8) & 0xff, + 2078 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2082 >> 8) & 0xff, + 2082 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (408 >> 8) & 0xff, + 408 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2084 >> 8) & 0xff, + 2084 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (409 >> 8) & 0xff, + 409 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2086 >> 8) & 0xff, + 2086 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2090 >> 8) & 0xff, + 2090 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (411 >> 8) & 0xff, + 411 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2092 >> 8) & 0xff, + 2092 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (412 >> 8) & 0xff, + 412 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2094 >> 8) & 0xff, + 2094 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2098 >> 8) & 0xff, + 2098 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (414 >> 8) & 0xff, + 414 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2100 >> 8) & 0xff, + 2100 & 0xff, + (2 >> 8) & 0xff, + 
2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (415 >> 8) & 0xff, + 415 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2102 >> 8) & 0xff, + 2102 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2107 >> 8) & 0xff, + 2107 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2113 >> 8) & 0xff, + 2113 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2120 >> 8) & 0xff, + 2120 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2124 >> 8) & 0xff, + 2124 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2128 >> 8) & 0xff, + 2128 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2132 >> 8) & 0xff, + 2132 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (422 >> 8) & 0xff, + 422 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2134 >> 8) & 0xff, + 2134 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (423 >> 8) & 0xff, + 423 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2136 >> 8) & 0xff, + 2136 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (424 >> 8) & 0xff, + 424 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2138 >> 8) & 0xff, + 2138 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (425 >> 8) & 0xff, + 425 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2140 >> 8) & 0xff, + 2140 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2144 >> 8) & 0xff, + 2144 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_VXLAN_GPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (427 >> 8) & 0xff, + 427 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2146 >> 8) & 0xff, + 2146 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_GENEVE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (428 >> 8) & 0xff, + 428 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2148 >> 8) & 0xff, + 2148 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_GRE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (429 >> 8) & 0xff, + 429 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2150 >> 8) & 0xff, + 2150 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (430 >> 8) & 0xff, + 430 & 0xff} + }, + 
{ + .description = "tun_hdr_type", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2152 >> 8) & 0xff, + 2152 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TUN_HDR_TYPE_UPAR2}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2159 >> 8) & 0xff, + 2159 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2163 >> 8) & 0xff, + 2163 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2167 >> 8) & 0xff, + 2167 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2171 >> 8) & 0xff, + 2171 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2175 >> 8) & 
0xff, + 2175 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2179 >> 8) & 0xff, + 2179 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2183 >> 8) & 0xff, + 2183 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2187 >> 8) & 0xff, + 2187 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2191 >> 8) & 0xff, + 2191 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2195 >> 8) & 0xff, + 2195 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { 
+ (2199 >> 8) & 0xff, + 2199 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2203 >> 8) & 0xff, + 2203 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2230 >> 8) & 0xff, + 2230 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_HF, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2235 >> 8) & 0xff, + 2235 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_HF, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2249 >> 8) & 0xff, + 2249 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2257 >> 8) 
& 0xff, + 2257 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2296 >> 8) & 0xff, + 2296 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2302 >> 8) & 0xff, + 2302 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2308 >> 8) & 0xff, + 2308 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2314 >> 8) & 0xff, + 2314 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = 
"tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2320 >> 8) & 0xff, + 2320 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2326 >> 8) & 0xff, + 2326 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2332 >> 8) & 0xff, + 2332 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2338 >> 8) & 0xff, + 2338 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2344 >> 8) & 0xff, + 2344 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2350 >> 8) & 0xff, + 2350 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2355 >> 8) & 0xff, + 2355 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2359 >> 8) & 0xff, + 2359 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2364 >> 8) & 0xff, + 2364 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2370 >> 8) & 0xff, + 2370 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2376 >> 8) & 0xff, + 2376 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2382 >> 8) & 0xff, + 2382 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2389 >> 8) & 0xff, + 2389 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2397 >> 8) & 0xff, + 2397 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2405 >> 8) & 0xff, + 2405 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (466 >> 8) & 0xff, + 466 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2409 >> 8) & 0xff, + 2409 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (467 >> 8) & 0xff, + 467 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2413 >> 8) & 0xff, + 2413 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2421 >> 8) & 0xff, + 2421 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (469 >> 8) & 0xff, + 469 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2425 >> 8) & 0xff, + 2425 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (470 >> 8) & 0xff, + 470 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2429 >> 8) & 0xff, + 2429 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2437 >> 8) & 0xff, + 2437 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2445 >> 8) & 0xff, + 2445 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2452 >> 8) & 0xff, + 2452 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2458 >> 8) & 0xff, + 2458 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2464 >> 8) & 0xff, + 2464 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2470 >> 8) & 0xff, + 2470 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2476 >> 8) & 0xff, + 2476 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2482 >> 8) & 0xff, + 2482 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2488 >> 8) & 0xff, + 2488 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2494 >> 8) & 0xff, + 2494 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2500 >> 8) & 0xff, + 2500 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (482 >> 8) & 0xff, + 482 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2503 >> 8) & 0xff, + 2503 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + 
.field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (483 >> 8) & 0xff, + 483 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2506 >> 8) & 0xff, + 2506 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2512 >> 8) & 0xff, + 2512 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (485 >> 8) & 0xff, + 485 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2515 >> 8) & 0xff, + 2515 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (486 >> 8) & 0xff, + 486 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2518 >> 8) & 0xff, + 2518 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2523 >> 8) & 0xff, + 2523 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (488 >> 8) & 0xff, + 488 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2525 >> 8) & 0xff, + 2525 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (489 >> 8) & 0xff, + 489 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2527 >> 8) & 0xff, + 2527 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (490 >> 8) & 0xff, + 490 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2529 >> 8) & 0xff, + 2529 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (491 >> 8) & 0xff, + 491 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2532 >> 8) & 0xff, + 2532 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (492 >> 8) & 0xff, + 492 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2535 >> 8) & 0xff, + 2535 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (493 >> 8) & 0xff, + 493 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2538 >> 8) & 0xff, + 2538 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2543 >> 8) & 0xff, + 2543 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (495 >> 8) & 0xff, + 495 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2545 >> 8) & 0xff, + 2545 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (496 >> 8) & 0xff, + 496 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2547 >> 8) & 0xff, + 2547 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR2_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (497 >> 8) & 0xff, + 497 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2549 >> 8) & 0xff, + 2549 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (498 >> 8) & 0xff, + 498 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2552 >> 8) & 0xff, + 2552 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (499 >> 8) & 0xff, + 499 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2555 >> 8) & 0xff, + 2555 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (500 >> 8) & 0xff, + 500 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2558 >> 
8) & 0xff, + 2558 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2564 >> 8) & 0xff, + 2564 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (502 >> 8) & 0xff, + 502 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2567 >> 8) & 0xff, + 2567 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (503 >> 8) & 0xff, + 503 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2570 >> 8) & 0xff, + 2570 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2576 >> 8) & 0xff, + 2576 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (505 >> 8) & 0xff, + 505 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2579 >> 8) & 0xff, + 2579 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (506 >> 8) & 0xff, + 506 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2582 >> 8) & 0xff, + 2582 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2588 >> 8) & 0xff, + 2588 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (508 >> 8) & 0xff, + 508 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2591 >> 8) & 0xff, + 2591 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (509 >> 8) & 0xff, + 
509 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2594 >> 8) & 0xff, + 2594 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (510 >> 8) & 0xff, + 510 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2597 >> 8) & 0xff, + 2597 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2601 >> 8) & 0xff, + 2601 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (512 >> 8) & 0xff, + 512 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2604 >> 8) & 0xff, + 2604 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (513 >> 8) & 0xff, + 513 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2607 >> 8) & 0xff, + 2607 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (514 >> 8) & 0xff, + 514 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2610 >> 8) & 0xff, + 2610 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2614 >> 8) & 0xff, + 2614 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (516 >> 8) & 0xff, + 516 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2617 >> 8) & 0xff, + 2617 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (517 >> 8) & 0xff, + 517 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2620 >> 8) & 0xff, + 2620 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (518 >> 8) & 0xff, + 518 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2623 >> 8) & 0xff, + 2623 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2627 >> 8) & 0xff, + 2627 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (520 >> 8) & 0xff, + 520 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2630 >> 8) & 0xff, + 2630 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (521 >> 8) & 0xff, + 521 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2633 >> 8) & 0xff, + 2633 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (522 >> 8) & 0xff, + 522 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2636 >> 8) & 0xff, + 2636 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2659 >> 8) & 0xff, + 2659 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2679 >> 8) & 0xff, + 2679 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2694 >> 8) & 0xff, + 2694 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2700 >> 8) & 0xff, + 2700 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2706 >> 8) & 0xff, + 2706 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2712 >> 8) & 0xff, + 2712 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2718 >> 8) & 0xff, + 2718 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2724 >> 8) & 0xff, + 2724 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2729 >> 8) & 0xff, + 2729 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2735 >> 8) & 0xff, + 2735 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2742 >> 8) & 0xff, + 2742 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 
= { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2749 >> 8) & 0xff, + 2749 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2757 >> 8) & 0xff, + 2757 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (536 >> 8) & 0xff, + 536 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2761 >> 8) & 0xff, + 2761 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (537 >> 8) & 0xff, + 537 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2765 >> 8) & 0xff, + 2765 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2773 >> 8) & 0xff, + 2773 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2780 >> 8) & 0xff, + 2780 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2786 >> 8) & 0xff, + 2786 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2792 >> 8) & 0xff, + 2792 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2798 >> 8) & 0xff, + 2798 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (543 >> 8) & 0xff, + 543 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2801 >> 8) & 0xff, + 2801 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (544 >> 8) & 0xff, + 544 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2804 >> 8) & 0xff, + 2804 & 0xff, + (3 >> 8) & 
0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2810 >> 8) & 0xff, + 2810 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2816 >> 8) & 0xff, + 2816 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2822 >> 8) & 0xff, + 2822 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2828 >> 8) & 0xff, + 2828 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (549 >> 8) & 0xff, + 549 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2831 >> 8) & 0xff, + 2831 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (550 >> 8) & 0xff, + 550 & 0xff} + }, + { + 
.description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2834 >> 8) & 0xff, + 2834 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2840 >> 8) & 0xff, + 2840 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (552 >> 8) & 0xff, + 552 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2843 >> 8) & 0xff, + 2843 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (553 >> 8) & 0xff, + 553 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2846 >> 8) & 0xff, + 2846 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2852 >> 8) & 0xff, + 2852 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (555 >> 8) & 0xff, + 555 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2855 >> 8) & 0xff, + 2855 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (556 >> 8) & 0xff, + 556 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2858 >> 8) & 0xff, + 2858 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2864 >> 8) & 0xff, + 2864 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (558 >> 8) & 0xff, + 558 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2867 >> 8) & 0xff, + 2867 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (559 >> 8) & 0xff, + 559 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2870 >> 8) & 0xff, + 2870 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { 
+ (2876 >> 8) & 0xff, + 2876 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (561 >> 8) & 0xff, + 561 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2879 >> 8) & 0xff, + 2879 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (562 >> 8) & 0xff, + 562 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2882 >> 8) & 0xff, + 2882 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2888 >> 8) & 0xff, + 2888 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (564 >> 8) & 0xff, + 564 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2891 >> 8) & 0xff, + 2891 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (565 >> 8) & 0xff, + 565 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2894 >> 8) & 0xff, + 2894 & 0xff, + (3 
>> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2929 >> 8) & 0xff, + 2929 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2937 >> 8) & 0xff, + 2937 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2976 >> 8) & 0xff, + 2976 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2982 >> 8) & 0xff, + 2982 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2988 >> 8) & 0xff, + 2988 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2994 >> 8) & 0xff, + 2994 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3000 >> 8) & 0xff, + 3000 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3006 >> 8) & 0xff, + 3006 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3012 >> 8) & 0xff, + 3012 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3018 >> 8) & 0xff, + 3018 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3024 >> 8) & 0xff, + 3024 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3030 >> 8) & 0xff, + 3030 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3035 >> 8) & 0xff, + 3035 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3039 >> 8) & 0xff, + 3039 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3045 >> 8) & 0xff, + 3045 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3053 >> 8) & 0xff, + 3053 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3060 >> 8) & 0xff, + 3060 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3066 >> 8) & 0xff, + 3066 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3073 >> 8) & 0xff, + 3073 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3081 >> 8) & 0xff, + 3081 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3089 >> 8) & 0xff, + 3089 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (587 >> 8) & 0xff, + 587 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3093 >> 8) & 0xff, + 3093 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (588 >> 8) & 0xff, + 588 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3097 >> 8) & 0xff, + 3097 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3105 >> 8) & 0xff, + 3105 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (590 >> 8) & 0xff, + 590 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3109 >> 8) & 0xff, + 3109 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (591 >> 8) & 0xff, + 591 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3113 >> 8) & 0xff, + 3113 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3121 >> 8) & 0xff, + 3121 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3129 >> 8) & 0xff, + 3129 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3136 >> 8) & 0xff, + 3136 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3142 >> 8) & 0xff, + 3142 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3148 >> 8) & 0xff, + 3148 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3154 >> 8) & 0xff, + 3154 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3160 >> 8) & 0xff, + 3160 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3166 >> 8) & 0xff, + 3166 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3172 >> 8) & 0xff, + 3172 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3178 >> 8) & 0xff, + 3178 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP 
+ }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3184 >> 8) & 0xff, + 3184 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (603 >> 8) & 0xff, + 603 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3187 >> 8) & 0xff, + 3187 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (604 >> 8) & 0xff, + 604 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3190 >> 8) & 0xff, + 3190 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3196 >> 8) & 0xff, + 3196 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (606 >> 8) & 0xff, + 606 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3199 >> 8) & 0xff, + 3199 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (607 >> 
8) & 0xff, + 607 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3202 >> 8) & 0xff, + 3202 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3208 >> 8) & 0xff, + 3208 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (609 >> 8) & 0xff, + 609 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3211 >> 8) & 0xff, + 3211 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (610 >> 8) & 0xff, + 610 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3214 >> 8) & 0xff, + 3214 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3220 >> 8) & 0xff, + 3220 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (612 >> 8) & 0xff, + 
612 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3223 >> 8) & 0xff, + 3223 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (613 >> 8) & 0xff, + 613 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3226 >> 8) & 0xff, + 3226 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3232 >> 8) & 0xff, + 3232 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (615 >> 8) & 0xff, + 615 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3235 >> 8) & 0xff, + 3235 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (616 >> 8) & 0xff, + 616 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3238 >> 8) & 0xff, + 3238 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3244 >> 8) & 0xff, + 3244 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (618 >> 8) & 0xff, + 618 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3247 >> 8) & 0xff, + 3247 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (619 >> 8) & 0xff, + 619 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3250 >> 8) & 0xff, + 3250 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3256 >> 8) & 0xff, + 3256 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (621 >> 8) & 0xff, + 621 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3259 >> 8) & 0xff, + 3259 & 0xff, + (3 >> 8) & 
0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (622 >> 8) & 0xff, + 622 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3262 >> 8) & 0xff, + 3262 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3268 >> 8) & 0xff, + 3268 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (624 >> 8) & 0xff, + 624 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3271 >> 8) & 0xff, + 3271 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (625 >> 8) & 0xff, + 625 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3274 >> 8) & 0xff, + 3274 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3280 >> 8) & 0xff, + 3280 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (627 >> 8) & 0xff, + 627 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3283 >> 8) & 0xff, + 3283 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (628 >> 8) & 0xff, + 628 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3286 >> 8) & 0xff, + 3286 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3292 >> 8) & 0xff, + 3292 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (630 >> 8) & 0xff, + 630 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3295 >> 8) & 0xff, + 3295 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (631 >> 8) & 0xff, + 631 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3298 >> 8) & 0xff, + 3298 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor2_class_result_field_list[] = { + /* class_tid: 1, , table: cmm_stat_record.add_stat_tunnel_cache */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "stat_ptr", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1 & 0xff} + }, + { + .description = "cmm_stat_hndl", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1 & 0xff} + }, 
+ /* class_tid: 1, , table: l2_cntxt_tcam.l2_table_create */ + { + .description = "l2ip_dest_data", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_dest_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_data", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "l2ip_act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "l2ip_act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "l2ip_meta", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_meta_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_opcode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr1 = { + ULP_THOR2_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "prsv_prof_func_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prsv_l2ip_cntxt_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_parif", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: mac_addr_cache.l2_table_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 1, , table: cmm_stat_record.f1_flow */ + { + .description = "packet_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byte_count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: fkb_select.wc_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (45 >> 8) & 0xff, + 45 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (46 >> 8) & 0xff, + 46 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (47 >> 8) & 0xff, + 47 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (48 >> 8) & 0xff, + 48 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description 
= "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (49 >> 8) & 0xff, + 49 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (52 >> 8) & 0xff, + 52 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (55 >> 8) & 0xff, + 55 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (59 >> 8) & 0xff, + 59 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (7 >> 8) & 0xff, + 7 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (67 >> 8) & 0xff, + 67 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (71 >> 8) & 0xff, + 71 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (74 >> 8) & 0xff, + 74 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (77 >> 8) & 0xff, + 77 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (80 >> 8) & 0xff, + 80 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (8 >> 8) & 0xff, + 8 & 0xff} + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (86 >> 8) & 0xff, + 86 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (89 >> 8) & 0xff, + 89 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (92 >> 8) & 0xff, + 92 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (95 >> 8) & 0xff, + 95 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (9 >> 8) & 0xff, + 9 & 0xff} + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (101 >> 8) & 0xff, + 101 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (10 >> 8) & 0xff, + 10 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (107 >> 8) & 0xff, + 107 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (11 >> 8) & 0xff, + 11 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (113 >> 8) & 0xff, + 113 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (12 >> 8) & 0xff, + 12 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description 
= "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (119 >> 8) & 0xff, + 119 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (13 >> 8) & 0xff, + 13 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (125 >> 8) & 0xff, + 125 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (14 >> 8) & 0xff, + 14 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (131 >> 8) & 0xff, + 131 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (15 >> 8) & 0xff, + 15 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (135 >> 8) & 0xff, + 135 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (16 >> 8) & 0xff, + 16 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (141 >> 8) & 0xff, + 141 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (17 >> 8) & 0xff, + 17 & 0xff} + }, + { + .description = 
"l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (147 >> 8) & 0xff, + 147 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (18 >> 8) & 0xff, + 18 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (155 >> 8) & 0xff, + 155 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (19 >> 8) & 0xff, + 19 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (171 >> 8) & 0xff, + 171 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (22 >> 8) & 0xff, + 22 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (179 >> 8) & 0xff, + 179 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (23 >> 8) & 0xff, + 23 & 0xff} + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (187 >> 8) & 0xff, + 187 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (24 >> 8) & 0xff, + 24 & 0xff} + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (195 >> 8) & 0xff, + 195 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (25 >> 8) & 0xff, + 25 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (203 >> 8) & 
0xff, + 203 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (26 >> 8) & 0xff, + 26 & 0xff} + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (219 >> 8) & 0xff, + 219 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (29 >> 8) & 0xff, + 29 & 0xff} + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (227 >> 8) & 0xff, + 227 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (30 >> 8) & 0xff, + 30 & 0xff} + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (235 >> 8) & 0xff, + 235 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (31 >> 8) & 0xff, + 31 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (243 >> 8) & 0xff, + 243 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (32 >> 8) & 0xff, + 32 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (259 >> 8) & 0xff, 
+ 259 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (35 >> 8) & 0xff, + 35 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (275 >> 8) & 0xff, + 275 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (38 >> 8) & 0xff, + 38 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (291 >> 8) & 0xff, + 291 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (41 >> 8) & 0xff, + 41 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (307 >> 8) & 0xff, + 307 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (44 >> 8) & 0xff, + 44 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (45 >> 8) & 0xff, + 45 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (314 >> 8) & 0xff, + 314 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (46 >> 8) & 0xff, + 46 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (47 >> 8) & 0xff, + 47 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (321 >> 8) & 0xff, + 321 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (48 >> 8) & 0xff, + 48 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (49 >> 8) & 0xff, + 49 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 
& 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + /* class_tid: 1, , table: fkb_select.em_gen_template_alloc */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_esp.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", 
+ .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .description = "wc_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + 
BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_TERM_FLOW >> 8) & 0xff, + BNXT_ULP_RF_IDX_TERM_FLOW & 0xff} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 21, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: proto_header_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, 
+ .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "em_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_RECIPE_ID & 0xff} + }, + { + .description = "wc_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_RECIPE_ID & 0xff} + }, + /* class_tid: 1, , table: fkb_select.em_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (996 >> 8) & 0xff, + 996 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (997 >> 8) & 0xff, + 997 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (998 >> 8) & 0xff, + 998 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (999 >> 8) & 0xff, + 999 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1000 >> 8) & 0xff, + 1000 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1004 >> 8) & 0xff, + 1004 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1007 >> 8) & 0xff, + 1007 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1011 >> 8) & 0xff, + 1011 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (209 >> 8) & 0xff, + 209 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1019 >> 8) & 0xff, + 1019 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1023 >> 8) & 0xff, + 1023 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1026 >> 8) & 0xff, + 1026 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1029 >> 8) & 0xff, + 1029 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1032 >> 8) & 0xff, + 1032 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (210 >> 8) & 0xff, + 210 & 0xff} + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1038 >> 8) & 0xff, + 1038 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1041 >> 8) & 0xff, + 1041 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1044 >> 8) & 0xff, + 1044 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1047 >> 8) & 0xff, + 1047 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (211 >> 8) & 0xff, + 211 & 0xff} + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1053 >> 8) & 0xff, + 1053 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (212 >> 8) & 0xff, + 212 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1059 >> 8) & 0xff, + 1059 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (213 >> 8) & 0xff, + 213 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1065 >> 8) & 0xff, + 1065 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (214 >> 8) & 0xff, + 214 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1071 >> 8) & 0xff, + 1071 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (215 >> 8) & 0xff, + 215 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1077 >> 8) & 0xff, + 1077 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (216 >> 8) & 0xff, + 216 & 0xff} + }, + { + 
.description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1083 >> 8) & 0xff, + 1083 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (217 >> 8) & 0xff, + 217 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1087 >> 8) & 0xff, + 1087 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (218 >> 8) & 0xff, + 218 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1095 >> 8) & 0xff, + 1095 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (219 >> 8) & 0xff, + 219 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1101 >> 8) & 0xff, + 1101 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (220 >> 8) & 0xff, + 220 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1109 >> 8) & 0xff, + 1109 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (221 >> 8) & 0xff, + 221 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1125 >> 8) & 0xff, + 1125 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (224 >> 8) & 0xff, + 224 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, 
+ .field_opr1 = { + (1133 >> 8) & 0xff, + 1133 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (225 >> 8) & 0xff, + 225 & 0xff} + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1139 >> 8) & 0xff, + 1139 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (226 >> 8) & 0xff, + 226 & 0xff} + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1145 >> 8) & 0xff, + 1145 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (227 >> 8) & 0xff, + 227 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1151 >> 8) & 0xff, + 1151 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (228 >> 8) & 0xff, + 228 & 0xff} + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1163 >> 8) & 0xff, + 1163 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (231 >> 8) & 0xff, + 231 & 0xff} + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1169 >> 8) & 0xff, + 1169 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (232 >> 8) & 0xff, + 232 & 0xff} + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1175 >> 8) & 0xff, + 1175 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (233 >> 8) & 0xff, + 233 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1181 >> 8) & 0xff, + 1181 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (234 >> 8) & 0xff, + 234 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1193 >> 8) & 0xff, + 1193 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (237 >> 8) & 0xff, + 237 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1205 >> 8) & 0xff, + 1205 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (240 >> 8) & 0xff, + 240 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1217 >> 8) & 0xff, + 1217 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (243 >> 8) & 0xff, + 243 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1229 >> 8) & 0xff, + 1229 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (246 >> 8) & 0xff, + 246 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1241 >> 8) & 0xff, + 1241 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (249 >> 8) & 0xff, + 249 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* class_tid: 1, , table: em_normal.ingress_generic_template */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rec_size", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch0", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FUNCTION_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FUNCTION_ID & 0xff} + }, + { + .description = "epoch1", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "opcode", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ring_table_idx", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "paths_m1", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pad2", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "range_profile", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "range_index", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: wm_normal.ingress_generic_template */ + { + .description = "fc_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_type", + .field_bit_size = 2, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "paths_m1", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ring_table_idx", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "opcode", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch1", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch0", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FUNCTION_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FUNCTION_ID & 0xff} + }, + { + .description = "rec_size", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pad1", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: fkb_select.wc_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1691 >> 8) & 0xff, + 1691 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1692 >> 8) & 0xff, + 1692 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1693 >> 8) & 0xff, + 1693 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1694 >> 8) & 0xff, + 1694 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size 
= 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1695 >> 8) & 0xff, + 1695 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1698 >> 8) & 0xff, + 1698 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (322 >> 8) & 0xff, + 322 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1703 >> 8) & 0xff, + 1703 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1707 >> 8) & 0xff, + 1707 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (323 >> 8) & 0xff, + 323 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1715 >> 8) & 0xff, + 1715 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1719 >> 8) & 0xff, + 1719 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1722 >> 8) & 0xff, + 1722 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1725 >> 8) & 0xff, + 1725 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1728 >> 8) & 0xff, + 1728 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (324 >> 8) & 0xff, + 324 & 0xff} + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1734 >> 8) & 0xff, + 1734 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1737 >> 8) & 0xff, + 1737 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + 
{ + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1740 >> 8) & 0xff, + 1740 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1743 >> 8) & 0xff, + 1743 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (325 >> 8) & 0xff, + 325 & 0xff} + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1749 >> 8) & 0xff, + 1749 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (326 >> 8) & 0xff, + 326 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1755 >> 8) & 0xff, + 1755 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (327 >> 8) & 0xff, + 327 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1761 >> 8) & 0xff, + 1761 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { 
+ 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (328 >> 8) & 0xff, + 328 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1767 >> 8) & 0xff, + 1767 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (329 >> 8) & 0xff, + 329 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1773 >> 8) & 0xff, + 1773 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (330 >> 8) & 0xff, + 330 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1779 >> 8) & 0xff, + 1779 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, 
+ .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (331 >> 8) & 0xff, + 331 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1783 >> 8) & 0xff, + 1783 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (332 >> 8) & 0xff, + 332 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1789 >> 8) & 0xff, + 1789 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (333 >> 8) & 0xff, + 333 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1795 >> 8) & 0xff, + 1795 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (334 >> 8) & 0xff, + 334 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1803 >> 8) & 0xff, + 1803 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (335 >> 8) & 0xff, + 335 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1819 >> 8) & 0xff, + 1819 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (338 >> 8) & 0xff, + 338 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1827 >> 8) & 0xff, + 1827 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (339 >> 8) & 0xff, + 339 & 0xff} + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1835 >> 8) & 0xff, + 1835 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (340 >> 8) & 0xff, + 340 & 0xff} + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1843 >> 8) & 0xff, + 1843 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (341 >> 8) & 0xff, + 341 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1851 >> 8) & 0xff, + 1851 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (342 >> 8) & 0xff, + 342 & 
0xff} + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1867 >> 8) & 0xff, + 1867 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (345 >> 8) & 0xff, + 345 & 0xff} + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1875 >> 8) & 0xff, + 1875 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (346 >> 8) & 0xff, + 346 & 0xff} + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1883 >> 8) & 0xff, + 1883 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (347 >> 8) & 0xff, + 347 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1891 >> 8) & 0xff, + 1891 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (348 >> 8) & 0xff, + 348 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1907 >> 8) & 0xff, + 1907 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (351 >> 8) & 
0xff, + 351 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1923 >> 8) & 0xff, + 1923 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (354 >> 8) & 0xff, + 354 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1939 >> 8) & 0xff, + 1939 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (357 >> 8) & 0xff, + 357 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1955 >> 8) & 0xff, + 1955 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (360 >> 8) & 0xff, + 360 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (361 >> 8) & 0xff, + 361 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1962 >> 8) & 0xff, + 1962 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (362 >> 8) & 0xff, + 362 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (363 >> 8) & 0xff, + 363 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1969 >> 8) & 0xff, + 1969 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (364 >> 8) & 0xff, + 364 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (365 >> 8) & 0xff, + 365 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { 
+ (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + /* class_tid: 2, , table: fkb_select.em_gen_template_alloc */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { 
+ .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .description = "wc_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_search_en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 21, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: proto_header_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = 
"em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "em_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_RECIPE_ID & 0xff} + }, + { + .description = "wc_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_RECIPE_ID & 0xff} + }, + /* class_tid: 2, , table: fkb_select.em_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2640 >> 8) & 0xff, + 2640 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_hi.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2641 >> 8) & 0xff, + 2641 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_lo.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2642 >> 8) & 0xff, + 2642 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2643 >> 8) & 0xff, + 2643 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dmac.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3type.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "otl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_seq.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "otqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "oterr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc 
= BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2644 >> 8) & 0xff, + 2644 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2648 >> 8) & 0xff, + 2648 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2651 >> 8) & 0xff, + 2651 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2655 >> 8) & 0xff, + 2655 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (523 >> 8) & 0xff, + 523 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2663 >> 8) & 0xff, + 2663 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2667 >> 8) & 0xff, + 2667 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2670 >> 8) & 0xff, + 2670 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2673 >> 8) & 0xff, + 2673 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2676 >> 8) & 0xff, + 2676 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (524 >> 8) & 0xff, + 524 & 0xff} + }, + { + .description = "tl3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2682 >> 8) & 0xff, + 2682 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2685 >> 8) & 0xff, + 2685 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2688 >> 8) & 0xff, + 2688 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2691 >> 8) & 0xff, + 2691 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 
= BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (525 >> 8) & 0xff, + 525 & 0xff} + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2697 >> 8) & 0xff, + 2697 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (526 >> 8) & 0xff, + 526 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2703 >> 8) & 0xff, + 2703 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (527 >> 8) & 0xff, + 527 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2709 >> 8) & 0xff, + 2709 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (528 >> 8) & 0xff, + 528 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2715 >> 8) & 0xff, + 2715 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (529 >> 8) & 0xff, + 529 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2721 >> 8) & 0xff, + 2721 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (530 >> 8) & 0xff, + 530 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2727 >> 8) & 0xff, + 2727 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (531 >> 8) & 0xff, + 531 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2731 >> 8) & 0xff, + 2731 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (532 >> 8) & 0xff, + 532 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2739 >> 8) & 0xff, + 2739 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (533 >> 8) & 0xff, + 533 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2745 >> 8) & 0xff, + 2745 & 0xff, + 
(4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (534 >> 8) & 0xff, + 534 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2753 >> 8) & 0xff, + 2753 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (535 >> 8) & 0xff, + 535 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2769 >> 8) & 0xff, + 2769 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (538 >> 8) & 0xff, + 538 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2777 >> 8) & 0xff, + 2777 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (539 >> 8) & 0xff, + 539 & 0xff} + }, + { + .description = "l3_sip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2783 >> 8) & 0xff, + 2783 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (540 >> 8) & 0xff, + 540 & 0xff} + }, + { + .description = "l3_sip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2789 >> 8) & 0xff, + 2789 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (541 >> 8) & 0xff, + 541 & 0xff} + }, + { + .description = "l3_sip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2795 >> 8) & 0xff, + 2795 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (542 >> 8) & 0xff, + 542 & 0xff} + }, + { + .description = "l3_dip3.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2807 >> 8) & 0xff, + 2807 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (545 >> 8) & 0xff, + 545 & 0xff} + }, + { + .description = "l3_dip2.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2813 >> 8) & 0xff, + 2813 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (546 >> 8) & 0xff, + 546 & 0xff} + }, + { + .description = "l3_dip1.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2819 >> 8) & 0xff, + 2819 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (547 >> 8) & 0xff, + 547 & 0xff} + }, + { + .description = "l3_dip0.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2825 >> 8) & 0xff, + 2825 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (548 >> 8) & 0xff, + 548 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2837 >> 8) & 0xff, + 2837 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (551 >> 8) & 0xff, + 551 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2849 >> 8) & 0xff, + 2849 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (554 >> 8) & 0xff, + 554 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2861 >> 8) & 0xff, + 2861 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (557 >> 8) & 0xff, + 557 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2873 >> 8) & 0xff, + 2873 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (560 >> 8) & 0xff, + 560 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2885 >> 8) & 0xff, + 2885 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (563 >> 8) & 0xff, + 563 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "padding", + .field_bit_size = 85, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* class_tid: 2, , table: em_normal.egress_generic_template */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rec_size", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch0", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FUNCTION_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FUNCTION_ID & 0xff} + }, + { + .description = "epoch1", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "opcode", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ring_table_idx", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "paths_m1", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pad2", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "range_profile", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "range_index", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: wm_normal.egress_generic_template */ + { + .description = "fc_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fc_op", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "paths_m1", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ring_table_idx", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "opcode", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch1", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "epoch0", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FUNCTION_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FUNCTION_ID & 0xff} + }, + { + .description = "rec_size", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pad1", + .field_bit_size = 24, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: metadata_record.act_rx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: metadata_record.prof_rx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: metadata_record.lkup_rx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: metadata_record.act_tx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: metadata_record.prof_tx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: metadata_record.lkup_tx_wr */ + { + .description = "meta_mask", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xffffffff >> 24) & 0xff, + (0xffffffff >> 16) & 0xff, + (0xffffffff >> 
8) & 0xff, + 0xffffffff & 0xff} + }, + /* class_tid: 3, , table: cmm_full_act_record.ing_default_0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: cmm_full_act_record.ing_default_1 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: profile_tcam_bypass.ing_catch_all */ + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + 
(BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bypass_op", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "reserved", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "padding", + .field_bit_size = 22, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: table_scope_cache.tsid_ing_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "prof_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "uplink_vnic", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: port_table.ing_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.parent.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "phy_port", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "port_is_pf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "default_arec_ptr.roce", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.svif_ing */ + { + .description = "l2ip_dest_data", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_dest_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_data", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "l2ip_act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "l2ip_act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "l2ip_meta", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_meta_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_opcode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "prsv_prof_func_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prsv_l2ip_cntxt_id", + .field_bit_size = 1, + .field_opc 
= BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_parif", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 3, , table: cmm_full_act_record.throw_away_egr */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size 
= 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_VPORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: cmm_full_act_record.egr_default_0 */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_VPORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: port_table.egr_wr_0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.parent.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "phy_port", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "port_is_pf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "default_arec_ptr.roce", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: ilt_tbl.egr */ + { + .description = "ilt_destination", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "table_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fwd_op", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_FWD_OP_NORMAL_FLOW} + }, + { + .description = "en_ilt_dest", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_action", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "en_bd_meta", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_func", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + { + .description = "ilt_meta", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ilt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_ilt_meta", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 23, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.non_vfr_svif_egr */ + { + .description = "l2ip_dest_data", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_dest_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_data", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "l2ip_act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 
0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "l2ip_act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "l2ip_meta", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_meta_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_opcode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "prsv_prof_func_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prsv_l2ip_cntxt_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_parif", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: profile_tcam_bypass.non_vfr_egr_catch_all */ + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bypass_op", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "reserved", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "padding", + .field_bit_size = 22, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.non_vfr_egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 
0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 3, , table: mod_record.svif2meta */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_ACT_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_RX_ACT_0 & 0xff} + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: cmm_full_act_record.ing_vf2vf */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 2} + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + 
.field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.vf2vf_ing */ + { + .description = "l2ip_dest_data", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_dest_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_data", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "l2ip_act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "l2ip_act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "l2ip_meta", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_meta_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_opcode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_CTXT_OPCODE_BYPASS_LKUP} + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_prof_func_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_l2ip_cntxt_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_parif", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: table_scope_cache.tsid_vfr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "prof_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "uplink_vnic", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: mod_record.meta2uplink */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 & 0xff} + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_0 & 0xff} + }, + /* class_tid: 4, , table: cmm_full_act_record.endpoint_def_act */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = 
"drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR2_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR2_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + 
.field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: profile_tcam_bypass.tsid_vfr_egr_catch_all */ + { + .description = "act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bypass_op", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "reserved", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "padding", + .field_bit_size = 22, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "prof_func", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "uplink_vnic", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf2vf_egr */ + { + .description = "l2ip_dest_data", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_dest_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_data", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_rfs_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_rec_ptr", + .field_bit_size = 26, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, 
+ BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "l2ip_act_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_TABLE_SCOPE >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_TABLE_SCOPE & 0xff} + }, + { + .description = "l2ip_act_hint", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_act_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "l2ip_meta", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2ip_meta_enb", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_opcode", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR2_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "prsv_prof_func_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prsv_l2ip_cntxt_id", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "parif", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prsv_parif", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 4, , table: port_table.egr_wr_0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.parent.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "phy_port", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "port_is_pf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "default_arec_ptr.roce", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: mod_record.vfr2vf */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_METADATA_TX_ACT_0 & 0xff} + }, + { + .description = "metadata_data", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_0 & 0xff} + }, + /* class_tid: 4, , table: cmm_full_act_record.vfr2vf_act */ + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr1 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR2_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR2_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "dest_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat0_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ing_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stat1_ctr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { 
+ .description = "mod_rec_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ptr", + .field_bit_size = 28, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_thor2_class_ident_list[] = { + /* class_tid: 1, , table: port_table.get_def_rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 137 + }, + { + .description = "phy_port", + .regfile_idx = BNXT_ULP_RF_IDX_PHY_PORT, + .ident_bit_size = 8, + .ident_bit_pos = 128 + }, + /* class_tid: 1, , table: l2_cntxt_tcam_cache.def_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + { + .description = "l2_cntxt_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .ident_bit_size = 11, + .ident_bit_pos = 32 + }, + { + .description = "prof_func_id", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 86 + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + { + .description = "cmm_stat_hndl", + .regfile_idx = BNXT_ULP_RF_IDX_CMM_STAT_HNDL_F1, + .ident_bit_size = 64, + .ident_bit_pos = 118 + }, + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + { + .description = "stat_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_F1, + .ident_bit_size = 64, + .ident_bit_pos = 54 + 
}, + /* class_tid: 1, , table: l2_cntxt_tcam.f1_f2_alloc_l2_cntxt */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + }, + /* class_tid: 1, , table: mac_addr_cache.l2_table_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + /* class_tid: 1, , table: l2_cntxt_tcam.allocate_l2_context */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + }, + /* class_tid: 1, , table: proto_header_cache.rd */ + { + .description = "em_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 50 + }, + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 42 + }, + { + .description = "em_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 74 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 66 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 58 + }, + { + .description = "wc_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 90 + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 40 + }, + { + 
.description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 32 + }, + /* class_tid: 1, , table: profile_tcam.allocate_wc_profile */ + { + .description = "wc_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_WC_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 12 + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 33 + }, + /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: port_table.get_def_rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 137 + }, + /* class_tid: 2, , table: l2_cntxt_tcam_cache.def_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + { + .description = "prof_func_id", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 86 + }, + /* class_tid: 2, , table: proto_header_cache.rd */ + { + .description = "em_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 50 + }, + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 42 + }, + { + .description = "em_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 74 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + 
.ident_bit_size = 10, + .ident_bit_pos = 32 + }, + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 66 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 58 + }, + { + .description = "wc_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 90 + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 40 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: profile_tcam.allocate_wc_profile */ + { + .description = "wc_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_WC_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 12 + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 33 + }, + /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 32 + }, + /* class_tid: 3, , table: table_scope_cache.tsid_ing_rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + { + .description = "prof_func", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 64 + }, + /* class_tid: 3, , table: profile_tcam_bypass.ing_catch_all */ + { + .description = 
"prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 0, + .ident_bit_pos = 0 + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.svif_ing */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.non_vfr_svif_egr */ + { + .description = "prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 100 + }, + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + }, + /* class_tid: 3, , table: table_scope_cache.tsid_vfr_rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.vf2vf_ing */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + }, + /* class_tid: 4, , table: table_scope_cache.tsid_vfr_egr_rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + { + .description = "parif", + .regfile_idx = BNXT_ULP_RF_IDX_RF_0, + 
.ident_bit_size = 5, + .ident_bit_pos = 72 + }, + { + .description = "prof_func", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 64 + }, + /* class_tid: 4, , table: profile_tcam_bypass.tsid_vfr_egr_catch_all */ + { + .description = "prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 0, + .ident_bit_pos = 0 + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.endpoint_def_egr_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 43 + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf2vf_egr */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = CFA_RSUBTYPE_IDENT_L2CTX, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 11, + .ident_bit_pos = 109 + } +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_act.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_act.c new file mode 100644 index 000000000000..adbd2af7c943 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_act.c @@ -0,0 +1,10166 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. 
+ */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header act list */ +struct bnxt_ulp_mapper_tmpl_info ulp_thor_act_tmpl_list[] = { + /* act_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 23, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 0, + .cond_nums = 0 } + }, + /* act_tid: 2, ingress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 23, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 21, + .cond_nums = 0 } + }, + /* act_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 10, + .start_tbl_idx = 24, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 21, + .cond_nums = 0 } + }, + /* act_tid: 4, ingress */ + [4] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 34, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 25, + .cond_nums = 0 } + }, + /* act_tid: 5, ingress */ + [5] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 35, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 25, + .cond_nums = 0 } + }, + /* act_tid: 6, ingress */ + [6] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 20, + .start_tbl_idx = 36, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 25, + .cond_nums = 0 } + }, + /* act_tid: 7, egress */ + [7] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 46, + .start_tbl_idx = 56, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 40, + .cond_nums = 0 } + }, + /* act_tid: 8, egress */ + [8] = { + .device_name = 
BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 102, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 88, + .cond_nums = 0 } + }, + /* act_tid: 9, egress */ + [9] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 103, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 88, + .cond_nums = 0 } + }, + /* act_tid: 10, egress */ + [10] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 104, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 88, + .cond_nums = 0 } + }, + /* act_tid: 11, egress */ + [11] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 1, + .start_tbl_idx = 105, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 88, + .cond_nums = 0 } + }, + /* act_tid: 12, egress */ + [12] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 11, + .start_tbl_idx = 106, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 88, + .cond_nums = 0 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_thor_act_tbl_list[] = { + { /* act_tid: 1, , table: flow_chain_cache.rd */ + .description = "flow_chain_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 0, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 0, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 1 + }, + { /* act_tid: 1, , 
table: control.flow_chain */ + .description = "control.flow_chain", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 1, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 0, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_oper_size = 16, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* act_tid: 1, , table: flow_chain_cache.write */ + .description = 
"flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 1, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 0, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + .description = "shared_meter_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 2, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 1, + .ident_nums = 1 + }, + { /* act_tid: 1, , table: control.meter_chk */ + .description = "control.meter_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to unknown meter.", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 
1, , table: shared_mirror_record.rd */ + .description = "shared_mirror_record.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 4, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 3, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 2, + .ident_nums = 1 + }, + { /* act_tid: 1, , table: control.mirror */ + .description = "control.mirror", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to non-existent handle", + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 7, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 5, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 1, , table: control.create */ + .description = "control.create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 6, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 1, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + 
.direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 6, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 2, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 1, , table: int_flow_counter_tbl.mirr */ + .description = "int_flow_counter_tbl.mirr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 6, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 7, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 1, , table: int_compact_act_record.mirr */ + .description = "int_compact_act_record.mirr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_COMPACT_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 7, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + 
.tbl_operand = BNXT_ULP_RF_IDX_MIRR_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 8, + .result_bit_size = 64, + .result_num_fields = 13 + }, + { /* act_tid: 1, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 7, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 21, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 1, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 7, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 4, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 26, + .result_bit_size = 36, + .result_num_fields = 2 + }, + { /* 
act_tid: 1, , table: control.do_mod */ + .description = "control.do_mod", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 7, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_AND, + .func_src1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_opr1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = BNXT_ULP_ACT_BIT_DEC_TTL | + BNXT_ULP_ACT_BIT_SET_MAC_SRC | + BNXT_ULP_ACT_BIT_SET_MAC_DST | + BNXT_ULP_ACT_BIT_SET_IPV4_SRC | + BNXT_ULP_ACT_BIT_SET_IPV4_DST | + BNXT_ULP_ACT_BIT_SET_IPV6_SRC | + BNXT_ULP_ACT_BIT_SET_IPV6_DST | + BNXT_ULP_ACT_BIT_SET_TP_SRC | + BNXT_ULP_ACT_BIT_SET_TP_DST | + BNXT_ULP_ACT_BIT_GOTO_CHAIN, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 } + }, + { /* act_tid: 1, , table: mod_record.ing_ttl */ + .description = "mod_record.ing_ttl", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 8, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 28, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 35 + }, + { /* act_tid: 1, , table: mod_record.ing_no_ttl */ + .description = "mod_record.ing_no_ttl", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 9, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 63, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 28 + }, + { /* act_tid: 1, , table: control.queue_and_rss_test */ + .description = "control.queue_and_rss_test", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to both queue and rss set", + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 10, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 1, , table: vnic_interface_rss_config.0 */ + .description = "vnic_interface_rss_config.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 12, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 91, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: vnic_interface_queue_config.0 */ + .description = "vnic_interface_queue_config.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE, + 
.direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 13, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 91, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 1, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 14, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 91, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 1, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 15, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 
92, + .result_bit_size = 128, + .result_num_fields = 17, + .encap_num_fields = 0 + }, + { /* act_tid: 2, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 21, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: control.delete_chk */ + .description = "control.delete_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 21, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: shared_mirror_record.del_chk */ + .description = "shared_mirror_record.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 22, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 5, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 3, + .ident_nums = 1 + }, + { /* act_tid: 3, , table: control.mirror_del_exist_chk */ + .description = "control.mirror_del_exist_chk", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 22, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 3, , table: control.mirror_ref_cnt_chk */ + .description = "control.mirror_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 23, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 3, , table: control.create */ + .description = "control.create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 24, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 3, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 24, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 109, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 3, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 24, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 114, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 3, , table: int_compact_act_record.0 */ + .description = "int_compact_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_COMPACT_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = 
BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 115, + .result_bit_size = 64, + .result_num_fields = 13, + .encap_num_fields = 0 + }, + { /* act_tid: 3, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 128, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 3, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_INC, + .key_start_idx = 6, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 133, + .result_bit_size = 36, + .result_num_fields = 2 + }, + { /* act_tid: 4, , table: control.reject */ + .description = "control.reject", + 
.resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 5, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: control.create_check */ + .description = "control.create_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 11, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 25, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.rd */ + .description = "meter_profile_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 27, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + 
.key_start_idx = 7, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 4, + .ident_nums = 0 + }, + { /* act_tid: 6, , table: control.shared_meter_profile_0 */ + .description = "control.shared_meter_profile_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 28, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 6, , table: meter_profile_tbl.0 */ + .description = "meter_profile_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_PROF, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 29, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 135, + .result_bit_size = 65, + .result_num_fields = 11 + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + .description = "meter_profile_tbl_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_METER_PROF, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 29, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + 
.gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 8, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 146, + .result_bit_size = 42, + .result_num_fields = 2 + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.rd */ + .description = "shared_meter_tbl_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 29, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 9, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 4, + .ident_nums = 0 + }, + { /* act_tid: 6, , table: control.meter_created_chk */ + .description = "control.meter_created_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 30, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + .description = "meter_profile_tbl_cache.rd2", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 31, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 10, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 4, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: control.shared_meter_profile_chk */ + .description = "control.shared_meter_profile_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 31, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_tbl.0 */ + .description = "meter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 32, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 148, + .result_bit_size = 64, + .result_num_fields = 5 + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + .description = "shared_meter_tbl_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 32, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 11, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 153, + .result_bit_size = 74, + .result_num_fields = 3 + }, + { /* act_tid: 6, , table: control.delete_check */ + .description = "control.delete_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 32, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: meter_profile_tbl_cache.del_chk */ + .description = "meter_profile_tbl_cache.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 33, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 12, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 5, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: control.mtr_prof_ref_cnt_chk 
*/ + .description = "control.mtr_prof_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 34, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + .description = "shared_meter_tbl_cache.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 35, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 13, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 6, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: control.shared_mtr_ref_cnt_chk */ + .description = "control.shared_mtr_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 36, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = 
BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 6, , table: control.update_check */ + .description = "control.update_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 37, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + .description = "shared_meter_tbl_cache.rd_update", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 37, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 14, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 7, + .ident_nums = 1 + }, + { /* act_tid: 6, , table: meter_tbl.update_rd */ + .description = "meter_tbl.update_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 38, + .cond_nums = 2 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 8, + .ident_nums = 3, + .result_bit_size = 64 + }, + { /* act_tid: 6, , table: meter_tbl.update_wr */ + .description = "meter_tbl.update_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_METER_INST, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 40, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_METER_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 156, + .result_bit_size = 64, + .result_num_fields = 5 + }, + { /* act_tid: 7, , table: flow_chain_cache.rd */ + .description = "flow_chain_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 40, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 15, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 11, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.no_jump_vf_to_vf */ + .description = "control.no_jump_vf_to_vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = 
"Reject: unsupported vf_to_vf + jump.", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 41, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.flow_chain */ + .description = "control.flow_chain", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 42, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 161, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 7, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + 
.func_oper_size = 16, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* act_tid: 7, , table: flow_chain_cache.write */ + .description = "flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 16, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 161, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 43, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 163, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 7, , table: shared_mirror_record.rd */ + .description = "shared_mirror_record.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type 
= TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 9, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 44, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 17, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 12, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.mirror */ + .description = "control.mirror", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Reject due to non-existent shared handle", + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 45, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.create */ + .description = "control.create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 46, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + 
.cond_start_idx = 46, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 164, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 7, , table: mod_record.vf_2_vf */ + .description = "mod_record.vf_2_vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 46, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 169, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* act_tid: 7, , table: int_flow_counter_tbl.mirr */ + .description = "int_flow_counter_tbl.mirr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 46, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + 
.mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 189, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 7, , table: int_full_act_record.mirr */ + .description = "int_full_act_record.mirr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 47, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_META_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 190, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* act_tid: 7, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 47, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 207, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 7, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + 
.resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 47, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 18, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 212, + .result_bit_size = 36, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: control.do_mod */ + .description = "control.do_mod", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 47, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_AND, + .func_src1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_opr1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = BNXT_ULP_ACT_BIT_DEC_TTL | + BNXT_ULP_ACT_BIT_SET_MAC_SRC | + BNXT_ULP_ACT_BIT_SET_MAC_DST | + BNXT_ULP_ACT_BIT_SET_IPV4_SRC | + BNXT_ULP_ACT_BIT_SET_IPV4_DST | + BNXT_ULP_ACT_BIT_SET_IPV6_SRC | + BNXT_ULP_ACT_BIT_SET_IPV6_DST | + BNXT_ULP_ACT_BIT_SET_TP_SRC | + BNXT_ULP_ACT_BIT_SET_TP_DST | + BNXT_ULP_ACT_BIT_GOTO_CHAIN | + BNXT_ULP_ACT_BIT_VF_TO_VF, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 } + }, + { /* act_tid: 7, , table: mod_record.ing_ttl */ + .description = "mod_record.ing_ttl", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = 
TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 48, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 214, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 35 + }, + { /* act_tid: 7, , table: mod_record.ing_no_ttl */ + .description = "mod_record.ing_no_ttl", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 59, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 249, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 28 + }, + { /* act_tid: 7, , table: control.do_tunnel_check */ + .description = "control.do_tunnel_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 24, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 69, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.do_tunnel_vlan_exclusion */ + .description = "control.do_tunnel_vlan_exclusion", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Tunnel Encap + Push VLAN unsupported.", 
+ .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 71, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: source_property_cache.rd */ + .description = "source_property_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 72, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 19, + .blob_key_bit_size = 85, + .key_bit_size = 85, + .key_num_fields = 3, + .ident_start_idx = 13, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.sp_rec_v4 */ + .description = "control.sp_rec_v4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 7, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 73, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: sp_smac_ipv4.0 */ + .description = "sp_smac_ipv4.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 74, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 16, + .result_start_idx = 277, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 3 + }, + { /* act_tid: 7, , table: source_property_cache.wr */ + .description = "source_property_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 5, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 74, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 22, + .blob_key_bit_size = 85, + .key_bit_size = 85, + .key_num_fields = 3, + .result_start_idx = 280, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + .description = "source_property_ipv6_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 74, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 25, + .blob_key_bit_size = 176, + .key_bit_size = 176, + .key_num_fields = 2, + .ident_start_idx = 14, + .ident_nums = 1 
+ }, + { /* act_tid: 7, , table: control.sp_rec_v6 */ + .description = "control.sp_rec_v6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 75, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: sp_smac_ipv6.0 */ + .description = "sp_smac_ipv6.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV6, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 76, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 32, + .result_start_idx = 282, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 3 + }, + { /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + .description = "source_property_ipv6_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 76, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 27, + .blob_key_bit_size = 176, + 
.key_bit_size = 176, + .key_num_fields = 2, + .result_start_idx = 285, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: control.do_vxlan_check */ + .description = "control.do_vxlan_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 9, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 76, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + .description = "vxlan_encap_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 77, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 29, + .blob_key_bit_size = 141, + .key_bit_size = 141, + .key_num_fields = 6, + .ident_start_idx = 15, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.vxlan_v4_encap */ + .description = "control.vxlan_v4_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 13, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 78, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: int_tun_encap_record.ipv4_vxlan */ + .description = "int_tun_encap_record.ipv4_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + 
.resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 79, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 287, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 25 + }, + { /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + .description = "vxlan_encap_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 11, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 79, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 35, + .blob_key_bit_size = 141, + .key_bit_size = 141, + .key_num_fields = 6, + .result_start_idx = 312, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.rd */ + .description = "vxlan_encap_ipv6_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 79, + .cond_nums = 1 }, + .tbl_opcode = 
BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 41, + .blob_key_bit_size = 237, + .key_bit_size = 237, + .key_num_fields = 6, + .ident_start_idx = 16, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.vxlan_v6_encap */ + .description = "control.vxlan_v6_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 9, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 80, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: int_tun_encap_record.ipv6_vxlan */ + .description = "int_tun_encap_record.ipv6_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 81, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 314, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 23 + }, + { /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + .description = "vxlan_encap_ipv6_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 7, + .cond_false_goto = 1, + 
.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 81, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 47, + .blob_key_bit_size = 237, + .key_bit_size = 237, + .key_num_fields = 6, + .result_start_idx = 337, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + .description = "geneve_encap_rec_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 81, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 53, + .blob_key_bit_size = 493, + .key_bit_size = 493, + .key_num_fields = 15, + .ident_start_idx = 17, + .ident_nums = 1 + }, + { /* act_tid: 7, , table: control.geneve_encap */ + .description = "control.geneve_encap", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 81, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 7, , table: int_geneve_encap_record.ipv4_geneve */ + .description = "int_geneve_encap_record.ipv4_geneve", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 82, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 339, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 31 + }, + { /* act_tid: 7, , table: int_geneve_encap_record.ipv6_geneve */ + .description = "int_geneve_encap_record.ipv6_geneve", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .false_message = "Geneve outer hdr must be v4 or v6.", + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 83, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 370, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 29 + }, + { /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + .description = "geneve_encap_rec_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 84, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + 
.gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 68, + .blob_key_bit_size = 493, + .key_bit_size = 493, + .key_num_fields = 15, + .result_start_idx = 399, + .result_bit_size = 64, + .result_num_fields = 2 + }, + { /* act_tid: 7, , table: int_vtag_encap_record.0 */ + .description = "int_vtag_encap_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 84, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 401, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 7, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 85, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_AND, + .func_src1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_opr1 = BNXT_ULP_FUNC_SRC_ACTION_BITMAP, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 
= BNXT_ULP_ACT_BIT_DEC_TTL | + BNXT_ULP_ACT_BIT_SET_MAC_SRC | + BNXT_ULP_ACT_BIT_SET_MAC_DST | + BNXT_ULP_ACT_BIT_SET_IPV4_SRC | + BNXT_ULP_ACT_BIT_SET_IPV4_DST | + BNXT_ULP_ACT_BIT_SET_IPV6_SRC | + BNXT_ULP_ACT_BIT_SET_IPV6_DST | + BNXT_ULP_ACT_BIT_SET_TP_SRC | + BNXT_ULP_ACT_BIT_SET_TP_DST | + BNXT_ULP_ACT_BIT_GOTO_CHAIN | + BNXT_ULP_ACT_BIT_VF_TO_VF | + BNXT_ULP_ACT_BIT_VXLAN_ENCAP | + BNXT_ULP_ACT_BIT_GENEVE_ENCAP | + BNXT_ULP_ACT_BIT_PUSH_VLAN, + .func_dst_opr = BNXT_ULP_RF_IDX_RF_0 }, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 412, + .result_bit_size = 128, + .result_num_fields = 17, + .encap_num_fields = 0 + }, + { /* act_tid: 7, , table: int_compact_act_record.0 */ + .description = "int_compact_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_COMPACT_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 429, + .result_bit_size = 64, + .result_num_fields = 13, + .encap_num_fields = 0 + }, + { /* act_tid: 8, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode 
= BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 9, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 10, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 11, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Thor not supporting non-generic template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 88, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 12, , table: control.delete_chk */ + .description = "control.delete_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 88, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 12, , table: shared_mirror_record.del_chk */ + .description = 
"shared_mirror_record.del_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 89, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_NOP, + .key_start_idx = 83, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 18, + .ident_nums = 1 + }, + { /* act_tid: 12, , table: control.mirror_del_exist_chk */ + .description = "control.mirror_del_exist_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 89, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 12, , table: control.mirror_ref_cnt_chk */ + .description = "control.mirror_ref_cnt_chk", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 90, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_REF_CNT, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = 1, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* act_tid: 12, , table: 
control.create */ + .description = "control.create", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 12, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 91, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 442, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 12, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 91, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, 
+ .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 447, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 12, , table: mod_record.vf_2_vf */ + .description = "mod_record.vf_2_vf", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 92, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 448, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* act_tid: 12, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 92, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 468, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* act_tid: 12, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = 
/*
 * NOTE(review): This span is part of a git patch body ("+"-prefixed diff
 * lines that have been re-wrapped into long physical lines), not directly
 * compilable C.  It carries: the tail of the TX action-template table list
 * (act_tid 12 mirror/shared-mirror write entries), the empty
 * ulp_thor_act_cond_oper_list[], the complete ulp_thor_act_cond_list[]
 * (condition opcode/operand pairs indexed by each table's cond_start_idx),
 * and the head of ulp_thor_act_key_info_list[] for the Broadcom bnxt Thor
 * ULP mapper.  These tables appear to be auto-generated template output --
 * presumably regenerated from template sources rather than hand-edited;
 * TODO confirm against the driver's template generator before changing
 * individual entries or index values.
 */
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 92, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 485, + .result_bit_size = 32, + .result_num_fields = 5 + }, + { /* act_tid: 12, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 92, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .ref_cnt_opcode = BNXT_ULP_REF_CNT_OPC_INC, + .key_start_idx = 84, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 490, + .result_bit_size = 36, + .result_num_fields = 2 + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_thor_act_cond_oper_list[] = { +}; + +struct bnxt_ulp_mapper_cond_info ulp_thor_act_cond_list[] = { + /* cond_execute: act_tid: 1, flow_chain_cache.rd:0*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 1, control.flow_chain:1*/ 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, shared_meter_tbl_cache.rd:2*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER + }, + /* cond_execute: act_tid: 1, control.meter_chk:3*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, shared_mirror_record.rd:4*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 1, control.mirror:5*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 1, int_flow_counter_tbl.mirr:6*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 1, control.do_mod:7*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_RF_0 + }, + /* cond_execute: act_tid: 1, mod_record.ing_ttl:8*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* cond_execute: act_tid: 1, mod_record.ing_no_ttl:9*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* cond_execute: act_tid: 1, control.queue_and_rss_test:10*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 1, vnic_interface_rss_config.0:12*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 1, vnic_interface_queue_config.0:13*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* cond_execute: act_tid: 1, 
int_flow_counter_tbl.0:14*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* field_cond: act_tid: 1, int_full_act_record.0:15*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_DECAP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GENEVE_DECAP + }, + /* field_cond: act_tid: 1, int_full_act_record.0:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DROP + }, + /* field_cond: act_tid: 1, int_full_act_record.0:18*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* field_cond: act_tid: 1, int_full_act_record.0:20*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 3, control.delete_chk:21*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 3, control.mirror_del_exist_chk:22*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 3, control.mirror_ref_cnt_chk:23*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 3, int_flow_counter_tbl.0:24*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 6, control.create_check:25*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_UPDATE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 6, meter_profile_tbl_cache.rd:27*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_ACT_BIT_METER_PROFILE + }, + /* cond_execute: act_tid: 6, control.shared_meter_profile_0:28*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, shared_meter_tbl_cache.rd:29*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, control.meter_created_chk:30*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, control.shared_meter_profile_chk:31*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 6, control.delete_check:32*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 6, meter_profile_tbl_cache.del_chk:33*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER_PROFILE + }, + /* cond_execute: act_tid: 6, control.mtr_prof_ref_cnt_chk:34*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 6, shared_meter_tbl_cache.del_chk:35*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, control.shared_mtr_ref_cnt_chk:36*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 6, shared_meter_tbl_cache.rd_update:37*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_METER + }, + /* cond_execute: act_tid: 6, meter_tbl.update_rd:38*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_PROP_NOT_SET, + .cond_operand = 
BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID_UPDATE + }, + /* cond_execute: act_tid: 7, flow_chain_cache.rd:40*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* cond_execute: act_tid: 7, control.no_jump_vf_to_vf:41*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* cond_execute: act_tid: 7, control.flow_chain:42*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, int_flow_counter_tbl.0:43*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 7, shared_mirror_record.rd:44*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 7, control.mirror:45*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, int_flow_counter_tbl.mirr:46*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 7, control.do_mod:47*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_RF_0 + }, + /* cond_execute: act_tid: 7, mod_record.ing_ttl:48*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DEC_TTL + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:49*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:51*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:52*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:53*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:55*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_ttl:57*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_no_ttl:59*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_no_ttl:61*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, mod_record.ing_no_ttl:62*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_no_ttl:63*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* field_cond: act_tid: 7, mod_record.ing_no_ttl:65*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* 
field_cond: act_tid: 7, mod_record.ing_no_ttl:67*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* cond_execute: act_tid: 7, control.do_tunnel_check:69*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_ENCAP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GENEVE_ENCAP + }, + /* cond_execute: act_tid: 7, control.do_tunnel_vlan_exclusion:71*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 7, source_property_cache.rd:72*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG + }, + /* cond_execute: act_tid: 7, control.sp_rec_v4:73*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, source_property_ipv6_cache.rd:74*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG + }, + /* cond_execute: act_tid: 7, control.sp_rec_v6:75*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, control.do_vxlan_check:76*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VXLAN_ENCAP + }, + /* cond_execute: act_tid: 7, vxlan_encap_rec_cache.rd:77*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: act_tid: 7, control.vxlan_v4_encap:78*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, vxlan_encap_ipv6_rec_cache.rd:79*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* cond_execute: act_tid: 7, control.vxlan_v6_encap:80*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, control.geneve_encap:81*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 7, int_geneve_encap_record.ipv4_geneve:82*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: act_tid: 7, int_geneve_encap_record.ipv6_geneve:83*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* cond_execute: act_tid: 7, int_vtag_encap_record.0:84*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 7, int_full_act_record.0:85*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_RF_0 + }, + /* field_cond: act_tid: 7, int_full_act_record.0:86*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_GOTO_CHAIN + }, + /* field_cond: act_tid: 7, int_full_act_record.0:87*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_VF_TO_VF + }, + /* cond_execute: act_tid: 12, control.delete_chk:88*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_DELETE + }, + /* cond_execute: act_tid: 12, control.mirror_del_exist_chk:89*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 12, control.mirror_ref_cnt_chk:90*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: act_tid: 12, int_flow_counter_tbl.0:91*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_ACT_BIT_COUNT + } +}; + +struct bnxt_ulp_mapper_key_info ulp_thor_act_key_info_list[] = { + /* act_tid: 1, , table: flow_chain_cache.rd */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 1, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER & 0xff} + } + }, + /* act_tid: 1, , table: shared_mirror_record.rd */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + 
.field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID & 0xff} + } + }, + /* act_tid: 1, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID & 0xff} + } + }, + /* act_tid: 3, , table: shared_mirror_record.del_chk */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd */ + { + .field_info_mask = { + .description = 
"sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.del_chk */ + { + .field_info_mask = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + { + .field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + { + 
.field_info_mask = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "sw_meter_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ID & 0xff} + } + }, + /* act_tid: 7, , table: flow_chain_cache.rd */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 7, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN & 0xff} + } + }, + /* act_tid: 7, , table: shared_mirror_record.rd */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID & 0xff} + } + }, + /* act_tid: 7, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID & 0xff} + } + }, + /* act_tid: 7, , table: source_property_cache.rd */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: source_property_cache.wr */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 
0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + } + }, + /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + { + .field_info_mask = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + } + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { 
+ 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = 
{ + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.rd */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 
0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w3", + .field_bit_size = 32, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + { + .field_info_mask = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 
0xff, + 0xff} + }, + .field_info_spec = { + .description = "dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv4_dst_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ipv6_dst_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 
0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + } + }, + { + .field_info_mask = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + } + }, + { + .field_info_mask = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w2", + .field_bit_size = 32, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* act_tid: 12, , table: shared_mirror_record.del_chk */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 12, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor_act_key_ext_list[] = { + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (18 >> 8) & 0xff, + 18 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RSS_VNIC >> 8) & 0xff, + BNXT_ULP_RF_IDX_RSS_VNIC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (1 >> 8) & 0xff, + 1 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (20 >> 8) & 0xff, + 20 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR_SYM_RECYCLE_DST >> 8) & 0xff, + ULP_THOR_SYM_RECYCLE_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr3 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (52 >> 8) & 0xff, + 52 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr2 = { + (BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (62 >> 8) & 0xff, + 62 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr2 = { + (BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (87 >> 8) & 0xff, + 87 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR_SYM_LOOPBACK_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr3 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + } +}; + +struct 
bnxt_ulp_mapper_field_info ulp_thor_act_result_field_list[] = { + /* act_tid: 1, , table: jump_index_table.alloc */ + /* act_tid: 1, , table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* act_tid: 1, , table: mirror_tbl.alloc */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 1, , table: int_flow_counter_tbl.mirr */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 1, , table: int_compact_act_record.mirr */ + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 
10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_MIRR_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_MIRR_VNIC & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 1, , table: mirror_tbl.wr */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRR_ACTION_PTR >> 8) & 0xff, + 
BNXT_ULP_RF_IDX_MIRR_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR >> 8) & 0xff, + BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 1, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + }, + /* act_tid: 1, , table: mod_record.ing_ttl */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC 
>> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 
0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_METADATA_OP_NORMAL}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_META_PROFILE_0}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "alt_pfid", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "alt_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_rsvd", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "ttl_il3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "ttl_tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_il3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + 
(uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 1, , table: mod_record.ing_no_ttl */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 
0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_METADATA_OP_NORMAL}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_GOTO_CHAIN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_META_PROFILE_0}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 1, , table: vnic_interface_rss_config.0 */ + /* act_tid: 1, , table: vnic_interface_queue_config.0 */ + /* act_tid: 1, , table: int_flow_counter_tbl.0 */ 
+ { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 1, , table: int_full_act_record.0 */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (15 >> 8) & 0xff, + 15 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_DECAP_FUNC_THRU_TUN}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PTR_0 & 0xff} + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + 
BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (17 >> 8) & 0xff, + 17 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_VLAN_DEL_RPT_STRIP_OUTER}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 3, , table: mirror_tbl.alloc */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 3, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: int_compact_act_record.0 */ + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: mirror_tbl.wr */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + }, + /* act_tid: 6, , table: meter_profile_tbl.0 */ + { + .description = "cf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF & 0xff} + }, + { + .description = "pm", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM & 0xff} + }, + { + .description = "rfc2698", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698 >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698 & 0xff} + }, + { + .description = "cbsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM & 0xff} + }, + { + .description = "ebsm", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM & 0xff} + }, + { + .description = "cbnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND & 0xff} + }, + { + .description = "ebnd", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND & 0xff} + }, + { + .description = "cbs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS & 0xff} + }, + { + .description = "ebs", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS >> 8) & 
0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS & 0xff} + }, + { + .description = "cir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR & 0xff} + }, + { + .description = "eir", + .field_bit_size = 17, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR & 0xff} + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "meter_profile_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 & 0xff} + }, + /* act_tid: 6, , table: meter_tbl.0 */ + { + .description = "bkt_c", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "bkt_e", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "mtr_val", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL & 0xff} + }, + { + .description = "ecn_rmp_en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN & 0xff} + }, + { + .description = "meter_profile", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 & 0xff} + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "meter_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PTR_0 & 0xff} + }, + { + .description = "sw_meter_profile_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID & 0xff} + }, + /* act_tid: 6, , table: meter_tbl.update_wr */ + { + .description = "bkt_c", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "bkt_e", + .field_bit_size = 27, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (134217727 >> 24) & 0xff, + (134217727 >> 16) & 0xff, + (134217727 >> 8) & 0xff, + 134217727 & 0xff} + }, + { + .description = "mtr_val", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL_UPDATE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_RF, + .field_opr3 = { + (BNXT_ULP_RF_IDX_RF_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_0 & 0xff} + }, + { + .description = "ecn_rmp_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN_UPDATE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_RF, + .field_opr3 = { + (BNXT_ULP_RF_IDX_RF_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_RF_1 & 0xff} + }, + { + .description = "meter_profile", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: jump_index_table.alloc */ + /* act_tid: 7, , table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* act_tid: 7, , table: int_flow_counter_tbl.0 */ + { + 
.description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: mirror_tbl.alloc */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 7, , table: mod_record.vf_2_vf */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: int_flow_counter_tbl.mirr */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: int_full_act_record.mirr */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_1 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 7, , table: mirror_tbl.wr */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_META_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_META_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 7, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: mod_record.ing_ttl */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (49 >> 8) & 0xff, + 49 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = 
"tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (51 >> 8) & 0xff, + 51 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (2 >> 8) & 0xff, + 2 & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (53 >> 8) & 0xff, + 53 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (55 >> 8) & 0xff, + 55 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (57 >> 8) & 0xff, + 57 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "alt_pfid", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "alt_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "ttl_rsvd", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_tl3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "ttl_il3_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "ttl_tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_il3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, 
+ { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 7, , table: mod_record.ing_no_ttl */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (59 >> 8) & 0xff, + 59 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (61 >> 8) & 0xff, + 61 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (3 >> 8) & 0xff, + 3 & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (63 >> 8) & 0xff, + 63 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (65 >> 8) & 0xff, + 65 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (67 >> 8) & 0xff, + 67 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_MAC_SRC & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV6_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_sip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3_dip_ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_IPV4_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 7, , table: sp_smac_ipv4.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: source_property_cache.wr */ + { + .description 
= "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + /* act_tid: 7, , table: sp_smac_ipv6.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 7, , table: source_property_ipv6_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + /* act_tid: 7, , table: int_tun_encap_record.ipv4_vxlan */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN 
>> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff} + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff} + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff} + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff} + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff} + }, + { + .description = "enc_ipv4_proto", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff} + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff} + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff} + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + }, + { + .description = "enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + 
BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff} + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: int_tun_encap_record.ipv6_vxlan */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff} + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff} + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff} + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff} + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff} + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + 
BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff} + }, + { + .description = "enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff} + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: int_geneve_encap_record.ipv4_geneve */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + 
.field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_TUN_TYPE_NGE} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + 
(uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff} + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff} + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff} + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff} + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff} + }, + { + .description = "enc_ipv4_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff} + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_geneve_ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + }, + { + .description = "enc_geneve_proto_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + }, + { + .description = "enc_geneve_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + }, + { + .description = "enc_geneve_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 & 0xff} + }, + { + .description = "enc_geneve_opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + }, + { + .description = "enc_geneve_opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + }, + { + .description = "enc_geneve_opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = 
{ + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + }, + { + .description = "enc_geneve_opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + }, + { + .description = "enc_geneve_opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + }, + { + .description = "enc_geneve_opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + }, + /* act_tid: 7, , table: int_geneve_encap_record.ipv6_geneve */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + 
BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_TUN_TYPE_NGE} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 
16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff} + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff} + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff} + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff} + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff} + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff} + }, + { + .description = "enc_geneve_ver_opt_len_o_c_rsvd0", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VER_OPT_LEN_O_C_RSVD0 & 0xff} + }, + { + .description = "enc_geneve_proto_type", + .field_bit_size = 16, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_PROTO_TYPE & 0xff} + }, + { + .description = "enc_geneve_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_VNI & 0xff} + }, + { + .description = "enc_geneve_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_RSVD1 & 0xff} + }, + { + .description = "enc_geneve_opt_w0", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W0 & 0xff} + }, + { + .description = "enc_geneve_opt_w1", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W1 & 0xff} + }, + { + .description = "enc_geneve_opt_w2", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W2 & 0xff} + }, + { + .description = "enc_geneve_opt_w3", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W3 & 0xff} + }, + { + .description = "enc_geneve_opt_w4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + 
(BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W4 & 0xff} + }, + { + .description = "enc_geneve_opt_w5", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_GENEVE_OPT_W5 & 0xff} + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "enc_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + /* act_tid: 7, , table: int_vtag_encap_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN & 0xff} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP & 0xff} + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID & 0xff} + }, + /* act_tid: 7, , table: int_full_act_record.0 */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (86 >> 8) & 0xff, + 86 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (ULP_THOR_SYM_RECYCLE_DST >> 8) & 0xff, + ULP_THOR_SYM_RECYCLE_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (4 >> 8) & 0xff, + 4 & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr2 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 7, , table: int_compact_act_record.0 */ + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 12, , table: mirror_tbl.alloc */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 12, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 12, , table: mod_record.vf_2_vf */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 12, , table: int_full_act_record.0 */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} 
+ }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 12, , table: mirror_tbl.wr */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 13, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ignore_drop", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "copy_ing_or_egr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* act_tid: 12, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_thor_act_ident_list[] = { + /* act_tid: 1, , table: flow_chain_cache.rd */ + { + .description = "metadata", + .regfile_idx = BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 1, , table: shared_meter_tbl_cache.rd */ + { + .description = "meter_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PTR_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* act_tid: 1, , table: shared_mirror_record.rd */ + { + .description = "mirror_id", + .regfile_idx = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .ident_bit_size = 4, + .ident_bit_pos = 32 + }, + /* act_tid: 3, , table: shared_mirror_record.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: meter_profile_tbl_cache.rd2 */ + { + .description = "meter_profile_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* act_tid: 6, , table: 
meter_profile_tbl_cache.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + }, + /* act_tid: 6, , table: shared_meter_tbl_cache.rd_update */ + { + .description = "meter_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PTR_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* act_tid: 6, , table: meter_tbl.update_rd */ + { + .description = "ecn_rmp_en", + .regfile_idx = BNXT_ULP_RF_IDX_RF_1, + .ident_bit_size = 1, + .ident_bit_pos = 55 + }, + { + .description = "meter_profile", + .regfile_idx = BNXT_ULP_RF_IDX_METER_PROFILE_PTR_0, + .ident_bit_size = 8, + .ident_bit_pos = 56 + }, + { + .description = "mtr_val", + .regfile_idx = BNXT_ULP_RF_IDX_RF_0, + .ident_bit_size = 1, + .ident_bit_pos = 54 + }, + /* act_tid: 7, , table: flow_chain_cache.rd */ + { + .description = "metadata", + .regfile_idx = BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: shared_mirror_record.rd */ + { + .description = "mirror_id", + .regfile_idx = BNXT_ULP_RF_IDX_MIRROR_ID_0, + .ident_bit_size = 4, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: source_property_cache.rd */ + { + .description = "sp_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: source_property_ipv6_cache.rd */ + { + .description = "sp_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: vxlan_encap_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: vxlan_encap_ipv6_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = 
BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* act_tid: 7, , table: geneve_encap_rec_cache.rd */ + { + .description = "enc_rec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .ident_bit_size = 32, + .ident_bit_pos = 32 + }, + /* act_tid: 12, , table: shared_mirror_record.del_chk */ + { + .description = "rid", + .regfile_idx = BNXT_ULP_RF_IDX_RID, + .ident_bit_size = 32, + .ident_bit_pos = 0 + } +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_class.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_class.c new file mode 100644 index 000000000000..e0cbbafe870c --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_thor_class.c @@ -0,0 +1,50716 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header class list */ +struct bnxt_ulp_mapper_tmpl_info ulp_thor_class_tmpl_list[] = { + /* class_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 51, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 0, + .cond_nums = 2 } + }, + /* class_tid: 2, egress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 33, + .start_tbl_idx = 51, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1587, + .cond_nums = 2 } + }, + /* class_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 23, + .start_tbl_idx = 84, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 3149, + .cond_nums = 0 } + }, + /* class_tid: 4, egress */ + [4] = { + .device_name = 
BNXT_ULP_DEVICE_ID_THOR, + .num_tbls = 35, + .start_tbl_idx = 107, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 3153, + .cond_nums = 0 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_thor_class_tbl_list[] = { + { /* class_tid: 1, , table: port_table.rd */ + .description = "port_table.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 0, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 3 + }, + { /* class_tid: 1, , table: l2_cntxt_tcam_cache.rd */ + .description = "l2_cntxt_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 1, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 3, + .ident_nums = 3 + }, + { /* class_tid: 1, , table: control.check_f1_f2_flow */ + .description = "control.check_f1_f2_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 2, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + .description = "tunnel_cache.f1_f2_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 4, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 2, + .blob_key_bit_size = 19, + .key_bit_size = 19, + .key_num_fields = 2, + .ident_start_idx = 6, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.tunnel_cache_check */ + .description = "control.tunnel_cache_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 4, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.f1_f2_alloc_l2_cntxt */ + .description = "l2_cntxt_tcam.f1_f2_alloc_l2_cntxt", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 5, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 7, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + .description = "tunnel_cache.f1_f2_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 5, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 4, + .blob_key_bit_size = 19, + .key_bit_size = 19, + .key_num_fields = 2, + .result_start_idx = 0, + .result_bit_size = 52, + .result_num_fields = 3 + }, + { /* class_tid: 1, , table: control.check_f2_flow */ + .description = "control.check_f2_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 18, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 5, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.dmac_calculation */ + .description = "control.dmac_calculation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 6, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = 
BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 48, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 0, + .func_dst_opr = BNXT_ULP_RF_IDX_O_DMAC } + }, + { /* class_tid: 1, , table: control.group_id_check */ + .description = "control.group_id_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 10, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 10, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: flow_chain_cache.group_check */ + .description = "flow_chain_cache.group_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 11, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 6, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 8, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.flow_chain_group_id */ + .description = "control.flow_chain_group_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 11, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 12, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 3, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 12, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_oper_size = 16, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* class_tid: 1, , table: flow_chain_cache.write */ + .description = "flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 12, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + 
.key_start_idx = 7, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 3, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: flow_chain_l2_cntxt.group_check */ + .description = "flow_chain_l2_cntxt.group_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 12, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 8, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 9, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.flow_chain_group_l2_cntxt_check */ + .description = "control.flow_chain_group_l2_cntxt_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 13, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.chain_entry */ + .description = "l2_cntxt_tcam.chain_entry", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 14, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 9, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 5, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 10, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: flow_chain_l2_cntxt.write */ + .description = "flow_chain_l2_cntxt.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 6, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 16, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 30, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 11, + .result_bit_size = 42, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: mac_addr_cache.rd */ + .description = "mac_addr_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 16, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 31, + .blob_key_bit_size = 174, + .key_bit_size = 174, + .key_num_fields = 9, + 
.ident_start_idx = 11, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.mac_addr_cache_miss */ + .description = "control.mac_addr_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 19, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.allocate_l2_context */ + .description = "l2_cntxt_tcam.allocate_l2_context", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 20, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 12, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.ingress_entry */ + .description = "l2_cntxt_tcam.ingress_entry", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 22, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = 
BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 40, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 13, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 13, + .ident_nums = 0 + }, + { /* class_tid: 1, , table: mac_addr_cache.wr */ + .description = "mac_addr_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 25, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 61, + .blob_key_bit_size = 174, + .key_bit_size = 174, + .key_num_fields = 9, + .result_start_idx = 19, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 1, , table: control.check_f1_flow */ + .description = "control.check_f1_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 27, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.tunnel_ipv6_sip_check */ + .description = "control.tunnel_ipv6_sip_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "reject ipv6 tunnel flow with tunnel source ip", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 0, + .cond_nums = 1 }, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.l2_only_check */ + .description = "control.l2_only_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Reject due to missing Ethertype for L2 flows", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 1, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: control.terminating_flow */ + .description = "control.terminating_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 3, + .func_dst_opr = BNXT_ULP_RF_IDX_TERM_FLOW } + }, + { /* class_tid: 1, , table: proto_header_cache.rd */ + .description = "proto_header_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 49, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 70, + .blob_key_bit_size = 75, + .key_bit_size = 75, + .key_num_fields = 3, + .ident_start_idx = 13, + .ident_nums = 7 + }, + 
{ /* class_tid: 1, , table: control.proto_header_cache_miss */ + .description = "control.proto_header_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 13, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 50, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + .description = "hdr_overlap_cache.overlap_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 51, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 73, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 2, + .partial_key_start_idx = 75, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .ident_start_idx = 20, + .ident_nums = 2 + }, + { /* class_tid: 1, , table: control.overlap_miss */ + .description = "control.overlap_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 52, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: profile_tcam.allocate_wc_profile */ + .description = "profile_tcam.allocate_wc_profile", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = 
TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 53, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 22, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: fkb_select.wc_gen_template */ + .description = "fkb_select.wc_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 53, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 24, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + .description = "hdr_overlap_cache.overlap_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 269, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, 
+ .key_start_idx = 76, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 2, + .partial_key_start_idx = 78, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .result_start_idx = 130, + .result_bit_size = 48, + .result_num_fields = 3 + }, + { /* class_tid: 1, , table: control.proto_header_rid_alloc */ + .description = "control.proto_header_rid_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 270, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: fkb_select.em_gen_template_alloc */ + .description = "fkb_select.em_gen_template_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 270, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 133, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 1, , table: em_key_recipe.alloc_only */ + .description = "em_key_recipe.alloc_only", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 271, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 239, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: control.profile_tcam_priority */ + .description = "control.profile_tcam_priority", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 271, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 44, + .func_dst_opr = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY } + }, + { /* class_tid: 1, , table: profile_tcam.gen_template */ + .description = "profile_tcam.gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 287, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_REGFILE, + .pri_operand = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 79, + .blob_key_bit_size = 94, + .key_bit_size = 94, + .key_num_fields = 43, + .result_start_idx = 239, + .result_bit_size = 33, + 
.result_num_fields = 8, + .ident_start_idx = 23, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: wm_key_recipe.0 */ + .description = "wm_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 542, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 122, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 35, + .result_start_idx = 247, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: proto_header_cache.wr */ + .description = "proto_header_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 964, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 157, + .blob_key_bit_size = 75, + .key_bit_size = 75, + .key_num_fields = 3, + .result_start_idx = 247, + .result_bit_size = 106, + .result_num_fields = 8 + }, + { /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + .description = "em_flow_conflict_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_RX, + .execute_info = 
{ + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 965, + .cond_nums = 3 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 160, + .blob_key_bit_size = 73, + .key_bit_size = 73, + .key_num_fields = 3, + .ident_start_idx = 24, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.em_flow_conflict_cache_miss */ + .description = "control.em_flow_conflict_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 968, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: fkb_select.em_gen_template */ + .description = "fkb_select.em_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 969, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 255, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 1, , table: em_key_recipe.0 */ + .description = "em_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1176, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 163, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 34, + .result_start_idx = 361, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + .description = "em_flow_conflict_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1580, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 197, + .blob_key_bit_size = 73, + .key_bit_size = 73, + .key_num_fields = 3, + .result_start_idx = 361, + .result_bit_size = 96, + .result_num_fields = 2 + }, + { /* class_tid: 1, , table: control.field_sig_validation */ + .description = "control.field_sig_validation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1580, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = 
BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 1, , table: em.ingress_generic_template */ + .description = "em.ingress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1582, + .cond_nums = 4 }, + .tbl_opcode = BNXT_ULP_EM_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .result_start_idx = 363, + .result_bit_size = 0, + .result_num_fields = 6 + }, + { /* class_tid: 1, , table: control.em_add_check */ + .description = "control.em_add_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1586, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 1, , table: wm.ingress_generic_template */ + .description = "wm.ingress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_WC_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1587, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = 
BNXT_ULP_CRITICAL_RESOURCE_YES, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 369, + .result_bit_size = 38, + .result_num_fields = 5 + }, + { /* class_tid: 2, , table: l2_cntxt_tcam_cache.rd */ + .description = "l2_cntxt_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1589, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 200, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 25, + .ident_nums = 2 + }, + { /* class_tid: 2, , table: control.l2_only_check */ + .description = "control.l2_only_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Reject due to missing Ethertype for L2 flows", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 3, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: control.tunnel_ipv6_sip_check */ + .description = "control.tunnel_ipv6_sip_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "reject ipv6 tunnel flow with tunnel source ip or source mac", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 5, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: 
control.terminating_flow */ + .description = "control.terminating_flow", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1607, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 318, + .func_dst_opr = BNXT_ULP_RF_IDX_TERM_FLOW } + }, + { /* class_tid: 2, , table: control.group_id_check */ + .description = "control.group_id_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1615, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: flow_chain_cache.group_check */ + .description = "flow_chain_cache.group_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1616, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 201, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .ident_start_idx = 27, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: control.flow_chain_group_id */ + .description = "control.flow_chain_group_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + 
.direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1616, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: jump_index_table.alloc */ + .description = "jump_index_table.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_ALLOCATOR_TABLE_JUMP_INDEX, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1617, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_ALLOC_TBL_OPC_ALLOC, + .tbl_operand = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 374, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: control.metadata_cal */ + .description = "control.metadata_cal", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1617, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_BIT_OR, + .func_oper_size = 16, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_JUMP_META_IDX, + .func_src2 = BNXT_ULP_FUNC_SRC_CONST, + .func_opr2 = ULP_THOR_SYM_CHAIN_META_VAL, + .func_dst_opr = BNXT_ULP_RF_IDX_JUMP_META } + }, + { /* class_tid: 2, , table: flow_chain_cache.write */ + .description = "flow_chain_cache.write", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1617, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 202, + .blob_key_bit_size = 32, + .key_bit_size = 32, + .key_num_fields = 1, + .result_start_idx = 374, + .result_bit_size = 48, + .result_num_fields = 2 + }, + { /* class_tid: 2, , table: proto_header_cache.rd */ + .description = "proto_header_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1617, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 203, + .blob_key_bit_size = 75, + .key_bit_size = 75, + .key_num_fields = 3, + .ident_start_idx = 28, + .ident_nums = 7 + }, + { /* class_tid: 2, , table: control.proto_header_cache_miss */ + .description = "control.proto_header_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 13, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1618, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + .description = "hdr_overlap_cache.overlap_check", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1619, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 206, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 2, + .partial_key_start_idx = 208, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .ident_start_idx = 35, + .ident_nums = 2 + }, + { /* class_tid: 2, , table: control.overlap_miss */ + .description = "control.overlap_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1620, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: profile_tcam.allocate_wc_profile */ + .description = "profile_tcam.allocate_wc_profile", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1621, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 
37, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: fkb_select.wc_gen_template */ + .description = "fkb_select.wc_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_WC_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1621, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 376, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + .description = "hdr_overlap_cache.overlap_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1839, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 209, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 2, + .partial_key_start_idx = 211, + .partial_key_num_fields = 1, + .partial_key_bit_size = 64, + .result_start_idx = 482, + .result_bit_size = 48, + .result_num_fields = 3 + }, + { /* class_tid: 2, , table: control.proto_header_rid_alloc */ + .description = "control.proto_header_rid_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1840, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: fkb_select.em_gen_template_alloc */ + .description = "fkb_select.em_gen_template_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1840, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 485, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 2, , table: em_key_recipe.alloc_only */ + .description = "em_key_recipe.alloc_only", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1841, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 591, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: control.profile_tcam_priority */ + .description = "control.profile_tcam_priority", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, 
+ .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1841, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_COND_LIST, + .func_oper_size = 8, + .func_src1 = BNXT_ULP_FUNC_SRC_KEY_EXT_LIST, + .func_opr1 = 360, + .func_dst_opr = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY } + }, + { /* class_tid: 2, , table: profile_tcam.gen_template */ + .description = "profile_tcam.gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 1857, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_REGFILE, + .pri_operand = BNXT_ULP_RF_IDX_PROF_TCAM_PRIORITY, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 212, + .blob_key_bit_size = 94, + .key_bit_size = 94, + .key_num_fields = 43, + .result_start_idx = 591, + .result_bit_size = 33, + .result_num_fields = 8, + .ident_start_idx = 38, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: wm_key_recipe.0 */ + .description = "wm_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2110, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = 
BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .key_start_idx = 255, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 35, + .result_start_idx = 599, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: proto_header_cache.wr */ + .description = "proto_header_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2536, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 290, + .blob_key_bit_size = 75, + .key_bit_size = 75, + .key_num_fields = 3, + .result_start_idx = 599, + .result_bit_size = 106, + .result_num_fields = 8 + }, + { /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + .description = "em_flow_conflict_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 8, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2537, + .cond_nums = 3 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 293, + .blob_key_bit_size = 73, + .key_bit_size = 73, + .key_num_fields = 3, + .ident_start_idx = 39, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: control.em_flow_conflict_cache_miss 
*/ + .description = "control.em_flow_conflict_cache_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2540, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: fkb_select.em_gen_template */ + .description = "fkb_select.em_gen_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2541, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 607, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 2, , table: em_key_recipe.0 */ + .description = "em_key_recipe.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2745, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 296, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 34, + .result_start_idx = 713, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + .description = 
"em_flow_conflict_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3143, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 330, + .blob_key_bit_size = 73, + .key_bit_size = 73, + .key_num_fields = 3, + .result_start_idx = 713, + .result_bit_size = 96, + .result_num_fields = 2 + }, + { /* class_tid: 2, , table: control.field_sig_validation */ + .description = "control.field_sig_validation", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3143, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 2, , table: em.egress_generic_template */ + .description = "em.egress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 2, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3145, + .cond_nums = 3 }, + .tbl_opcode = BNXT_ULP_EM_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + 
.key_recipe_operand = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .result_start_idx = 715, + .result_bit_size = 0, + .result_num_fields = 6 + }, + { /* class_tid: 2, , table: control.em_add_check */ + .description = "control.em_add_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3148, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 2, , table: wm.egress_generic_template */ + .description = "wm.egress_generic_template", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_WC_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3149, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_WC_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY, + .key_recipe_operand = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_APP_PRI, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 721, + .result_bit_size = 38, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + 
.cond_start_idx = 3149, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 726, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* class_tid: 3, , table: port_table.ing_wr_0 */ + .description = "port_table.ing_wr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3149, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 333, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .result_start_idx = 743, + .result_bit_size = 195, + .result_num_fields = 9 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + .description = "l2_cntxt_tcam_cache.ing_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3149, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 334, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 40, + .ident_nums = 0 + }, + { 
/* class_tid: 3, , table: control.ing_0 */ + .description = "control.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3149, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + .description = "l2_cntxt_tcam.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 335, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 752, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 40, + .ident_nums = 2 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + .description = "l2_cntxt_tcam_cache.ing_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 356, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 758, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: profile_tcam.prof_func_catch_all */ + .description = "profile_tcam.prof_func_catch_all", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 5, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 357, + .blob_key_bit_size = 94, + .key_bit_size = 94, + .key_num_fields = 43, + .result_start_idx = 763, + .result_bit_size = 33, + .result_num_fields = 8, + .ident_start_idx = 42, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: parif_def_arec_ptr.ing_0 */ + .description = "parif_def_arec_ptr.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, 
+ .result_start_idx = 771, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_err_arec_ptr.ing_0 */ + .description = "parif_def_err_arec_ptr.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .result_start_idx = 772, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: int_full_act_record.egr_0 */ + .description = "int_full_act_record.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 773, + .result_bit_size = 128, + .result_num_fields = 17, + .encap_num_fields = 0 + }, + { /* class_tid: 3, , table: port_table.egr_wr_0 */ + .description = "port_table.egr_wr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3150, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 400, + .blob_key_bit_size = 10, + .key_bit_size = 10, + .key_num_fields = 1, + .result_start_idx = 790, + .result_bit_size = 195, + .result_num_fields = 9 + }, + { /* class_tid: 3, , table: control.egr_0 */ + .description = "control.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3150, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd_vfr */ + .description = "l2_cntxt_tcam_cache.egr_rd_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3151, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 401, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 42, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.egr_1 */ + .description = "control.egr_1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3151, + .cond_nums = 1 }, + 
.key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.drv_func_prof_func_alloc */ + .description = "l2_cntxt_tcam.drv_func_prof_func_alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3152, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .ident_start_idx = 42, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: ilt_tbl.egr_vfr */ + .description = "ilt_tbl.egr_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_ILT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3152, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_SVIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 799, + .result_bit_size = 64, + .result_num_fields = 8 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr_vfr */ + .description = "l2_cntxt_tcam_cache.egr_wr_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, 
+ .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3152, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 402, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 807, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd */ + .description = "l2_cntxt_tcam_cache.egr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3152, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 403, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 43, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.egr_2 */ + .description = "control.egr_2", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3152, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + .description = "l2_cntxt_tcam.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 
1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3153, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 404, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 812, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 43, + .ident_nums = 2 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + .description = "l2_cntxt_tcam_cache.egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3153, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 425, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 818, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: parif_def_arec_ptr.egr_0 */ + .description = "parif_def_arec_ptr.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3153, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 823, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_err_arec_ptr.egr_0 */ + .description = "parif_def_err_arec_ptr.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3153, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 824, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: profile_tcam_cache.vfr_glb_act_rec_rd */ + .description = "profile_tcam_cache.vfr_glb_act_rec_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3153, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 426, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .ident_start_idx = 45, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.prof_tcam_cache.vfr_glb_act_rec_rd.0 */ + .description = "control.prof_tcam_cache.vfr_glb_act_rec_rd.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, 
+ .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3153, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: mod_record.vf_2_vfr_egr */ + .description = "mod_record.vf_2_vfr_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 825, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* class_tid: 4, , table: int_full_act_record.vf_2_vfr_loopback */ + .description = "int_full_act_record.vf_2_vfr_loopback", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 845, + .result_bit_size = 128, + .result_num_fields = 17, + 
.encap_num_fields = 0 + }, + { /* class_tid: 4, , table: parif_def_arec_ptr.vf_egr */ + .description = "parif_def_arec_ptr.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_CONST, + .tbl_operand = ULP_THOR_SYM_LOOPBACK_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 862, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: parif_def_err_arec_ptr.vf_egr */ + .description = "parif_def_err_arec_ptr.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_CONST, + .tbl_operand = ULP_THOR_SYM_LOOPBACK_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 863, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: profile_tcam_cache.vfr_glb_act_rec_wr */ + .description = "profile_tcam_cache.vfr_glb_act_rec_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + 
.tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 429, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .result_start_idx = 864, + .result_bit_size = 138, + .result_num_fields = 7 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_rd_egr */ + .description = "l2_cntxt_tcam_cache.vf_rd_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3154, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 432, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 45, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.vf_2_vfr.0 */ + .description = "control.vf_2_vfr.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 5, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3154, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.get_drv_func_prof_func */ + .description = "l2_cntxt_tcam_cache.get_drv_func_prof_func", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 433, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 45, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + .description = "l2_cntxt_tcam.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 434, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 871, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 46, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: profile_tcam.prof_func_catch_all */ + .description = "profile_tcam.prof_func_catch_all", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 5, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 455, + .blob_key_bit_size = 94, + .key_bit_size = 94, + .key_num_fields = 43, + .result_start_idx = 877, + .result_bit_size = 33, + .result_num_fields = 8, + .ident_start_idx = 47, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + .description = "l2_cntxt_tcam_cache.vf_egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 498, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 885, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: int_full_act_record.vf_2_vfr_ing */ + .description = "int_full_act_record.vf_2_vfr_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_AND_SET_VFR_FLAG, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 890, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* class_tid: 4, , table: profile_tcam_cache.vfr_rd */ + .description = "profile_tcam_cache.vfr_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3155, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 499, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .ident_start_idx = 47, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.prof_tcam_cache.vfr.0 */ + .description = "control.prof_tcam_cache.vfr.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 10, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3155, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: int_full_act_record.drop_action */ + .description = "int_full_act_record.drop_action", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 907, + .result_bit_size = 128, + .result_num_fields = 17, + .encap_num_fields = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam.vf_2_vfr_ing.0 */ + .description = "l2_cntxt_tcam.vf_2_vfr_ing.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 502, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 924, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 47, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam.vfr_2_vf_ing.0 */ + .description = "l2_cntxt_tcam.vfr_2_vf_ing.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 523, + .blob_key_bit_size = 213, + .key_bit_size = 213, + .key_num_fields = 21, + .result_start_idx = 930, + .result_bit_size = 43, + .result_num_fields = 6, + .ident_start_idx = 47, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: fkb_select.vfr_em */ + .description = "fkb_select.vfr_em", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 936, + .result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 4, , table: fkb_select.vf_em */ + .description = "fkb_select.vf_em", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EM_FKB, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_1, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1042, + 
.result_bit_size = 106, + .result_num_fields = 106 + }, + { /* class_tid: 4, , table: profile_tcam.vf_2_vfr.0 */ + .description = "profile_tcam.vf_2_vfr.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 544, + .blob_key_bit_size = 94, + .key_bit_size = 94, + .key_num_fields = 43, + .result_start_idx = 1148, + .result_bit_size = 33, + .result_num_fields = 8 + }, + { /* class_tid: 4, , table: profile_tcam.vfr_2_vf.0 */ + .description = "profile_tcam.vfr_2_vf.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 587, + .blob_key_bit_size = 94, + .key_bit_size = 94, + 
.key_num_fields = 43, + .result_start_idx = 1156, + .result_bit_size = 33, + .result_num_fields = 8 + }, + { /* class_tid: 4, , table: profile_tcam_cache.vfr_wr */ + .description = "profile_tcam_cache.vfr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 630, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .result_start_idx = 1164, + .result_bit_size = 138, + .result_num_fields = 7 + }, + { /* class_tid: 4, , table: ilt_tbl.vfr_ing */ + .description = "ilt_tbl.vfr_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_ILT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_VF_FUNC_SVIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 1171, + .result_bit_size = 64, + .result_num_fields = 8 + }, + { /* class_tid: 4, , table: em.vf_2_vfr.0 */ + .description = "em.vf_2_vfr.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .key_start_idx = 633, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 3, + .result_start_idx = 1179, + .result_bit_size = 0, + .result_num_fields = 6 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.rd_egr0 */ + .description = "l2_cntxt_tcam_cache.rd_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3156, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 636, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .ident_start_idx = 47, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 4, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3156, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: ilt_tbl.vfr_egr */ + .description = "ilt_tbl.vfr_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_ILT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_SVIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .result_start_idx = 1185, + .result_bit_size = 64, + .result_num_fields = 8 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + .description = "l2_cntxt_tcam_cache.vfr_wr_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 637, + .blob_key_bit_size = 11, + .key_bit_size = 11, + .key_num_fields = 1, + .result_start_idx = 1193, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: ilt_tbl.vf_egr */ + .description = "ilt_tbl.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_ILT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_VF_FUNC_SVIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 1198, + .result_bit_size = 64, + .result_num_fields = 8 + }, + { /* class_tid: 4, , table: mod_record.vfr_2_vf_egr */ + .description = "mod_record.vfr_2_vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_64B, + .resource_sub_type 
= + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1206, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 20 + }, + { /* class_tid: 4, , table: int_full_act_record.vfr_egr */ + .description = "int_full_act_record.vfr_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1226, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* class_tid: 4, , table: int_full_act_record.vfr_2_vf.ing0 */ + .description = "int_full_act_record.vfr_2_vf.ing0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = 
BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 1243, + .result_bit_size = 128, + .result_num_fields = 17 + }, + { /* class_tid: 4, , table: em.vfr_2_vf.0 */ + .description = "em.vfr_2_vf.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3157, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 638, + .blob_key_bit_size = 0, + .key_bit_size = 0, + .key_num_fields = 2, + .result_start_idx = 1260, + .result_bit_size = 0, + .result_num_fields = 6 + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_thor_class_cond_oper_list[] = { + /* cond_execute: class_tid: 1, control.tunnel_ipv6_sip_check:28*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 28, + .cond_nums = 3 + }, + /* cond_execute: class_tid: 1, control.l2_only_check:31*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 31, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 1, control.l2_only_check:31*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 36, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1589*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1589, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1589*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1594, + .cond_nums = 5 + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1599*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, 
+ .cond_start_idx = 1599, + .cond_nums = 4 + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1599*/ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1603, + .cond_nums = 4 + } +}; + +struct bnxt_ulp_mapper_cond_info ulp_thor_class_cond_list[] = { + /* cond_reject: thor, class_tid: 1 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* cond_execute: class_tid: 1, control.check_f1_f2_flow:2*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* cond_execute: class_tid: 1, control.tunnel_cache_check:4*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, control.check_f2_flow:5*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, control.dmac_calculation:6*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, control.dmac_calculation:8*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET, + .cond_operand = BNXT_ULP_FEATURE_BIT_PORT_DMAC + }, + /* field_cond: class_tid: 1, control.dmac_calculation:9*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET, + .cond_operand = BNXT_ULP_FEATURE_BIT_PARENT_DMAC + }, + /* cond_execute: class_tid: 1, control.group_id_check:10*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, control.flow_chain_group_id:11*/ + { + .cond_opcode 
= BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, flow_chain_l2_cntxt.group_check:12*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, control.flow_chain_group_l2_cntxt_check:13*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.chain_entry:14*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.chain_entry:15*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* cond_execute: class_tid: 1, mac_addr_cache.rd:16*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_O_DMAC + }, + /* field_cond: class_tid: 1, mac_addr_cache.rd:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.rd:18*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* cond_execute: class_tid: 1, control.mac_addr_cache_miss:19*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, l2_cntxt_tcam.allocate_l2_context:20*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.ingress_entry:22*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.ingress_entry:23*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, l2_cntxt_tcam.ingress_entry:24*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.wr:25*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, mac_addr_cache.wr:26*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_METADATA + }, + /* cond_execute: class_tid: 1, control.check_f1_flow:27*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F1 + }, + /* cond_execute: class_tid: 1, control.tunnel_ipv6_sip_check:28*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* cond_execute: class_tid: 1, control.l2_only_check:31*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* cond_execute: class_tid: 1, control.l2_only_check:31*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, control.terminating_flow:41*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, control.terminating_flow:43*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, control.terminating_flow:45*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, control.terminating_flow:47*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, proto_header_cache.rd:49*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, control.proto_header_cache_miss:50*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, hdr_overlap_cache.overlap_check:51*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, control.overlap_miss:52*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: 
class_tid: 1, fkb_select.wc_gen_template:53*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:55*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:56*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:57*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:58*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:61*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:64*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:68*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:72*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:76*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:80*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:83*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:86*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:89*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:92*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:95*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:98*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:101*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:104*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:107*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:110*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:116*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:119*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:122*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:124*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:126*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:129*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:132*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:135*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:138*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:142*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: 
class_tid: 1, fkb_select.wc_gen_template:146*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:150*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:154*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:158*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:162*/ 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:166*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:170*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:174*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:178*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:182*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:186*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:190*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:194*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:198*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:202*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:206*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:210*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:214*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:218*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:222*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:226*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:230*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:234*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:238*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:242*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:246*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:250*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:251*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:254*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:257*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:258*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:261*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:264*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:265*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 1, fkb_select.wc_gen_template:267*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 1, hdr_overlap_cache.overlap_wr:269*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, fkb_select.em_gen_template_alloc:270*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:271*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:273*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:275*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:277*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:279*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:281*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:283*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, control.profile_tcam_priority:285*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:287*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:289*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:291*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:293*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:295*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:297*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:299*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:301*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:303*/ + { + .cond_opcode 
= BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:305*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:307*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:309*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:311*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:313*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:315*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:317*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:319*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:321*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:323*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:325*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:327*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:329*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:331*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:333*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:335*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:337*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:339*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:341*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:343*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:345*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:347*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:349*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:351*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:352*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:354*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:356*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:358*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:360*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:361*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode 
= BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:363*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:365*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:367*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:369*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:371*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:373*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:375*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:377*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:379*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:381*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:383*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:385*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:387*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:389*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:391*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:393*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:395*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:397*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:399*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:401*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:403*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:405*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:407*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:409*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:411*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:413*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:415*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:417*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:420*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:423*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:426*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:429*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:430*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:431*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:433*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:435*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:437*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:439*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:440*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:442*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:444*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:446*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:448*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:450*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:452*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:454*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:456*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:458*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:460*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:462*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:464*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:466*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:468*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:469*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:470*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:471*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:473*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:475*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:477*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:479*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:481*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:483*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:485*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:487*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:489*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:491*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:493*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:495*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:497*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:499*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:500*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:502*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:504*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:505*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:507*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:509*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:511*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:513*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:515*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:517*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:519*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:521*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:523*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:525*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:527*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:529*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:532*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:535*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:536*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:537*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 1, 
profile_tcam.gen_template:539*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_F2 + }, + /* field_cond: class_tid: 1, profile_tcam.gen_template:541*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:542*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:544*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:546*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:547*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:548*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:549*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:550*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:551*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:552*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand 
= BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:555*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:558*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:561*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:564*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:568*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:572*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:576*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:580*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:584*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:588*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:592*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:596*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:599*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:602*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:605*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:608*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:611*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:614*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:617*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:620*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:623*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:626*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:629*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:632*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:635*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:638*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:641*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:644*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:647*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, 
wm_key_recipe.0:650*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:653*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:656*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:659*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:662*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:665*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:668*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:671*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:674*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:677*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:680*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: 
class_tid: 1, wm_key_recipe.0:682*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:684*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:686*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:688*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:691*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:694*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:697*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:700*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:703*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:706*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:709*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:712*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:716*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:720*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:724*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:728*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:732*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:736*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:740*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:744*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:748*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:752*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:756*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:760*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:764*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:768*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:772*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:776*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:779*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:782*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:785*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:788*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:791*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:794*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:797*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:800*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:803*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:806*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:809*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: 
class_tid: 1, wm_key_recipe.0:812*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:815*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:818*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:821*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:824*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:827*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:830*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:833*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:836*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:839*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:842*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:845*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:848*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:850*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:852*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:854*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:856*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:859*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:862*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:865*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:868*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:870*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:872*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:874*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:876*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:882*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:885*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:888*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, 
wm_key_recipe.0:891*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:894*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:897*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:900*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:903*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:906*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:909*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:912*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:915*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:918*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:921*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:924*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:925*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:928*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:931*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:934*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:937*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:938*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:941*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:944*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:947*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:950*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:951*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, 
wm_key_recipe.0:954*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:957*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:960*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, wm_key_recipe.0:963*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 1, proto_header_cache.wr:964*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 1, em_flow_conflict_cache.rd:965*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_EM_FOR_TC + }, + /* cond_execute: class_tid: 1, control.em_flow_conflict_cache_miss:968*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 1, 
fkb_select.em_gen_template:969*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:970*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:971*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:972*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:976*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:979*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:983*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:987*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:991*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:995*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:998*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1001*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1004*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1007*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1010*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1013*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 
1, fkb_select.em_gen_template:1016*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1019*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1022*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1025*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1028*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1031*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1034*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1037*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1039*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1041*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1045*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1049*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1052*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1055*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1059*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* 
field_cond: class_tid: 1, fkb_select.em_gen_template:1063*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1067*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1071*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1075*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, 
fkb_select.em_gen_template:1079*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1083*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1087*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1090*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1093*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1096*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1099*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1102*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1105*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1108*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1111*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1114*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1117*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1120*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1123*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, 
fkb_select.em_gen_template:1126*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1129*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1132*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1135*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1138*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1141*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1144*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1147*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1150*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1153*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1156*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1159*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1162*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1165*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1168*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1171*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1172*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 1, fkb_select.em_gen_template:1174*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1176*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1177*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1178*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1179*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1180*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1181*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1182*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1186*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1190*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1193*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1196*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1200*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1204*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1208*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1212*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1216*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + 
}, + /* field_cond: class_tid: 1, em_key_recipe.0:1220*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1224*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1228*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1231*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1234*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1237*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1240*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1243*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1246*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1249*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, 
em_key_recipe.0:1252*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1255*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1258*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1261*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1264*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1267*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1270*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1273*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1276*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1279*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1282*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1285*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1288*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1291*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1294*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1297*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1300*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1303*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1306*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1309*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1312*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1314*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, 
em_key_recipe.0:1316*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1318*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1320*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1324*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1328*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: 
class_tid: 1, em_key_recipe.0:1332*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1336*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1339*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1342*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1345*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1348*/ 
+ { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1352*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1356*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1360*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1364*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1368*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1372*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1376*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1380*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1384*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1388*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1392*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1396*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand 
= BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1400*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1404*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1408*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1412*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1415*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1418*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1421*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1424*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1427*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1430*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1433*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1436*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1439*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1442*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1445*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1448*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1451*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1454*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1457*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1460*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, 
em_key_recipe.0:1463*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1466*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1469*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1472*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1475*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1478*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1481*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1484*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1487*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1490*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1493*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1496*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1499*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1502*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1505*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1508*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, 
em_key_recipe.0:1511*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1514*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1517*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1520*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1523*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1526*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1529*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1532*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1535*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1538*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1541*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1544*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1547*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1550*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1553*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1556*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1559*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1562*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1565*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1568*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1571*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1574*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, 
+ .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 1, em_key_recipe.0:1577*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* cond_execute: class_tid: 1, control.field_sig_validation:1580*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_FLOW_SIG_ID + }, + /* cond_execute: class_tid: 1, em.ingress_generic_template:1582*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_EM_FOR_TC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DEF_PRIO + }, + /* cond_execute: class_tid: 1, control.em_add_check:1586*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL + }, + /* cond_reject: thor, class_tid: 2 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1589*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* cond_execute: class_tid: 2, control.l2_only_check:1589*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1599*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* cond_execute: class_tid: 2, control.tunnel_ipv6_sip_check:1599*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, control.terminating_flow:1607*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1609*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1611*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, control.terminating_flow:1613*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* cond_execute: class_tid: 2, control.group_id_check:1615*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, control.flow_chain_group_id:1616*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, proto_header_cache.rd:1617*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, control.proto_header_cache_miss:1618*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, hdr_overlap_cache.overlap_check:1619*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, control.overlap_miss:1620*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, 
fkb_select.wc_gen_template:1621*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1623*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1624*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1625*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1626*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1629*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1632*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1634*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1638*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1642*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1646*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1650*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1653*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1656*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1659*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1662*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1665*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1668*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1671*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1674*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1677*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1680*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1683*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1686*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1689*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1692*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1694*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1696*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1699*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1702*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1705*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1708*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1712*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1716*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1720*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1724*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1728*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1732*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1736*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1740*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1744*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1748*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1752*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1756*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1760*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1764*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1768*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1772*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1776*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1780*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1784*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1788*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1792*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1796*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1800*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1804*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1808*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1812*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1816*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1820*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1821*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1824*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: 
class_tid: 2, fkb_select.wc_gen_template:1827*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1828*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1831*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ICMP + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1834*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1835*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, fkb_select.wc_gen_template:1837*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, hdr_overlap_cache.overlap_wr:1839*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, fkb_select.em_gen_template_alloc:1840*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + /* field_cond: class_tid: 2, 
control.profile_tcam_priority:1841*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1843*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1845*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1847*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1849*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1851*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1853*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, control.profile_tcam_priority:1855*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1857*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1859*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1861*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1863*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1865*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1867*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1869*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1871*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1873*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1875*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1877*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1881*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1883*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, 
profile_tcam.gen_template:1885*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1887*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1889*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1891*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1893*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1895*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1897*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1899*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1901*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1903*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1905*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1907*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1909*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1911*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1913*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1915*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1917*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1919*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1921*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1922*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1924*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1926*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1928*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode 
= BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1930*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1931*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1933*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1935*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1937*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1939*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1941*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1943*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1945*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1947*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1949*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1951*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1953*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1955*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1957*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1959*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1961*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1963*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1965*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1967*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1969*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1971*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1973*/ + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1975*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1977*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1979*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1981*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1983*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1985*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1987*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1990*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1993*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1996*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:1999*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2000*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2001*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2003*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2005*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2007*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2009*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2010*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2012*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2014*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2016*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* 
field_cond: class_tid: 2, profile_tcam.gen_template:2018*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2020*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2022*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2024*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2026*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2028*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2030*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GENEVE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2032*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, 
+ .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_GRE + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2034*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR1 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2036*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_UPAR2 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2038*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2039*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2040*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2041*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2043*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2045*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* 
field_cond: class_tid: 2, profile_tcam.gen_template:2047*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2049*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2051*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2053*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2055*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2057*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2059*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2061*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2063*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2065*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2067*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2069*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2070*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2072*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2074*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2075*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2077*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2079*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2081*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2083*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2085*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2087*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2089*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: 
class_tid: 2, profile_tcam.gen_template:2091*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2093*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2095*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2097*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2099*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2102*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2105*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 
2, profile_tcam.gen_template:2106*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DIX_TRAFFIC + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2107*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + /* field_cond: class_tid: 2, profile_tcam.gen_template:2109*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2110*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2112*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2114*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2115*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_CHAIN_ID_METADATA + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2116*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2117*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2118*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2119*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2120*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2123*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2126*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2129*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2131*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2134*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2136*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2140*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2144*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2148*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID 
+ }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2152*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2156*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2160*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2164*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2168*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2171*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2174*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2177*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2180*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2183*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2186*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2189*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2192*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2195*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2198*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2201*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2204*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2207*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2210*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2213*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 
2, wm_key_recipe.0:2216*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2219*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2222*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2225*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2228*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2231*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2234*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2237*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2240*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2243*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2246*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2249*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2252*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2254*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2256*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2258*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2260*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2263*/ + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2266*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2269*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2272*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2275*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2278*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2281*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2284*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2288*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2292*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + 
/* field_cond: class_tid: 2, wm_key_recipe.0:2296*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2300*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2304*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2308*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2312*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2316*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2320*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2324*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2328*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { 
+ .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2332*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2336*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2340*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2344*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2348*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2351*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2354*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2357*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2360*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2363*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2366*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2369*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2372*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2375*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* 
field_cond: class_tid: 2, wm_key_recipe.0:2378*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2381*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2384*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2387*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2390*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2393*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2396*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2399*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2402*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2405*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2408*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2411*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2414*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2417*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2420*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2422*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2424*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2426*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2428*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2431*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2434*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2437*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2440*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2442*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2444*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2446*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2448*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2451*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2454*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* 
field_cond: class_tid: 2, wm_key_recipe.0:2457*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2460*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2463*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2466*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2469*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2472*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2475*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2478*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2481*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2484*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2487*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2490*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2493*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2496*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2497*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2500*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2503*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2506*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2509*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2510*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2513*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2516*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2519*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2522*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2523*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2526*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2529*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2532*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, wm_key_recipe.0:2535*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_L2_ONLY + }, + /* field_cond: class_tid: 2, proto_header_cache.wr:2536*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* cond_execute: class_tid: 2, em_flow_conflict_cache.rd:2537*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_EM_FOR_TC + }, + /* cond_execute: class_tid: 2, control.em_flow_conflict_cache_miss:2540*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2541*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2542*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2543*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2544*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2547*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + 
}, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2550*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2554*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2558*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2562*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, 
fkb_select.em_gen_template:2566*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2569*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2572*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2575*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2578*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2581*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2584*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2587*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2590*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2593*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2596*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2599*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2602*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2605*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2608*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2610*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2612*/ + { + 
.cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2615*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2618*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2621*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2624*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2628*/ + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2632*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2636*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2640*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2644*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + 
.cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2648*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2652*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2656*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2659*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode 
= BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2662*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2665*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2668*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2671*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2674*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2677*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2680*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2683*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2686*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2689*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, 
fkb_select.em_gen_template:2692*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2695*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2698*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2701*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2704*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2707*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2710*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2713*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2716*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2719*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2722*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2725*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2728*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2731*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2734*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2737*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2740*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2741*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, fkb_select.em_gen_template:2743*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2745*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2746*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_CNTXT_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2747*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2748*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_GROUP_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2749*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2750*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_RECYCLE_CNT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2751*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2754*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2757*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2760*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2763*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2767*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2771*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2775*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2779*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2783*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand 
= BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2787*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2791*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2795*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2798*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2801*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2804*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2807*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2810*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2813*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2816*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2819*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2822*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2825*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2828*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2831*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2834*/ + 
{ + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2837*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2840*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2843*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2846*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2849*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2852*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2855*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2858*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2861*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2864*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2867*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2870*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2873*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2876*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2879*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2881*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2883*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2885*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN_GPE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2887*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2890*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2893*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2896*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + 
}, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2899*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2902*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2905*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2908*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2911*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2915*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2919*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2923*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2927*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* 
field_cond: class_tid: 2, em_key_recipe.0:2931*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2935*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2939*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2943*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_II_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2947*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_IO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_II_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_IO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2951*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OI_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2955*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OO_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_OI_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_OO_VLAN_VID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2959*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2963*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2967*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2971*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_L2_ONLY + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_ETH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_TYPE + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2975*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2978*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: 
class_tid: 2, em_key_recipe.0:2981*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2984*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2987*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2990*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2993*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2996*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = 
BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:2999*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3002*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3005*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3008*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3011*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 
+ }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3014*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3017*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3020*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3023*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3026*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: 
class_tid: 2, em_key_recipe.0:3029*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3032*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3035*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3038*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3041*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3044*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + 
.cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_TTL + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3047*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3050*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3053*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3056*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3059*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3062*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3065*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3068*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3071*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3074*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, 
em_key_recipe.0:3077*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3080*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3083*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3086*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3089*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV6_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3092*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_IPV4_QOS + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3095*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3098*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3101*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3104*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3107*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3110*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3113*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3116*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3119*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3122*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3125*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3128*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3131*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3134*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_I_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3137*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_UDP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT + }, + /* field_cond: class_tid: 2, em_key_recipe.0:3140*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_NOT_SET, + .cond_operand = BNXT_ULP_CF_BIT_IS_TUNNEL + }, + { + .cond_opcode = 
BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_TCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT + }, + /* cond_execute: class_tid: 2, control.field_sig_validation:3143*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_NOT_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_FLOW_SIG_ID + }, + /* cond_execute: class_tid: 2, em.egress_generic_template:3145*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_TERM_FLOW + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_BIT_IS_SET, + .cond_operand = BNXT_ULP_CF_BIT_DEF_PRIO + }, + /* cond_execute: class_tid: 2, control.em_add_check:3148*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_EM_INSERT_FAIL + }, + /* cond_execute: class_tid: 3, control.ing_0:3149*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.egr_0:3150*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_VFR_MODE + }, + /* cond_execute: class_tid: 3, control.egr_1:3151*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.egr_2:3152*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.prof_tcam_cache.vfr_glb_act_rec_rd.0:3153*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.vf_2_vfr.0:3154*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: 
class_tid: 4, control.prof_tcam_cache.vfr.0:3155*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.0:3156*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + } +}; + +struct bnxt_ulp_mapper_key_info ulp_thor_class_key_info_list[] = { + /* class_tid: 1, , table: port_table.rd */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + 
(BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_TUNNEL_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_TUNNEL_ID & 0xff} + } + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_TUNNEL_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_TUNNEL_ID & 0xff} + } + }, + /* class_tid: 1, , table: flow_chain_cache.group_check */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_GROUP_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_GROUP_ID & 0xff} + } + }, + /* class_tid: 1, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_GROUP_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_GROUP_ID & 0xff} + } + }, + /* class_tid: 1, , table: flow_chain_l2_cntxt.group_check */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam.chain_entry */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask 
= { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + 
{ + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_CHAIN_META_VAL_MASK >> 8) & 0xff, + ULP_THOR_SYM_CHAIN_META_VAL_MASK & 0xff} + }, + .field_info_spec = { + .description = "metadata", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_CHAIN_META_VAL >> 8) & 0xff, + ULP_THOR_SYM_CHAIN_META_VAL & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (14 >> 8) & 0xff, + 14 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (15 >> 8) & 0xff, + 15 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: flow_chain_l2_cntxt.write */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 1, , table: mac_addr_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { 
+ .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (17 >> 8) & 0xff, + 17 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (18 >> 8) & 0xff, + 18 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam.ingress_entry */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = 
{ + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (22 >> 8) & 0xff, + 22 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_CHAIN_META_VAL_MASK >> 8) & 0xff, + ULP_THOR_SYM_CHAIN_META_VAL_MASK & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (23 >> 8) & 0xff, + 23 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (24 >> 8) & 0xff, + 24 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: mac_addr_cache.wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_O_DMAC >> 8) & 0xff, + BNXT_ULP_RF_IDX_O_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { 
+ .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (25 >> 8) & 0xff, + 25 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (26 >> 8) & 0xff, + 26 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: proto_header_cache.rd */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (49 >> 8) & 0xff, + 49 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (51 >> 8) & 0xff, + 51 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (269 >> 8) & 0xff, + 269 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (287 >> 8) & 0xff, + 287 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (52 >> 8) & 0xff, + 52 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (295 >> 8) & 0xff, + 295 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (55 >> 8) & 0xff, + 55 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (303 >> 8) & 0xff, + 303 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (58 >> 8) & 0xff, + 58 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (311 >> 8) & 0xff, + 311 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (61 >> 8) & 0xff, + 61 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (319 >> 8) & 0xff, + 319 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (64 >> 8) & 0xff, + 64 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (327 >> 8) & 0xff, + 327 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (67 >> 8) & 0xff, + 67 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (335 >> 8) & 0xff, + 335 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (70 >> 8) & 0xff, + 70 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (343 >> 8) & 0xff, + 343 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (73 >> 8) & 0xff, + 73 & 0xff} + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (351 >> 8) & 0xff, + 351 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (76 >> 8) & 0xff, + 76 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (360 >> 8) & 0xff, + 360 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (80 >> 8) & 0xff, + 80 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (369 >> 8) & 0xff, + 369 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (84 >> 8) & 0xff, + 84 & 0xff} + }, + 
.field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (377 >> 8) & 0xff, + 377 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (87 >> 8) & 0xff, + 87 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (385 >> 8) & 0xff, + 385 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (90 >> 8) & 0xff, + 90 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (393 >> 8) & 0xff, + 393 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (93 >> 8) & 0xff, + 93 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (401 >> 8) & 0xff, + 401 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (96 >> 8) & 0xff, + 96 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (409 >> 8) & 0xff, + 409 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (99 >> 8) & 0xff, + 99 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (417 >> 8) & 0xff, + 417 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (102 >> 8) & 0xff, + 102 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (423 >> 8) & 0xff, + 423 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (103 >> 8) & 0xff, + 103 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (429 >> 8) & 0xff, + 429 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, 
+ .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (430 >> 8) & 0xff, + 430 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (431 >> 8) & 0xff, + 431 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (104 >> 8) & 0xff, + 104 & 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (435 >> 8) & 0xff, + 435 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (105 >> 8) & 0xff, + 105 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (439 >> 8) & 0xff, + 439 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (106 >> 8) & 0xff, + 106 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (444 >> 8) & 0xff, + 444 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (108 >> 8) & 0xff, + 108 & 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (456 >> 8) & 0xff, + 456 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (113 >> 8) & 0xff, + 113 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (468 >> 8) & 0xff, + 468 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (469 >> 8) & 0xff, + 469 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (470 >> 8) & 0xff, + 470 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (471 >> 8) & 0xff, + 471 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (118 >> 8) & 0xff, + 118 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (475 >> 8) & 0xff, + 475 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (119 >> 8) & 0xff, + 119 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (479 >> 8) & 0xff, + 479 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (120 >> 8) & 0xff, + 120 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (483 >> 8) & 0xff, + 483 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (121 >> 8) & 0xff, + 
121 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (487 >> 8) & 0xff, + 487 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (122 >> 8) & 0xff, + 122 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (491 >> 8) & 0xff, + 491 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (123 >> 8) & 0xff, + 123 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (495 >> 8) & 0xff, + 495 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (124 >> 8) & 0xff, + 124 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (500 >> 8) & 0xff, + 500 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (126 >> 8) & 0xff, + 126 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (505 >> 8) & 0xff, + 505 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (128 >> 8) & 0xff, + 128 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (509 >> 8) & 0xff, + 509 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (129 >> 8) & 0xff, + 129 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (513 >> 8) & 0xff, + 513 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (130 >> 8) & 0xff, + 130 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (517 >> 8) & 0xff, + 517 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (131 >> 8) & 0xff, + 131 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (521 >> 8) & 0xff, + 521 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (132 >> 8) & 0xff, + 132 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (525 >> 8) & 0xff, + 525 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (133 >> 8) & 0xff, + 133 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (529 >> 8) & 0xff, + 529 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (532 >> 8) & 0xff, + 532 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (535 >> 8) & 0xff, + 535 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (536 >> 8) & 0xff, + 536 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (537 >> 8) & 
0xff, + 537 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (134 >> 8) & 0xff, + 134 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xf000 >> 8) & 0xff, + 0xf000 & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (541 >> 8) & 0xff, + 541 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: wm_key_recipe.0 */ + { + .field_info_mask = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (542 >> 8) & 0xff, + 542 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = 
{ + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (544 >> 8) & 0xff, + 544 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (546 >> 8) & 0xff, + 546 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (547 >> 8) & 0xff, + 547 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (548 >> 8) & 0xff, + 548 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (549 >> 8) & 0xff, + 549 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + 
.field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (550 >> 8) & 0xff, + 550 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (551 >> 8) & 0xff, + 551 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (552 >> 8) & 0xff, + 552 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (555 >> 8) & 0xff, + 555 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (558 >> 8) & 0xff, + 558 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (561 >> 8) & 0xff, + 561 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (564 >> 8) & 0xff, + 564 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (568 >> 8) & 0xff, + 568 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (572 >> 8) & 0xff, + 572 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (135 >> 8) & 0xff, + 135 & 0xff} + }, + .field_info_spec 
= { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (580 >> 8) & 0xff, + 580 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (136 >> 8) & 0xff, + 136 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (588 >> 8) & 0xff, + 588 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (592 >> 8) & 0xff, + 592 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (596 >> 8) & 0xff, + 596 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (599 >> 8) & 0xff, + 599 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (602 >> 8) & 0xff, + 602 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (605 >> 8) & 0xff, + 605 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (608 >> 8) & 0xff, + 608 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (611 >> 8) & 0xff, + 611 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", 
+ .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (614 >> 8) & 0xff, + 614 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (617 >> 8) & 0xff, + 617 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (620 >> 8) & 0xff, + 620 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (137 >> 8) & 0xff, + 137 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (626 >> 8) & 0xff, + 626 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (138 >> 8) & 0xff, + 138 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (632 >> 8) & 0xff, + 632 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (139 >> 8) & 0xff, + 139 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (638 >> 8) & 0xff, + 638 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (140 >> 8) & 0xff, + 140 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (644 >> 8) & 0xff, + 644 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (141 >> 8) & 0xff, + 141 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (650 >> 8) & 0xff, + 650 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (142 >> 8) & 0xff, + 142 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (656 >> 8) & 0xff, + 656 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (143 >> 8) & 0xff, + 143 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (662 >> 8) & 0xff, + 662 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (144 >> 8) & 0xff, + 144 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (668 >> 8) & 0xff, + 668 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (145 >> 8) & 0xff, + 145 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (674 >> 8) & 0xff, + 674 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (146 >> 8) & 0xff, + 146 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + 
.field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (680 >> 8) & 0xff, + 680 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (147 >> 8) & 0xff, + 147 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (684 >> 8) & 0xff, + 684 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (148 >> 8) & 0xff, + 148 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (688 >> 8) & 0xff, + 688 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (149 >> 8) & 0xff, + 149 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (694 >> 8) & 0xff, + 694 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (150 >> 8) & 0xff, + 150 & 0xff} + } + }, + { + .field_info_mask = { + .description = 
"l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (700 >> 8) & 0xff, + 700 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (151 >> 8) & 0xff, + 151 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (706 >> 8) & 0xff, + 706 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (152 >> 8) & 0xff, + 152 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (712 >> 8) & 0xff, + 712 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (153 >> 8) & 0xff, + 153 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (720 >> 8) & 0xff, + 720 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (154 >> 8) & 0xff, + 154 & 0xff} + } + }, + { + .field_info_mask = { + 
.description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (728 >> 8) & 0xff, + 728 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (155 >> 8) & 0xff, + 155 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (744 >> 8) & 0xff, + 744 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (158 >> 8) & 0xff, + 158 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (760 >> 8) & 0xff, + 760 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (161 >> 8) & 0xff, + 161 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (768 >> 8) & 0xff, + 768 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (162 >> 8) & 0xff, + 162 & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (776 >> 8) & 0xff, + 776 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (163 >> 8) & 0xff, + 163 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (782 >> 8) & 0xff, + 782 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (164 >> 8) & 0xff, + 164 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (788 >> 8) & 0xff, + 788 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (165 >> 8) & 0xff, + 165 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (794 >> 8) & 0xff, + 794 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (166 >> 8) & 0xff, + 166 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (800 >> 8) & 0xff, + 800 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (167 >> 8) & 0xff, + 167 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (806 >> 8) & 0xff, + 806 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (168 >> 8) & 0xff, + 168 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (812 >> 8) & 0xff, + 812 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (169 >> 8) & 0xff, + 169 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (818 >> 8) & 0xff, + 818 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (170 >> 8) & 0xff, + 170 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (824 >> 8) & 0xff, + 824 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (171 >> 8) & 0xff, + 171 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (836 >> 8) & 0xff, + 836 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (174 >> 8) & 0xff, + 174 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (848 >> 8) & 0xff, + 848 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (177 >> 8) & 0xff, + 177 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (868 >> 8) & 0xff, + 868 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (184 >> 8) & 0xff, + 184 & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (888 >> 8) & 0xff, + 888 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (191 >> 8) & 0xff, + 191 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (900 >> 8) & 0xff, + 900 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (194 >> 8) & 0xff, + 194 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (912 >> 8) & 0xff, + 912 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (197 >> 8) & 0xff, + 197 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (925 >> 8) & 0xff, + 925 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (201 >> 8) & 0xff, + 201 & 0xff} 
+ } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (938 >> 8) & 0xff, + 938 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (205 >> 8) & 0xff, + 205 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (951 >> 8) & 0xff, + 951 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (209 >> 8) & 0xff, + 209 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: proto_header_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (964 >> 8) & 0xff, + 964 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 3}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 1, , table: em_key_recipe.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1176 >> 8) & 0xff, + 1176 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc 
= BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1177 >> 8) & 0xff, + 1177 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1178 >> 8) & 0xff, + 1178 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1179 >> 8) & 0xff, + 1179 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1180 >> 8) & 0xff, + 1180 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1181 >> 8) & 0xff, + 1181 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + 
.field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1182 >> 8) & 0xff, + 1182 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1186 >> 8) & 0xff, + 1186 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1190 >> 8) & 0xff, + 1190 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1193 >> 8) & 0xff, + 1193 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1196 >> 8) & 0xff, + 1196 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1200 >> 8) & 0xff, + 1200 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1204 >> 8) & 0xff, + 1204 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (252 >> 8) & 0xff, + 252 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1212 >> 8) & 0xff, + 1212 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (253 >> 8) & 0xff, + 253 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1220 >> 8) & 0xff, + 1220 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1224 >> 8) & 0xff, + 1224 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1228 >> 8) & 0xff, + 1228 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1231 >> 8) & 0xff, + 1231 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1234 >> 8) & 0xff, + 1234 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1237 >> 8) & 0xff, + 1237 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1240 >> 8) & 0xff, + 1240 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1243 >> 8) & 0xff, + 1243 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1246 >> 8) & 0xff, + 1246 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1249 >> 8) & 0xff, + 1249 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1252 >> 8) & 0xff, + 1252 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (254 >> 8) & 0xff, + 254 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1258 >> 8) & 0xff, + 1258 & 0xff, + (3 >> 8) & 0xff, + 3 & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (255 >> 8) & 0xff, + 255 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1264 >> 8) & 0xff, + 1264 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (256 >> 8) & 0xff, + 256 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1270 >> 8) & 0xff, + 1270 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (257 >> 8) & 0xff, + 257 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1276 >> 8) & 0xff, + 1276 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (258 >> 8) & 0xff, + 258 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1282 >> 8) & 0xff, + 1282 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (259 >> 8) & 0xff, + 259 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1288 >> 8) & 0xff, + 1288 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (260 >> 8) & 0xff, + 260 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1294 >> 8) & 0xff, + 1294 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (261 >> 8) & 0xff, + 261 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1300 >> 8) & 0xff, + 1300 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (262 >> 8) & 0xff, + 262 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1306 >> 8) & 0xff, + 1306 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (263 >> 8) & 0xff, + 263 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1312 >> 8) & 0xff, + 1312 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (264 >> 8) & 0xff, + 264 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1316 >> 8) & 0xff, + 1316 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (265 >> 8) & 0xff, + 265 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1320 >> 8) & 0xff, + 1320 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (266 >> 8) & 0xff, + 266 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1328 >> 8) & 0xff, + 1328 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (267 >> 8) & 0xff, + 267 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1336 >> 8) & 0xff, + 1336 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (268 >> 8) & 0xff, + 268 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1342 >> 8) & 0xff, + 1342 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (269 >> 8) & 0xff, + 269 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1348 >> 8) & 0xff, + 1348 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (270 >> 8) & 0xff, + 270 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1356 >> 8) & 0xff, + 1356 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (271 >> 8) & 0xff, + 271 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1364 >> 8) & 0xff, + 1364 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (272 >> 8) & 0xff, + 272 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 
12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1380 >> 8) & 0xff, + 1380 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (275 >> 8) & 0xff, + 275 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1396 >> 8) & 0xff, + 1396 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (278 >> 8) & 0xff, + 278 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1404 >> 8) & 0xff, + 1404 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (279 >> 8) & 0xff, + 279 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1412 >> 8) & 0xff, + 1412 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (280 >> 8) & 0xff, + 280 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1418 >> 8) & 0xff, + 1418 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, 
+ .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (281 >> 8) & 0xff, + 281 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1424 >> 8) & 0xff, + 1424 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (282 >> 8) & 0xff, + 282 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1430 >> 8) & 0xff, + 1430 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (283 >> 8) & 0xff, + 283 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1436 >> 8) & 0xff, + 1436 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (284 >> 8) & 0xff, + 284 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1442 >> 8) & 0xff, + 1442 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (285 >> 8) & 0xff, + 285 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1448 >> 8) & 0xff, + 1448 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (286 >> 8) & 0xff, + 286 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1454 >> 8) & 0xff, + 1454 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (287 >> 8) & 0xff, + 287 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1460 >> 8) & 0xff, + 1460 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (288 >> 8) & 0xff, + 288 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1472 >> 8) & 0xff, + 1472 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (291 >> 8) & 0xff, + 291 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, 
+ .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1484 >> 8) & 0xff, + 1484 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (294 >> 8) & 0xff, + 294 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1496 >> 8) & 0xff, + 1496 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (297 >> 8) & 0xff, + 297 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1508 >> 8) & 0xff, + 1508 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (300 >> 8) & 0xff, + 300 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1520 >> 8) & 0xff, + 1520 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (303 >> 8) & 0xff, + 303 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1532 >> 8) & 0xff, + 1532 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (306 >> 8) & 0xff, + 306 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1544 >> 8) & 0xff, + 1544 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (309 >> 8) & 0xff, + 309 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1556 >> 8) & 0xff, + 1556 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (312 >> 8) & 0xff, + 312 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1568 >> 8) & 0xff, + 1568 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (315 >> 8) & 0xff, + 315 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , 
table: l2_cntxt_tcam_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 2, , table: flow_chain_cache.group_check */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_GROUP_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_GROUP_ID & 0xff} + } + }, + /* class_tid: 2, , table: flow_chain_cache.write */ + { + .field_info_mask = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "group_id", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_GROUP_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_GROUP_ID & 0xff} + } + }, + /* class_tid: 2, , table: proto_header_cache.rd */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1617 >> 8) & 0xff, + 1617 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1619 >> 8) & 0xff, + 1619 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1839 >> 8) & 0xff, + 1839 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_CHAIN_META_TYPE}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1857 >> 8) & 0xff, + 1857 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (368 >> 8) & 0xff, + 368 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1865 >> 8) & 0xff, + 1865 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (371 >> 8) & 0xff, + 371 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1873 >> 8) & 0xff, + 1873 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (374 >> 8) & 0xff, + 374 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1881 >> 8) & 0xff, + 1881 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (377 >> 8) & 0xff, + 377 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1889 >> 8) & 0xff, + 1889 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (380 >> 8) & 0xff, + 380 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1897 >> 8) & 0xff, + 1897 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (383 >> 8) & 0xff, + 383 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1905 >> 8) & 0xff, + 1905 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (386 >> 8) & 0xff, + 386 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1913 >> 8) & 0xff, + 1913 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (389 >> 8) & 0xff, + 389 & 0xff} + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1921 >> 8) & 0xff, + 1921 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (392 >> 8) & 0xff, + 392 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1930 >> 8) & 0xff, + 1930 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (396 >> 8) & 0xff, + 396 & 0xff} + } + }, + { + .field_info_mask = { + 
.description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1939 >> 8) & 0xff, + 1939 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (400 >> 8) & 0xff, + 400 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1947 >> 8) & 0xff, + 1947 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (403 >> 8) & 0xff, + 403 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1955 >> 8) & 0xff, + 1955 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (406 >> 8) & 0xff, + 406 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1963 >> 8) & 0xff, + 1963 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (409 >> 8) & 0xff, + 409 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1971 >> 8) & 0xff, + 1971 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + 
(412 >> 8) & 0xff, + 412 & 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1979 >> 8) & 0xff, + 1979 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (415 >> 8) & 0xff, + 415 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1987 >> 8) & 0xff, + 1987 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (418 >> 8) & 0xff, + 418 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1993 >> 8) & 0xff, + 1993 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (419 >> 8) & 0xff, + 419 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1999 >> 8) & 0xff, + 1999 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2000 >> 8) & 0xff, + 2000 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2001 >> 8) & 0xff, + 2001 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (420 >> 8) & 0xff, + 420 & 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2005 >> 8) & 0xff, + 2005 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (421 >> 8) & 0xff, + 421 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2009 >> 8) & 0xff, + 2009 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (422 >> 8) & 0xff, + 422 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2014 >> 8) & 0xff, + 2014 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (424 >> 8) & 0xff, + 424 & 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2026 >> 8) & 0xff, + 2026 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (429 >> 8) & 0xff, + 429 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2038 >> 8) & 0xff, + 2038 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(2039 >> 8) & 0xff, + 2039 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2040 >> 8) & 0xff, + 2040 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2041 >> 8) & 0xff, + 2041 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (434 >> 8) & 0xff, + 434 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2045 >> 8) & 0xff, + 2045 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (435 >> 8) & 0xff, + 435 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2049 >> 8) & 0xff, + 2049 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + 
(436 >> 8) & 0xff, + 436 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2053 >> 8) & 0xff, + 2053 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (437 >> 8) & 0xff, + 437 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2057 >> 8) & 0xff, + 2057 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (438 >> 8) & 0xff, + 438 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2061 >> 8) & 0xff, + 2061 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (439 >> 8) & 0xff, + 439 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2065 >> 8) & 0xff, + 2065 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (440 >> 8) & 0xff, + 440 & 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2070 >> 8) & 0xff, + 2070 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 
ULP_THOR_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (442 >> 8) & 0xff, + 442 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2075 >> 8) & 0xff, + 2075 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (444 >> 8) & 0xff, + 444 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2079 >> 8) & 0xff, + 2079 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (445 >> 8) & 0xff, + 445 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2083 >> 8) & 0xff, + 2083 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (446 >> 8) & 0xff, + 446 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2087 >> 8) & 0xff, + 2087 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (447 >> 8) & 0xff, + 447 & 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2091 >> 8) & 0xff, + 2091 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (448 >> 8) & 0xff, + 448 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2095 >> 8) & 0xff, + 2095 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (449 >> 8) & 0xff, + 449 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2099 >> 8) & 0xff, + 2099 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2102 >> 8) & 0xff, + 2102 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2105 >> 8) & 0xff, + 2105 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2106 >> 8) & 0xff, + 2106 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 
= BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2107 >> 8) & 0xff, + 2107 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (0xf000 >> 
8) & 0xff, + 0xf000 & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2109 >> 8) & 0xff, + 2109 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 2, , table: wm_key_recipe.0 */ + { + .field_info_mask = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) 
& 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2110 >> 8) & 0xff, + 2110 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2112 >> 8) & 0xff, + 2112 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2114 >> 8) & 0xff, + 2114 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2115 >> 8) & 0xff, + 2115 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2116 >> 8) & 0xff, + 2116 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + 
.field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2117 >> 8) & 0xff, + 2117 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2118 >> 8) & 0xff, + 2118 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2119 >> 8) & 0xff, + 2119 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2120 >> 8) & 0xff, + 2120 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2123 >> 8) & 0xff, + 2123 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2126 >> 8) & 0xff, + 2126 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (450 >> 8) & 0xff, + 450 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2131 >> 8) & 0xff, + 2131 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (451 >> 8) & 0xff, + 451 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2136 >> 8) & 0xff, + 2136 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2140 >> 8) & 0xff, + 2140 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2144 >> 8) & 0xff, + 2144 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (452 >> 8) & 0xff, + 452 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2152 >> 8) & 0xff, + 2152 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (453 >> 8) & 0xff, + 453 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2160 >> 8) & 0xff, + 2160 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2164 >> 8) & 0xff, + 2164 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2168 >> 8) & 0xff, + 2168 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2171 >> 8) & 0xff, + 2171 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2174 >> 8) & 0xff, + 2174 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2177 >> 8) & 0xff, + 2177 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2180 >> 8) & 0xff, + 2180 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 
0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2183 >> 8) & 0xff, + 2183 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2186 >> 8) & 0xff, + 2186 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2189 >> 8) & 0xff, + 2189 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2192 >> 8) & 0xff, + 2192 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (454 >> 8) & 0xff, + 454 & 0xff} + }, + 
.field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2198 >> 8) & 0xff, + 2198 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (455 >> 8) & 0xff, + 455 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2204 >> 8) & 0xff, + 2204 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (456 >> 8) & 0xff, + 456 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2210 >> 8) & 0xff, + 2210 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (457 >> 8) & 0xff, + 457 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2216 >> 8) & 0xff, + 2216 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (458 >> 
8) & 0xff, + 458 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2222 >> 8) & 0xff, + 2222 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (459 >> 8) & 0xff, + 459 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2228 >> 8) & 0xff, + 2228 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (460 >> 8) & 0xff, + 460 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2234 >> 8) & 0xff, + 2234 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (461 >> 8) & 0xff, + 461 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2240 >> 8) & 0xff, + 2240 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (462 >> 8) & 0xff, + 462 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2246 >> 8) & 0xff, + 2246 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (463 >> 8) & 0xff, + 463 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2252 >> 8) & 0xff, + 2252 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (464 >> 8) & 0xff, + 464 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2256 >> 8) & 0xff, + 2256 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (465 >> 8) & 0xff, + 465 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2260 >> 8) & 0xff, + 2260 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 
0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (466 >> 8) & 0xff, + 466 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2266 >> 8) & 0xff, + 2266 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (467 >> 8) & 0xff, + 467 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2272 >> 8) & 0xff, + 2272 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (468 >> 8) & 0xff, + 468 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2278 >> 8) & 0xff, + 2278 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (469 >> 8) & 0xff, + 469 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2284 >> 8) & 0xff, + 2284 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (470 >> 8) & 0xff, + 470 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2292 >> 8) & 0xff, + 2292 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (471 >> 8) & 0xff, + 471 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2300 >> 8) & 0xff, + 2300 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (472 >> 8) & 0xff, + 472 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2316 >> 8) & 0xff, + 2316 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (475 >> 8) & 0xff, + 475 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2332 >> 8) & 0xff, + 2332 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (478 >> 8) & 0xff, + 478 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2340 >> 8) & 0xff, + 2340 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (479 >> 8) & 0xff, + 479 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2348 >> 8) & 0xff, + 2348 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (480 >> 8) & 0xff, + 480 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2354 >> 8) & 0xff, + 2354 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (481 >> 8) & 0xff, + 481 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2360 >> 8) & 0xff, + 2360 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (482 >> 8) & 0xff, + 482 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2366 >> 8) & 0xff, + 2366 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (483 >> 8) & 0xff, + 483 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2372 >> 8) & 0xff, + 2372 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (484 >> 8) & 0xff, + 484 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2378 >> 8) & 0xff, + 2378 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (485 >> 8) & 0xff, + 485 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { 
+ (2384 >> 8) & 0xff, + 2384 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (486 >> 8) & 0xff, + 486 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2390 >> 8) & 0xff, + 2390 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (487 >> 8) & 0xff, + 487 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2396 >> 8) & 0xff, + 2396 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (488 >> 8) & 0xff, + 488 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2408 >> 8) & 0xff, + 2408 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (491 >> 8) & 0xff, + 491 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2420 >> 8) & 0xff, + 2420 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (494 >> 8) & 0xff, + 494 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2440 >> 8) & 0xff, + 2440 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (501 >> 8) & 0xff, + 501 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2460 >> 8) & 0xff, + 2460 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (508 >> 8) & 0xff, + 508 & 0xff} + }, + .field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2472 >> 8) & 0xff, + 2472 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (511 >> 8) & 0xff, + 511 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2484 >> 8) & 0xff, + 2484 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (514 >> 8) & 0xff, + 514 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2497 >> 8) & 0xff, + 2497 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (518 >> 8) & 0xff, + 518 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2510 >> 8) & 0xff, + 2510 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (522 >> 8) & 0xff, + 522 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2523 >> 8) & 0xff, + 2523 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (526 >> 8) & 0xff, + 526 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + 
(BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 2, , table: proto_header_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2536 >> 8) & 0xff, + 2536 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 3}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 2, , table: em_key_recipe.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2745 >> 8) & 0xff, + 2745 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2746 >> 8) & 0xff, + 2746 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2747 >> 8) & 0xff, + 2747 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2748 >> 8) & 0xff, + 2748 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "rcyc_cnt", + .field_bit_size = 2, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2749 >> 8) & 0xff, + 2749 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "rcyc_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2750 >> 8) & 0xff, + 2750 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_RECYCLE_CNT >> 8) & 0xff, + BNXT_ULP_RF_IDX_RECYCLE_CNT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2751 >> 8) & 0xff, + 2751 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2754 >> 8) & 0xff, + 2754 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2757 >> 8) & 0xff, + 2757 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2760 >> 8) & 0xff, + 2760 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2763 >> 8) & 0xff, + 2763 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2767 >> 8) & 0xff, + 2767 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2771 >> 8) & 0xff, + 2771 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (569 >> 8) & 0xff, + 569 & 0xff} + }, + .field_info_spec = { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2779 >> 8) & 0xff, + 2779 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (570 >> 8) & 0xff, + 570 & 
0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2787 >> 8) & 0xff, + 2787 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2791 >> 8) & 0xff, + 2791 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2795 >> 8) & 0xff, + 2795 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2798 >> 8) & 0xff, + 2798 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2801 >> 8) & 0xff, + 2801 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = 
{ + .description = "tl3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2804 >> 8) & 0xff, + 2804 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2807 >> 8) & 0xff, + 2807 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2810 >> 8) & 0xff, + 2810 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2813 >> 8) & 0xff, + 2813 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "tl3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2816 >> 8) & 0xff, + 2816 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + { + .field_info_mask = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2819 >> 8) & 0xff, + 2819 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (571 >> 8) & 0xff, + 571 & 0xff} + }, + .field_info_spec = { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2825 >> 8) & 0xff, + 2825 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (572 >> 8) & 0xff, + 572 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2831 >> 8) & 0xff, + 2831 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (573 >> 8) & 0xff, + 573 & 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2837 >> 8) & 0xff, + 2837 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (574 >> 8) & 0xff, + 574 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2843 >> 8) & 0xff, + 2843 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (575 >> 8) & 0xff, + 575 & 0xff} + }, + .field_info_spec = { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2849 >> 8) & 0xff, + 2849 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (576 >> 8) & 0xff, + 576 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2855 >> 8) & 0xff, + 2855 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (577 >> 8) & 0xff, + 577 & 0xff} + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2861 >> 8) & 0xff, + 2861 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (578 >> 8) & 0xff, + 578 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2867 >> 8) & 0xff, + 2867 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (579 >> 8) & 0xff, + 579 & 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2873 >> 8) & 0xff, + 2873 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (580 >> 8) & 0xff, + 580 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2879 >> 8) & 0xff, + 2879 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (581 >> 8) & 0xff, + 581 & 0xff} + }, + .field_info_spec = { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2883 >> 8) & 0xff, + 2883 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (582 >> 8) & 0xff, + 582 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2887 >> 8) & 0xff, + 2887 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (583 >> 8) & 0xff, + 583 & 0xff} + }, + .field_info_spec = { + .description = "l2_dmac", + 
.field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2893 >> 8) & 0xff, + 2893 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (584 >> 8) & 0xff, + 584 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2899 >> 8) & 0xff, + 2899 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (585 >> 8) & 0xff, + 585 & 0xff} + }, + .field_info_spec = { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2905 >> 8) & 0xff, + 2905 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (586 >> 8) & 0xff, + 586 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2911 >> 8) & 0xff, + 2911 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (587 >> 8) & 0xff, + 587 & 0xff} + }, + .field_info_spec = { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2919 >> 8) & 0xff, + 2919 & 0xff, + (4 >> 8) & 0xff, + 4 & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (588 >> 8) & 0xff, + 588 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2927 >> 8) & 0xff, + 2927 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (589 >> 8) & 0xff, + 589 & 0xff} + }, + .field_info_spec = { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2943 >> 8) & 0xff, + 2943 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_II_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_II_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (592 >> 8) & 0xff, + 592 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2959 >> 8) & 0xff, + 2959 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (595 >> 8) & 0xff, + 595 & 0xff} + }, + .field_info_spec = { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2967 >> 8) & 0xff, + 2967 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_ETH_TYPE & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (596 >> 8) & 0xff, + 596 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2975 >> 8) & 0xff, + 2975 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (597 >> 8) & 0xff, + 597 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2981 >> 8) & 0xff, + 2981 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (598 >> 8) & 0xff, + 598 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2987 >> 8) & 0xff, + 2987 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (599 >> 8) & 0xff, + 599 & 0xff} + }, + .field_info_spec = { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2993 >> 8) & 0xff, + 2993 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (600 >> 8) & 0xff, + 600 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv4", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2999 >> 8) & 0xff, + 2999 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (601 >> 8) & 0xff, + 601 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3005 >> 8) & 0xff, + 3005 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (602 >> 8) & 0xff, + 602 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3011 >> 8) & 0xff, + 3011 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (603 >> 8) & 0xff, + 603 & 0xff} + }, + .field_info_spec = { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3017 >> 8) & 0xff, + 3017 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (604 >> 8) & 0xff, + 604 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3023 >> 8) & 0xff, + 3023 & 
0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (605 >> 8) & 0xff, + 605 & 0xff} + }, + .field_info_spec = { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3035 >> 8) & 0xff, + 3035 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (608 >> 8) & 0xff, + 608 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3047 >> 8) & 0xff, + 3047 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (611 >> 8) & 0xff, + 611 & 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3059 >> 8) & 0xff, + 3059 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (614 >> 8) & 0xff, + 614 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3071 >> 8) & 0xff, + 3071 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (617 >> 8) & 0xff, + 617 & 0xff} + }, + 
.field_info_spec = { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3083 >> 8) & 0xff, + 3083 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (620 >> 8) & 0xff, + 620 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3095 >> 8) & 0xff, + 3095 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (623 >> 8) & 0xff, + 623 & 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3107 >> 8) & 0xff, + 3107 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (626 >> 8) & 0xff, + 626 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3119 >> 8) & 0xff, + 3119 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (629 >> 8) & 0xff, + 629 & 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3131 
>> 8) & 0xff, + 3131 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (632 >> 8) & 0xff, + 632 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + .field_info_spec = { + .description = "l4.flags", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_TCP_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } + }, + /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + { + .field_info_mask = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "group_metadata", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "hdr_bitmap", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PROFILE_BITMAP >> 8) & 0xff, + BNXT_ULP_CF_IDX_PROFILE_BITMAP & 0xff} + } + }, + /* class_tid: 3, , table: port_table.ing_wr_0 */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 
1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: profile_tcam.prof_func_catch_all */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc 
= BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: port_table.egr_wr_0 */ + { + .field_info_mask = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "dev.port_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd_vfr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr_vfr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + 
(BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_glb_act_rec_rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_glb_act_rec_wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_rd_egr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.get_drv_func_prof_func */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 
16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + 
.description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: profile_tcam.prof_func_catch_all */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 
1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + 
.field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_2_vfr_ing.0 */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VFR_META_MASK >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VFR_META_MASK & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VFR_META_VAL >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VFR_META_VAL & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vfr_2_vf_ing.0 */ + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_tpid_sel", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tunnel_id", + 
.field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tunnel_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "llc", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "roce", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VFR_META_MASK >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VFR_META_MASK & 0xff} + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VF_META_VAL >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VF_META_VAL & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec 
= { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spif", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "loopback", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mpass_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 
= { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: profile_tcam.vf_2_vfr.0 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: profile_tcam.vfr_2_vf.0 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "ieh", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 
2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + 
.field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID >> 8) & 0xff, + 
BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 
0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 4, , table: em.vf_2_vfr.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } 
+ }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VFR_META_VAL >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VFR_META_VAL & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.rd_egr0 */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: em.vfr_2_vf.0 */ + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1 >> 8) & 0xff, + 
BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1 & 0xff} + } + }, + { + .field_info_mask = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "meta", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_META_FID >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_META_FID & 0xff} + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor_class_key_ext_list[] = { + /* class_tid: 1, , table: control.dmac_calculation */ + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (6 >> 8) & 0xff, + 6 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (1 >> 8) & 0xff, + 1 & 0xff} + }, + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (8 >> 8) & 0xff, + 8 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr2 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (2 >> 8) & 0xff, + 2 & 0xff} + }, + { + .description = "", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (9 >> 8) & 0xff, + 9 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC >> 8) & 
0xff, + BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: control.terminating_flow */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (41 >> 8) & 0xff, + 41 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (4 >> 8) & 0xff, + 4 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (43 >> 8) & 0xff, + 43 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (5 >> 8) & 0xff, + 5 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (45 >> 8) & 0xff, + 45 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (6 >> 8) & 0xff, + 6 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (47 >> 8) & 0xff, + 47 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (72 >> 8) & 0xff, + 72 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (83 >> 8) & 0xff, + 83 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (89 >> 8) & 0xff, + 89 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (95 >> 8) & 0xff, + 95 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (101 >> 8) & 0xff, + 101 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (107 >> 8) & 0xff, + 107 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (113 >> 8) & 0xff, + 113 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (119 >> 8) & 0xff, + 119 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (124 >> 8) & 0xff, + 124 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (129 >> 8) & 0xff, + 129 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (135 >> 8) & 0xff, + 135 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (142 >> 8) & 0xff, + 142 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(150 >> 8) & 0xff, + 150 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (20 >> 8) & 0xff, + 20 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (154 >> 8) & 0xff, + 154 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (21 >> 8) & 0xff, + 21 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (158 >> 8) & 0xff, + 158 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (166 >> 8) & 0xff, + 166 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (174 >> 8) & 0xff, + 174 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (24 >> 8) & 0xff, + 24 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (178 >> 8) & 0xff, + 178 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (25 >> 8) & 0xff, + 25 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (182 >> 8) & 0xff, + 182 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (190 >> 8) & 0xff, + 190 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (27 >> 8) & 0xff, + 27 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (194 >> 8) & 0xff, + 194 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (28 >> 8) & 0xff, + 28 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (198 >> 8) & 0xff, + 198 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (206 >> 8) & 0xff, + 206 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (30 >> 8) & 0xff, + 30 & 0xff} 
+ }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (210 >> 8) & 0xff, + 210 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (31 >> 8) & 0xff, + 31 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (214 >> 8) & 0xff, + 214 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (222 >> 8) & 0xff, + 222 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (33 >> 8) & 0xff, + 33 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (226 >> 8) & 0xff, + 226 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (34 >> 8) & 0xff, + 34 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (230 >> 8) & 0xff, + 230 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (238 >> 8) & 0xff, + 238 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (36 >> 8) & 0xff, + 36 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (242 >> 8) & 0xff, + 242 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (37 >> 8) & 0xff, + 37 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (246 >> 8) & 0xff, + 246 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (251 >> 8) & 0xff, + 251 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (254 >> 8) & 0xff, + 254 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (258 >> 8) & 0xff, + 258 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (261 >> 8) & 0xff, + 261 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (265 >> 8) & 0xff, + 265 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (267 >> 8) & 0xff, + 267 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: control.profile_tcam_priority */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (271 >> 8) & 0xff, + 271 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (45 >> 8) & 0xff, + 45 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (273 >> 8) & 0xff, + 273 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (46 >> 8) & 0xff, + 46 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (275 >> 8) & 0xff, + 275 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (47 >> 8) & 0xff, + 47 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (277 >> 8) & 0xff, + 277 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (48 >> 8) & 0xff, + 48 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (279 >> 8) & 0xff, + 279 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (49 >> 8) & 0xff, + 49 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (281 >> 8) & 0xff, + 281 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (50 >> 8) & 0xff, + 50 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (283 >> 8) & 0xff, + 283 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (51 >> 8) & 0xff, + 51 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (285 
>> 8) & 0xff, + 285 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + 2} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (289 >> 8) & 0xff, + 289 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (53 >> 8) & 0xff, + 53 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (291 >> 8) & 0xff, + 291 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (54 >> 8) & 0xff, + 54 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (293 >> 8) & 0xff, + 293 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (297 >> 8) & 0xff, + 297 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (56 >> 8) & 0xff, + 56 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (299 >> 8) & 0xff, + 299 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (57 >> 8) & 0xff, + 57 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (301 >> 8) & 0xff, + 301 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (305 >> 8) & 0xff, + 305 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (59 >> 8) & 0xff, + 59 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (307 >> 8) & 0xff, + 307 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (60 >> 8) & 0xff, + 60 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (309 >> 8) & 0xff, + 309 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (313 >> 8) & 0xff, + 313 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (62 >> 8) & 0xff, + 
62 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (315 >> 8) & 0xff, + 315 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (63 >> 8) & 0xff, + 63 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (317 >> 8) & 0xff, + 317 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (321 >> 8) & 0xff, + 321 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (65 >> 8) & 0xff, + 65 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (323 >> 8) & 0xff, + 323 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (66 >> 8) & 0xff, + 66 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (325 >> 8) & 0xff, + 325 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (329 >> 8) & 0xff, + 329 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (68 >> 8) & 0xff, + 68 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (331 >> 8) & 0xff, + 331 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (69 >> 8) & 0xff, + 69 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (333 >> 8) & 0xff, + 333 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (337 >> 8) & 0xff, + 337 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (71 >> 8) & 0xff, + 71 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (339 >> 8) & 0xff, + 339 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (72 >> 8) & 0xff, + 72 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (341 >> 8) & 0xff, + 341 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (345 >> 8) & 0xff, + 345 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (74 >> 8) & 0xff, + 74 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (347 >> 8) & 0xff, + 347 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (75 >> 8) & 0xff, + 75 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (349 >> 8) & 0xff, + 349 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (352 >> 8) & 0xff, + 352 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (77 >> 8) & 0xff, + 77 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (354 >> 8) & 0xff, + 354 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (78 >> 8) & 0xff, + 78 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (356 >> 8) & 0xff, + 356 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (79 >> 8) & 0xff, + 79 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (358 >> 8) & 0xff, + 358 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (361 >> 8) & 0xff, + 361 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (81 >> 8) & 0xff, + 81 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (363 >> 8) & 0xff, + 363 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (82 >> 8) & 0xff, + 82 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (365 >> 8) & 0xff, + 365 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (83 >> 8) & 0xff, + 83 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (367 >> 8) & 0xff, + 367 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (371 >> 8) & 0xff, + 371 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (85 >> 8) & 0xff, + 85 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (373 >> 8) & 0xff, + 373 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (86 >> 8) & 0xff, + 86 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (375 >> 8) & 0xff, + 375 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (379 >> 8) & 0xff, + 379 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (88 >> 8) & 0xff, + 88 & 0xff} + }, + { + .description = "l3_hdr_type", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (381 >> 8) & 0xff, + 381 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (89 >> 8) & 0xff, + 89 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (383 >> 8) & 0xff, + 383 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (387 >> 8) & 0xff, + 387 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (91 >> 8) & 0xff, + 91 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (389 >> 8) & 0xff, + 389 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (92 >> 8) & 0xff, + 92 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (391 >> 8) & 0xff, + 391 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (395 >> 8) & 0xff, + 395 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (94 >> 8) & 0xff, + 94 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (397 >> 8) & 0xff, + 397 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (95 >> 8) & 0xff, + 95 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (399 >> 8) & 0xff, + 399 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (403 >> 8) & 0xff, + 403 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (97 >> 8) & 0xff, + 97 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (405 >> 8) & 0xff, + 405 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (98 >> 8) & 0xff, + 98 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (407 >> 8) & 0xff, + 407 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (411 >> 8) & 0xff, + 411 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (100 >> 8) & 0xff, + 100 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (413 >> 8) & 0xff, + 413 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (101 >> 8) & 0xff, + 101 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (415 >> 8) & 0xff, + 415 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (420 >> 8) & 0xff, + 420 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (426 >> 8) & 0xff, + 426 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (433 >> 8) & 0xff, + 433 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (437 >> 8) & 0xff, + 437 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (440 >> 8) & 0xff, + 440 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (107 >> 8) & 0xff, + 107 & 0xff} + }, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (442 >> 8) & 0xff, + 442 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (446 >> 8) & 0xff, + 446 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (109 >> 8) & 0xff, + 109 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (448 >> 8) & 0xff, + 448 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (110 >> 8) & 0xff, + 110 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (450 >> 8) & 0xff, + 450 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (111 >> 8) & 0xff, + 111 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (452 >> 8) & 0xff, + 452 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (112 >> 8) & 0xff, + 112 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (454 >> 8) & 0xff, + 454 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (458 >> 8) & 0xff, + 458 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (114 >> 8) & 0xff, + 114 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (460 >> 8) & 0xff, + 460 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_GENEVE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (115 >> 
8) & 0xff, + 115 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (462 >> 8) & 0xff, + 462 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_GRE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (116 >> 8) & 0xff, + 116 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (464 >> 8) & 0xff, + 464 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (117 >> 8) & 0xff, + 117 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (466 >> 8) & 0xff, + 466 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR2}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (473 >> 8) & 0xff, + 473 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (477 >> 8) & 0xff, + 477 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (481 >> 8) & 0xff, + 481 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (485 >> 8) & 0xff, + 485 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (489 >> 8) & 0xff, + 489 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (493 >> 8) & 0xff, + 493 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (497 >> 8) & 0xff, + 497 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (125 >> 8) & 0xff, + 125 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (499 >> 8) & 0xff, + 499 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (502 >> 8) & 0xff, + 502 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (127 >> 8) & 0xff, + 127 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (504 >> 8) & 0xff, + 504 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (507 >> 8) & 0xff, + 507 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (511 >> 8) & 0xff, + 511 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (515 >> 8) & 0xff, + 515 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (519 >> 8) & 0xff, + 519 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (523 >> 8) & 0xff, + 523 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (527 >> 8) & 0xff, + 527 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (539 >> 8) & 0xff, + 539 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (576 >> 8) & 0xff, + 576 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (584 >> 8) & 0xff, + 584 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { 
+ (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (623 >> 8) & 0xff, + 623 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (629 >> 8) & 0xff, + 629 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (635 >> 8) & 0xff, + 635 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (641 >> 8) & 0xff, + 641 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (647 >> 8) & 0xff, + 647 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (653 >> 8) & 0xff, + 653 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (659 >> 8) & 0xff, + 659 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (665 >> 8) & 0xff, + 665 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (671 >> 8) & 0xff, + 671 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + 
.description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (677 >> 8) & 0xff, + 677 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (682 >> 8) & 0xff, + 682 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (686 >> 8) & 0xff, + 686 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (691 >> 8) & 0xff, + 691 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (697 >> 8) & 0xff, + 697 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 
= { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (703 >> 8) & 0xff, + 703 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (709 >> 8) & 0xff, + 709 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (716 >> 8) & 0xff, + 716 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (724 >> 8) & 0xff, + 724 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = 
{ + (732 >> 8) & 0xff, + 732 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (156 >> 8) & 0xff, + 156 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (736 >> 8) & 0xff, + 736 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (157 >> 8) & 0xff, + 157 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (740 >> 8) & 0xff, + 740 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (748 >> 8) & 0xff, + 748 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (159 >> 8) & 0xff, + 159 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (752 >> 8) & 0xff, + 752 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (160 >> 8) & 0xff, + 160 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (756 >> 8) & 0xff, + 756 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (764 >> 8) & 0xff, + 764 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (772 >> 8) & 0xff, + 772 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (779 >> 8) & 0xff, + 779 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (785 >> 8) & 0xff, + 785 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (791 >> 8) & 0xff, + 791 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (797 >> 8) & 0xff, + 797 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (803 >> 8) & 0xff, + 803 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (809 >> 8) & 0xff, + 809 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (815 >> 8) & 0xff, + 815 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (821 >> 8) & 0xff, + 821 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (827 >> 8) & 0xff, + 827 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (172 >> 8) & 0xff, + 172 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (830 >> 8) & 0xff, + 830 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (173 >> 8) & 0xff, + 173 & 0xff} + }, + { + .description = 
"l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (833 >> 8) & 0xff, + 833 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (839 >> 8) & 0xff, + 839 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (175 >> 8) & 0xff, + 175 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (842 >> 8) & 0xff, + 842 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (176 >> 8) & 0xff, + 176 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (845 >> 8) & 0xff, + 845 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (850 >> 8) & 0xff, + 850 & 0xff, + (2 >> 8) & 0xff, + 2 
& 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (178 >> 8) & 0xff, + 178 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (852 >> 8) & 0xff, + 852 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (179 >> 8) & 0xff, + 179 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (854 >> 8) & 0xff, + 854 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (180 >> 8) & 0xff, + 180 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (856 >> 8) & 0xff, + 856 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (181 >> 8) & 0xff, + 181 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (859 >> 8) & 0xff, + 859 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (182 >> 8) & 0xff, + 182 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (862 >> 8) & 0xff, + 862 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (183 >> 8) & 0xff, + 183 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (865 >> 8) & 0xff, + 865 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (870 >> 8) & 0xff, + 870 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (185 >> 8) & 0xff, + 185 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (872 >> 8) & 0xff, + 872 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (186 >> 8) & 0xff, + 186 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (874 >> 8) & 0xff, + 874 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_UDP}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (187 >> 8) & 0xff, + 187 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (876 >> 8) & 0xff, + 876 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (188 >> 8) & 0xff, + 188 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (879 >> 8) & 0xff, + 879 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (189 >> 8) & 0xff, + 189 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (882 >> 8) & 0xff, + 882 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (190 >> 8) & 0xff, + 190 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (885 >> 8) & 0xff, + 885 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = 
"l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (891 >> 8) & 0xff, + 891 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (192 >> 8) & 0xff, + 192 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (894 >> 8) & 0xff, + 894 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (193 >> 8) & 0xff, + 193 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (897 >> 8) & 0xff, + 897 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (903 >> 8) & 0xff, + 903 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (195 >> 8) & 0xff, + 195 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(906 >> 8) & 0xff, + 906 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (196 >> 8) & 0xff, + 196 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (909 >> 8) & 0xff, + 909 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (915 >> 8) & 0xff, + 915 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (198 >> 8) & 0xff, + 198 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (918 >> 8) & 0xff, + 918 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (199 >> 8) & 0xff, + 199 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (921 >> 8) & 0xff, + 921 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (200 >> 8) & 0xff, + 200 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (924 >> 8) & 0xff, + 924 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (928 >> 8) & 0xff, + 928 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (202 >> 8) & 0xff, + 202 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (931 >> 8) & 0xff, + 931 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (203 >> 8) & 0xff, + 203 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (934 >> 8) & 0xff, + 934 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (204 >> 8) & 0xff, + 204 & 0xff} + }, + { + .description = "l4.src", 
+ .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (937 >> 8) & 0xff, + 937 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (941 >> 8) & 0xff, + 941 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (206 >> 8) & 0xff, + 206 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (944 >> 8) & 0xff, + 944 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (207 >> 8) & 0xff, + 207 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (947 >> 8) & 0xff, + 947 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (208 >> 8) & 0xff, + 208 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (950 >> 8) & 0xff, + 950 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (954 >> 8) & 0xff, + 954 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (210 >> 8) & 0xff, + 210 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (957 >> 8) & 0xff, + 957 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (211 >> 8) & 0xff, + 211 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (960 >> 8) & 0xff, + 960 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (212 >> 8) & 0xff, + 212 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (963 >> 8) & 0xff, + 963 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (987 >> 8) & 0xff, + 987 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (998 >> 8) & 0xff, + 998 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1004 >> 8) & 0xff, + 1004 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1010 >> 8) & 0xff, + 1010 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1016 >> 8) & 0xff, + 1016 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1022 >> 8) & 0xff, + 1022 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1028 >> 8) & 0xff, + 1028 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1034 >> 8) & 0xff, + 1034 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1039 >> 8) & 0xff, + 1039 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1045 >> 8) & 0xff, + 1045 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1052 >> 8) & 0xff, + 1052 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1059 >> 8) & 0xff, + 1059 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1067 >> 8) & 0xff, + 1067 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (226 >> 8) & 0xff, + 226 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1071 >> 8) & 0xff, + 1071 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (227 >> 8) & 0xff, + 227 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1075 >> 8) & 0xff, + 1075 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1083 >> 8) & 0xff, + 1083 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1090 >> 8) & 0xff, + 1090 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (230 >> 8) & 0xff, + 230 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1093 >> 8) & 0xff, + 1093 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (231 >> 8) & 0xff, + 231 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1096 >> 8) & 0xff, + 1096 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1102 >> 8) & 0xff, + 1102 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (233 >> 8) & 0xff, + 233 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1105 >> 8) & 0xff, + 1105 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (234 >> 8) & 0xff, + 234 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1108 >> 8) & 0xff, + 1108 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1114 >> 8) & 0xff, + 1114 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (236 >> 8) & 0xff, + 236 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1117 >> 8) & 0xff, + 1117 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (237 >> 8) & 0xff, + 237 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1120 >> 8) & 0xff, + 1120 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1126 >> 8) & 0xff, + 1126 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (239 >> 8) & 0xff, + 239 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1129 >> 8) & 0xff, + 1129 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (240 >> 8) & 0xff, + 240 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1132 >> 8) & 0xff, + 
1132 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1138 >> 8) & 0xff, + 1138 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (242 >> 8) & 0xff, + 242 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1141 >> 8) & 0xff, + 1141 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (243 >> 8) & 0xff, + 243 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1144 >> 8) & 0xff, + 1144 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1150 >> 8) & 0xff, + 1150 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (245 >> 8) & 0xff, + 245 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1153 >> 8) & 0xff, + 1153 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (246 >> 8) & 0xff, + 246 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1156 >> 8) & 0xff, + 1156 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1162 >> 8) & 0xff, + 1162 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (248 >> 8) & 0xff, + 248 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1165 >> 8) & 0xff, + 1165 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (249 >> 8) & 0xff, + 249 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1168 >> 8) & 0xff, + 1168 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1172 >> 8) & 0xff, + 1172 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1174 >> 8) & 0xff, + 1174 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1208 >> 8) & 0xff, + 1208 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1216 >> 8) & 0xff, + 1216 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1255 >> 8) & 0xff, + 1255 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1261 >> 8) & 0xff, + 1261 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1267 
>> 8) & 0xff, + 1267 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1273 >> 8) & 0xff, + 1273 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1279 >> 8) & 0xff, + 1279 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1285 >> 8) & 0xff, + 1285 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1291 >> 8) & 0xff, + 1291 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1297 >> 8) & 0xff, + 1297 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1303 >> 8) & 0xff, + 1303 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1309 >> 8) & 0xff, + 1309 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1314 >> 8) & 0xff, + 1314 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1318 >> 8) & 0xff, + 1318 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1324 >> 8) & 0xff, + 1324 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1332 >> 8) & 0xff, + 1332 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1339 >> 8) & 0xff, + 1339 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1345 >> 8) & 0xff, + 1345 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1352 >> 8) & 0xff, + 1352 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1360 >> 8) & 0xff, + 1360 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, 
+ .field_opr1 = { + (1368 >> 8) & 0xff, + 1368 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (273 >> 8) & 0xff, + 273 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1372 >> 8) & 0xff, + 1372 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (274 >> 8) & 0xff, + 274 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1376 >> 8) & 0xff, + 1376 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1384 >> 8) & 0xff, + 1384 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (276 >> 8) & 0xff, + 276 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1388 >> 8) & 0xff, + 1388 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (277 >> 8) & 0xff, + 277 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1392 >> 8) & 0xff, + 1392 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1400 >> 8) & 0xff, + 1400 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1408 >> 8) & 0xff, + 1408 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1415 >> 8) & 0xff, + 1415 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1421 >> 8) & 0xff, + 1421 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1427 >> 8) & 0xff, + 1427 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1433 >> 8) & 0xff, + 1433 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1439 >> 8) & 0xff, + 1439 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1445 >> 8) & 0xff, + 1445 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1451 >> 8) & 0xff, + 1451 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1457 >> 8) & 0xff, + 1457 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 
= { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1463 >> 8) & 0xff, + 1463 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (289 >> 8) & 0xff, + 289 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1466 >> 8) & 0xff, + 1466 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (290 >> 8) & 0xff, + 290 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1469 >> 8) & 0xff, + 1469 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1475 >> 8) & 0xff, + 1475 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (292 >> 8) & 0xff, + 292 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1478 >> 8) & 0xff, + 1478 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (293 >> 8) & 0xff, + 293 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1481 >> 8) & 0xff, + 1481 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1487 >> 8) & 0xff, + 1487 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (295 >> 8) & 0xff, + 295 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1490 >> 8) & 0xff, + 1490 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (296 >> 8) & 0xff, + 296 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1493 >> 8) & 0xff, + 1493 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1499 >> 8) & 0xff, + 1499 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (298 >> 8) & 0xff, + 298 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1502 >> 8) & 0xff, + 1502 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (299 >> 8) & 0xff, + 299 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1505 >> 8) & 0xff, + 1505 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1511 >> 8) & 0xff, + 1511 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (301 >> 8) & 0xff, + 301 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1514 >> 8) & 0xff, + 1514 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (302 >> 8) & 0xff, + 302 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1517 >> 8) & 0xff, + 1517 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1523 >> 8) & 0xff, + 1523 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (304 >> 8) & 0xff, + 304 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1526 >> 8) & 0xff, + 1526 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (305 >> 8) & 0xff, + 305 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1529 >> 8) & 0xff, + 1529 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1535 >> 8) & 0xff, + 1535 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (307 >> 8) & 0xff, + 307 & 0xff} + }, + { + .description = "l4.src", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1538 >> 8) & 0xff, + 1538 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (308 >> 8) & 0xff, + 308 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1541 >> 8) & 0xff, + 1541 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1547 >> 8) & 0xff, + 1547 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (310 >> 8) & 0xff, + 310 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1550 >> 8) & 0xff, + 1550 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (311 >> 8) & 0xff, + 311 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1553 >> 8) & 0xff, + 1553 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1559 >> 8) & 0xff, + 1559 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (313 >> 8) & 0xff, + 313 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1562 >> 8) & 0xff, + 1562 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (314 >> 8) & 0xff, + 314 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1565 >> 8) & 0xff, + 1565 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1571 >> 8) & 0xff, + 1571 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (316 >> 8) & 0xff, + 316 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1574 >> 8) & 0xff, + 1574 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (317 >> 8) & 0xff, + 317 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1577 >> 8) & 0xff, + 1577 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* class_tid: 2, , table: control.terminating_flow */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1607 >> 8) & 0xff, + 1607 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (319 >> 8) & 0xff, + 319 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1609 >> 8) & 0xff, + 1609 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (320 >> 8) & 0xff, + 320 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1611 >> 8) & 0xff, + 1611 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (321 >> 8) & 0xff, + 321 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1613 >> 8) & 0xff, + 1613 & 
0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1632 >> 8) & 0xff, + 1632 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + 1} + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1642 >> 8) & 0xff, + 1642 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1653 >> 8) & 0xff, + 1653 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1659 >> 8) & 0xff, + 1659 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1665 >> 8) & 0xff, + 1665 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1671 >> 8) & 0xff, + 1671 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1677 >> 8) & 0xff, + 1677 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1683 >> 8) & 0xff, + 1683 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1689 >> 8) & 0xff, + 1689 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1694 >> 8) & 0xff, + 1694 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1699 >> 8) & 0xff, + 1699 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1705 >> 8) & 0xff, + 1705 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1712 >> 8) & 0xff, + 1712 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1720 >> 8) & 0xff, + 1720 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (336 >> 8) & 0xff, + 336 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1724 >> 8) & 0xff, + 1724 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (337 >> 8) & 0xff, + 337 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1728 >> 8) & 0xff, + 1728 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 
= BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1736 >> 8) & 0xff, + 1736 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1744 >> 8) & 0xff, + 1744 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (340 >> 8) & 0xff, + 340 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1748 >> 8) & 0xff, + 1748 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (341 >> 8) & 0xff, + 341 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1752 >> 8) & 0xff, + 1752 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1760 >> 8) & 0xff, + 1760 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (343 >> 8) & 0xff, + 343 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1764 >> 8) & 0xff, + 1764 & 0xff, + (4 >> 8) 
& 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (344 >> 8) & 0xff, + 344 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1768 >> 8) & 0xff, + 1768 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1776 >> 8) & 0xff, + 1776 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (346 >> 8) & 0xff, + 346 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1780 >> 8) & 0xff, + 1780 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (347 >> 8) & 0xff, + 347 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1784 >> 8) & 0xff, + 1784 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1792 >> 8) & 0xff, + 1792 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (349 >> 8) & 0xff, + 349 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1796 >> 8) & 0xff, + 1796 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (350 >> 8) & 0xff, + 350 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1800 >> 8) & 0xff, + 1800 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1808 >> 8) & 0xff, + 1808 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (352 >> 8) & 0xff, + 352 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1812 >> 8) & 0xff, + 1812 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (353 >> 8) & 0xff, + 353 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1816 >> 8) & 0xff, + 1816 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1821 >> 8) & 0xff, + 1821 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1824 >> 8) & 0xff, + 1824 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1828 >> 8) & 0xff, + 1828 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_OR, + .field_opr1 = { + (1831 >> 8) & 0xff, + 1831 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1835 >> 8) & 0xff, + 1835 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1837 >> 8) & 0xff, + 1837 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: control.profile_tcam_priority */ + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1841 >> 8) & 0xff, + 1841 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (361 >> 8) & 0xff, + 361 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1843 >> 8) & 0xff, + 1843 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (362 >> 8) & 0xff, + 362 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1845 >> 8) & 0xff, + 1845 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (363 >> 8) & 0xff, + 363 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1847 >> 8) & 0xff, + 1847 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (364 >> 8) & 0xff, + 364 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1849 >> 8) & 0xff, + 1849 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (365 >> 8) & 
0xff, + 365 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1851 >> 8) & 0xff, + 1851 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (366 >> 8) & 0xff, + 366 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1853 >> 8) & 0xff, + 1853 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (367 >> 8) & 0xff, + 367 & 0xff} + }, + { + .description = "", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1855 >> 8) & 0xff, + 1855 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + 2} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1859 >> 8) & 0xff, + 1859 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (369 >> 8) & 0xff, + 369 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1861 >> 8) & 0xff, + 1861 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (370 >> 8) & 0xff, + 370 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1863 >> 8) & 0xff, + 1863 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1867 >> 8) & 0xff, + 1867 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (372 >> 8) & 0xff, + 372 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1869 >> 8) & 0xff, + 1869 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (373 >> 8) & 0xff, + 373 & 0xff} + }, + { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1871 >> 8) & 0xff, + 1871 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1875 >> 8) & 0xff, + 1875 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (375 >> 8) & 0xff, + 375 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, 
+ .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1877 >> 8) & 0xff, + 1877 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (376 >> 8) & 0xff, + 376 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1879 >> 8) & 0xff, + 1879 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1883 >> 8) & 0xff, + 1883 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (378 >> 8) & 0xff, + 378 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1885 >> 8) & 0xff, + 1885 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (379 >> 8) & 0xff, + 379 & 0xff} + }, + { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1887 >> 8) & 0xff, + 1887 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 
= { + (1891 >> 8) & 0xff, + 1891 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (381 >> 8) & 0xff, + 381 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1893 >> 8) & 0xff, + 1893 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (382 >> 8) & 0xff, + 382 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1895 >> 8) & 0xff, + 1895 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1899 >> 8) & 0xff, + 1899 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (384 >> 8) & 0xff, + 384 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1901 >> 8) & 0xff, + 1901 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (385 >> 8) & 0xff, + 385 & 0xff} + }, + { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1903 >> 8) & 0xff, + 1903 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1907 >> 8) & 0xff, + 1907 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (387 >> 8) & 0xff, + 387 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1909 >> 8) & 0xff, + 1909 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (388 >> 8) & 0xff, + 388 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1911 >> 8) & 0xff, + 1911 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1915 >> 8) & 0xff, + 1915 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (390 >> 8) & 0xff, + 390 & 0xff} + }, + { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1917 >> 8) & 0xff, + 1917 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (391 >> 8) & 0xff, + 391 & 0xff} + }, + { + .description = "l4_hdr_valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1919 >> 8) & 0xff, + 1919 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1922 >> 8) & 0xff, + 1922 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (393 >> 8) & 0xff, + 393 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1924 >> 8) & 0xff, + 1924 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (394 >> 8) & 0xff, + 394 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1926 >> 8) & 0xff, + 1926 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (395 >> 8) & 0xff, + 395 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1928 >> 8) & 0xff, + 1928 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1931 >> 8) & 0xff, + 
1931 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (397 >> 8) & 0xff, + 397 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1933 >> 8) & 0xff, + 1933 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (398 >> 8) & 0xff, + 398 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1935 >> 8) & 0xff, + 1935 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (399 >> 8) & 0xff, + 399 & 0xff} + }, + { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1937 >> 8) & 0xff, + 1937 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1941 >> 8) & 0xff, + 1941 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (401 >> 8) & 0xff, + 401 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1943 >> 8) & 0xff, + 1943 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (402 >> 8) & 0xff, + 402 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1945 >> 8) & 0xff, + 1945 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1949 >> 8) & 0xff, + 1949 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_TYPE_IPV6}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (404 >> 8) & 0xff, + 404 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1951 >> 8) & 0xff, + 1951 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (405 >> 8) & 0xff, + 405 & 0xff} + }, + { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1953 >> 8) & 0xff, + 1953 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1957 >> 8) & 0xff, + 1957 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (407 >> 8) & 0xff, + 407 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1959 >> 8) & 0xff, + 1959 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (408 >> 8) & 0xff, + 408 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1961 >> 8) & 0xff, + 1961 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1965 >> 8) & 0xff, + 1965 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (410 >> 8) & 0xff, + 410 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1967 >> 8) & 0xff, + 1967 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (411 >> 8) & 0xff, + 411 & 0xff} + }, + { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1969 >> 8) & 0xff, + 1969 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1973 >> 8) & 0xff, + 1973 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (413 >> 8) & 0xff, + 413 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1975 >> 8) & 0xff, + 1975 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (414 >> 8) & 0xff, + 414 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1977 >> 8) & 0xff, + 1977 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1981 >> 8) & 0xff, + 1981 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (416 >> 8) & 0xff, + 416 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1983 >> 8) & 0xff, + 1983 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (417 >> 8) & 0xff, + 417 & 0xff} + }, + { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1985 >> 8) & 0xff, + 1985 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1990 >> 8) & 0xff, + 1990 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_TWO_VTAGS_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1996 >> 8) & 0xff, + 1996 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_VTAG_PRESENT_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2003 >> 8) & 0xff, + 2003 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2007 >> 8) & 0xff, + 2007 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2010 >> 8) & 0xff, + 2010 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (423 >> 8) & 0xff, + 423 & 0xff} + }, + { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2012 >> 8) & 0xff, + 2012 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_L2_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2016 >> 8) & 0xff, + 2016 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (425 >> 8) & 0xff, + 425 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2018 >> 8) & 0xff, + 2018 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (426 >> 8) & 0xff, + 426 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2020 >> 8) & 0xff, + 2020 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (427 >> 8) & 0xff, + 427 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2022 >> 8) & 0xff, + 2022 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (428 >> 8) & 0xff, + 428 & 0xff} + }, + { + 
.description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2024 >> 8) & 0xff, + 2024 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2028 >> 8) & 0xff, + 2028 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (430 >> 8) & 0xff, + 430 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2030 >> 8) & 0xff, + 2030 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_GENEVE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (431 >> 8) & 0xff, + 431 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2032 >> 8) & 0xff, + 2032 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_GRE}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (432 >> 8) & 0xff, + 432 & 0xff} + }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2034 >> 8) & 0xff, + 2034 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (433 >> 8) & 0xff, + 433 & 0xff} 
+ }, + { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2036 >> 8) & 0xff, + 2036 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TUN_HDR_TYPE_UPAR2}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2043 >> 8) & 0xff, + 2043 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2047 >> 8) & 0xff, + 2047 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_IS_UDP_TCP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2051 >> 8) & 0xff, + 2051 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2055 >> 8) & 0xff, + 2055 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_TYPE_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2059 >> 8) 
& 0xff, + 2059 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2063 >> 8) & 0xff, + 2063 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2067 >> 8) & 0xff, + 2067 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (441 >> 8) & 0xff, + 441 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2069 >> 8) & 0xff, + 2069 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ONES + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2072 >> 8) & 0xff, + 2072 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL4_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (443 >> 8) & 0xff, + 443 & 0xff} + }, + { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2074 >> 8) & 0xff, + 2074 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2077 >> 8) & 0xff, + 2077 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_ISIP_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2081 >> 8) & 0xff, + 2081 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2085 >> 8) & 0xff, + 2085 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2089 >> 8) & 0xff, + 2089 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2093 >> 8) & 0xff, + 2093 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2097 >> 8) & 0xff, + 2097 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_TL3_HDR_VALID_YES}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2129 >> 8) & 0xff, + 2129 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_HF, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "tl2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2134 >> 8) & 0xff, + 2134 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_HF, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2148 >> 8) & 0xff, + 2148 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2156 >> 8) & 0xff, + 2156 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2195 >> 8) & 0xff, + 2195 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + 
.field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2201 >> 8) & 0xff, + 2201 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2207 >> 8) & 0xff, + 2207 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2213 >> 8) & 0xff, + 2213 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2219 >> 8) & 0xff, + 2219 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2225 >> 8) & 0xff, + 2225 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2231 >> 8) & 0xff, + 2231 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2237 >> 8) & 0xff, + 2237 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2243 >> 8) & 0xff, + 2243 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2249 >> 8) & 0xff, + 2249 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2254 >> 8) & 0xff, + 2254 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2258 >> 8) & 0xff, + 2258 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2263 >> 8) & 0xff, + 2263 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2269 >> 8) & 0xff, + 2269 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2275 >> 8) & 0xff, + 2275 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2281 >> 8) & 0xff, + 2281 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2288 >> 8) & 0xff, + 2288 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2296 >> 8) & 0xff, + 2296 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2304 >> 8) & 0xff, + 2304 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (473 >> 8) & 0xff, + 473 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 
12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2308 >> 8) & 0xff, + 2308 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (474 >> 8) & 0xff, + 474 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2312 >> 8) & 0xff, + 2312 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2320 >> 8) & 0xff, + 2320 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (476 >> 8) & 0xff, + 476 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2324 >> 8) & 0xff, + 2324 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (477 >> 8) & 0xff, + 477 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2328 >> 8) 
& 0xff, + 2328 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2336 >> 8) & 0xff, + 2336 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2344 >> 8) & 0xff, + 2344 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2351 >> 8) & 0xff, + 2351 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2357 >> 8) & 0xff, + 2357 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description 
= "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2363 >> 8) & 0xff, + 2363 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2369 >> 8) & 0xff, + 2369 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2375 >> 8) & 0xff, + 2375 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2381 >> 8) & 0xff, + 2381 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2387 >> 8) & 0xff, + 2387 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2393 >> 8) & 0xff, + 2393 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2399 >> 8) & 0xff, + 2399 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (489 >> 8) & 0xff, + 489 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2402 >> 8) & 0xff, + 2402 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (490 >> 8) & 0xff, + 490 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2405 >> 8) & 0xff, + 2405 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP 
+ }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2411 >> 8) & 0xff, + 2411 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (492 >> 8) & 0xff, + 492 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2414 >> 8) & 0xff, + 2414 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (493 >> 8) & 0xff, + 493 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2417 >> 8) & 0xff, + 2417 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2422 >> 8) & 0xff, + 2422 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (495 >> 8) & 0xff, + 495 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2424 >> 8) & 0xff, + 2424 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + 
.field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (496 >> 8) & 0xff, + 496 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2426 >> 8) & 0xff, + 2426 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (497 >> 8) & 0xff, + 497 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2428 >> 8) & 0xff, + 2428 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (498 >> 8) & 0xff, + 498 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2431 >> 8) & 0xff, + 2431 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (499 >> 8) & 0xff, + 499 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2434 >> 8) & 0xff, + 2434 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (500 >> 8) & 0xff, + 500 & 0xff} + }, + { + .description 
= "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2437 >> 8) & 0xff, + 2437 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2442 >> 8) & 0xff, + 2442 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (502 >> 8) & 0xff, + 502 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2444 >> 8) & 0xff, + 2444 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (503 >> 8) & 0xff, + 503 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2446 >> 8) & 0xff, + 2446 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_THOR_SYM_IP_PROTO_UDP}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (504 >> 8) & 0xff, + 504 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2448 >> 8) & 0xff, + 2448 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + 
(BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (505 >> 8) & 0xff, + 505 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2451 >> 8) & 0xff, + 2451 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (506 >> 8) & 0xff, + 506 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2454 >> 8) & 0xff, + 2454 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (507 >> 8) & 0xff, + 507 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2457 >> 8) & 0xff, + 2457 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2463 >> 8) & 0xff, + 2463 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (509 >> 8) & 0xff, + 509 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2466 >> 8) & 0xff, + 2466 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (510 >> 8) & 0xff, + 510 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2469 >> 8) & 0xff, + 2469 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2475 >> 8) & 0xff, + 2475 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (512 >> 8) & 0xff, + 512 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2478 >> 8) & 0xff, + 2478 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (513 >> 8) & 0xff, + 513 & 0xff} + }, + { + .description = "l3.qos", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2481 >> 8) & 0xff, + 2481 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2487 >> 8) & 0xff, + 2487 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (515 >> 8) & 0xff, + 515 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2490 >> 8) & 0xff, + 2490 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (516 >> 8) & 0xff, + 516 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2493 >> 8) & 0xff, + 2493 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (517 >> 8) & 0xff, + 517 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, 
+ .field_opr1 = { + (2496 >> 8) & 0xff, + 2496 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2500 >> 8) & 0xff, + 2500 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (519 >> 8) & 0xff, + 519 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2503 >> 8) & 0xff, + 2503 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (520 >> 8) & 0xff, + 520 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2506 >> 8) & 0xff, + 2506 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (521 >> 8) & 0xff, + 521 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2509 >> 8) & 0xff, + 2509 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + 
.field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2513 >> 8) & 0xff, + 2513 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (523 >> 8) & 0xff, + 523 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2516 >> 8) & 0xff, + 2516 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (524 >> 8) & 0xff, + 524 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2519 >> 8) & 0xff, + 2519 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (525 >> 8) & 0xff, + 525 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2522 >> 8) & 0xff, + 2522 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2526 >> 8) & 0xff, + 2526 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 
= BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (527 >> 8) & 0xff, + 527 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2529 >> 8) & 0xff, + 2529 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (528 >> 8) & 0xff, + 528 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2532 >> 8) & 0xff, + 2532 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (529 >> 8) & 0xff, + 529 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2535 >> 8) & 0xff, + 2535 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_SKIP, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2558 >> 8) & 0xff, + 2558 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2569 >> 8) & 0xff, + 2569 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2575 >> 8) & 0xff, + 2575 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2581 >> 8) & 0xff, + 2581 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2587 >> 8) & 0xff, + 2587 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2593 >> 8) & 0xff, + 2593 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2599 >> 8) & 0xff, + 2599 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2605 >> 8) & 0xff, + 2605 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2610 >> 8) & 0xff, + 2610 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2615 >> 8) & 0xff, + 2615 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2621 >> 8) & 0xff, + 2621 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2628 >> 8) & 0xff, + 2628 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2636 >> 8) & 0xff, + 2636 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (543 >> 8) & 0xff, + 543 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2640 >> 8) & 0xff, + 2640 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (544 >> 8) & 0xff, + 544 & 0xff} + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2644 >> 8) & 0xff, + 2644 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2652 >> 8) & 0xff, + 2652 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2659 >> 8) & 0xff, + 2659 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (547 >> 8) & 0xff, + 547 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2662 >> 8) & 0xff, + 2662 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (548 >> 8) & 0xff, + 548 & 0xff} + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2665 >> 8) & 0xff, + 2665 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2671 >> 8) & 0xff, + 2671 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (550 >> 8) & 0xff, + 550 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2674 >> 8) & 0xff, + 2674 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (551 >> 8) & 0xff, + 551 & 0xff} + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2677 >> 8) & 0xff, + 2677 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2683 >> 8) & 0xff, + 2683 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (553 >> 8) & 0xff, + 553 & 0xff} + }, + { + .description = "l3_ttl.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2686 >> 8) & 0xff, + 2686 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (554 >> 8) & 0xff, + 554 & 0xff} + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2689 >> 8) & 0xff, + 2689 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2695 >> 8) & 0xff, + 2695 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (556 >> 8) & 0xff, + 556 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2698 >> 8) & 0xff, + 2698 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (557 >> 8) & 0xff, + 557 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2701 >> 8) & 0xff, + 2701 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2707 >> 8) & 0xff, + 2707 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (559 >> 8) & 0xff, + 559 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2710 >> 8) & 0xff, + 2710 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (560 >> 8) & 0xff, + 560 & 0xff} + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2713 >> 8) & 0xff, + 2713 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2719 >> 8) & 0xff, + 2719 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (562 >> 8) & 0xff, + 562 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2722 >> 8) & 0xff, + 2722 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (563 >> 8) & 0xff, + 563 & 0xff} + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 
= { + (2725 >> 8) & 0xff, + 2725 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2731 >> 8) & 0xff, + 2731 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (565 >> 8) & 0xff, + 565 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2734 >> 8) & 0xff, + 2734 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (566 >> 8) & 0xff, + 566 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2737 >> 8) & 0xff, + 2737 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2741 >> 8) & 0xff, + 2741 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2743 >> 8) & 0xff, + 2743 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2775 >> 8) & 0xff, + 2775 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2783 >> 8) & 0xff, + 2783 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2822 >> 8) & 0xff, + 2822 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2828 >> 8) & 0xff, + 2828 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2834 >> 8) & 0xff, + 2834 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2840 >> 8) & 0xff, + 2840 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2846 >> 8) & 0xff, + 2846 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2852 >> 8) & 0xff, + 2852 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2858 >> 8) & 0xff, + 2858 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2864 >> 8) & 0xff, + 2864 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2870 >> 8) 
& 0xff, + 2870 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2876 >> 8) & 0xff, + 2876 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2881 >> 8) & 0xff, + 2881 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "tids", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2885 >> 8) & 0xff, + 2885 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_T_VXLAN_GPE_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2890 >> 8) & 0xff, + 2890 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2896 >> 8) & 0xff, + 2896 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2902 >> 8) & 0xff, + 2902 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2908 >> 8) & 0xff, + 2908 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2915 >> 8) & 0xff, + 2915 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ovv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2923 >> 8) & 0xff, + 2923 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2931 >> 8) & 0xff, + 2931 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (590 >> 8) & 0xff, + 590 & 0xff} + }, + { + .description = "l2_ivv", + 
.field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2935 >> 8) & 0xff, + 2935 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (591 >> 8) & 0xff, + 591 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2939 >> 8) & 0xff, + 2939 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2947 >> 8) & 0xff, + 2947 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_IO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_IO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (593 >> 8) & 0xff, + 593 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2951 >> 8) & 0xff, + 2951 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OI_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OI_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (594 >> 8) & 0xff, + 594 & 0xff} + }, + { + .description = "l2_ivv", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2955 >> 8) & 0xff, + 2955 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 
0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2963 >> 8) & 0xff, + 2963 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l2_etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2971 >> 8) & 0xff, + 2971 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_TYPE >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2978 >> 8) & 0xff, + 2978 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2984 >> 8) & 0xff, + 2984 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2990 >> 8) & 0xff, + 2990 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.sip.ipv6", + .field_bit_size = 128, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2996 >> 8) & 0xff, + 2996 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3002 >> 8) & 0xff, + 3002 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv4", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3008 >> 8) & 0xff, + 3008 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3014 >> 8) & 0xff, + 3014 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.dip.ipv6", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3020 >> 8) & 0xff, + 3020 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3026 >> 8) & 0xff, + 3026 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (606 >> 8) & 0xff, + 606 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3029 >> 8) & 0xff, + 3029 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (607 >> 8) & 0xff, + 607 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3032 >> 8) & 0xff, + 3032 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3038 >> 8) & 0xff, + 3038 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (609 >> 8) & 0xff, + 609 & 0xff} + }, + { + .description = "l3.ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3041 >> 8) & 0xff, + 3041 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (610 >> 8) & 0xff, + 610 & 0xff} + }, + { + .description = "l3.ttl", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3044 >> 8) & 0xff, + 3044 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3050 >> 8) & 0xff, + 3050 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (612 >> 8) & 0xff, + 612 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3053 >> 8) & 0xff, + 3053 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (613 >> 8) & 0xff, + 613 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3056 >> 8) & 0xff, + 3056 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3062 >> 8) & 0xff, + 3062 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (615 >> 8) & 0xff, + 615 & 0xff} + }, + { + .description = "l3.prot", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3065 >> 8) & 0xff, + 3065 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (616 >> 8) & 0xff, + 616 & 0xff} + }, + { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3068 >> 8) & 0xff, + 3068 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3074 >> 8) & 0xff, + 3074 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (618 >> 8) & 0xff, + 618 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3077 >> 8) & 0xff, + 3077 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (619 >> 8) & 0xff, + 619 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3080 >> 8) & 0xff, + 3080 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l3.qos", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3086 >> 8) & 0xff, + 3086 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (621 >> 8) & 0xff, + 621 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3089 >> 8) & 0xff, + 3089 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (622 >> 8) & 0xff, + 622 & 0xff} + }, + { + .description = "l3.qos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3092 >> 8) & 0xff, + 3092 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_QOS >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_QOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3098 >> 8) & 0xff, + 3098 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (624 >> 8) & 0xff, + 624 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3101 >> 8) & 0xff, + 3101 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (625 >> 8) & 0xff, + 625 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3104 >> 8) & 0xff, + 3104 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3110 >> 8) & 0xff, + 3110 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (627 >> 8) & 0xff, + 627 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3113 >> 8) & 0xff, + 3113 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (628 >> 8) & 0xff, + 628 & 0xff} + }, + { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3116 >> 8) & 0xff, + 3116 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3122 
>> 8) & 0xff, + 3122 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (630 >> 8) & 0xff, + 630 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3125 >> 8) & 0xff, + 3125 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (631 >> 8) & 0xff, + 631 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3128 >> 8) & 0xff, + 3128 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3134 >> 8) & 0xff, + 3134 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_I_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (633 >> 8) & 0xff, + 633 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3137 >> 8) & 0xff, + 3137 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (634 >> 8) & 0xff, + 634 & 0xff} + }, + { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (3140 >> 8) & 0xff, + 3140 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + } +}; + +struct bnxt_ulp_mapper_field_info ulp_thor_class_result_field_list[] = { + /* class_tid: 1, , table: tunnel_cache.f1_f2_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + /* class_tid: 1, , table: jump_index_table.alloc */ + /* class_tid: 1, , table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* class_tid: 1, , table: l2_cntxt_tcam.chain_entry */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "ctxt_meta_prof", + 
.field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF & 0xff} + }, + /* class_tid: 1, , table: flow_chain_l2_cntxt.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + /* class_tid: 1, , table: l2_cntxt_tcam.ingress_entry */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF & 0xff} + }, + /* class_tid: 1, , table: mac_addr_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: 
fkb_select.wc_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (53 >> 8) & 0xff, + 53 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (55 >> 8) & 0xff, + 55 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (56 >> 8) & 0xff, + 56 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (57 >> 8) & 0xff, + 57 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (58 >> 8) & 0xff, + 58 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (61 >> 8) & 0xff, + 61 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (64 >> 8) & 0xff, + 64 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (68 >> 8) & 0xff, + 68 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (7 >> 8) & 0xff, + 7 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (76 >> 8) & 0xff, + 76 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (80 >> 8) & 0xff, + 80 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (8 >> 8) & 0xff, + 8 & 0xff} + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (86 >> 8) & 0xff, + 86 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (9 >> 8) & 0xff, + 9 & 0xff} + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (92 >> 8) & 0xff, + 92 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (10 >> 8) & 0xff, + 10 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (98 >> 8) & 0xff, + 98 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (11 >> 8) & 0xff, + 11 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (104 >> 8) & 0xff, + 104 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (12 >> 8) & 0xff, + 12 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (110 >> 8) & 0xff, + 110 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (13 >> 8) & 0xff, + 13 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (116 >> 8) & 0xff, + 116 & 0xff, + (3 
>> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (14 >> 8) & 0xff, + 14 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (122 >> 8) & 0xff, + 122 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (15 >> 8) & 0xff, + 15 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (126 >> 8) & 0xff, + 126 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (16 >> 8) & 0xff, + 16 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (132 >> 8) & 0xff, + 132 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (17 >> 8) & 0xff, + 17 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (138 >> 8) & 0xff, + 138 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (18 >> 8) & 0xff, + 18 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (146 >> 8) & 0xff, + 146 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (19 >> 8) & 0xff, + 19 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (162 >> 8) & 0xff, + 162 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (22 >> 8) & 0xff, + 22 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (170 >> 8) & 0xff, + 170 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (23 >> 8) & 0xff, + 23 & 0xff} + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (186 >> 8) & 0xff, + 186 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (26 >> 8) & 0xff, + 26 & 0xff} + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (202 >> 8) & 0xff, + 202 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (29 >> 8) & 0xff, + 29 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (218 >> 8) & 0xff, + 218 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (32 >> 8) & 0xff, + 32 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (234 >> 8) & 0xff, + 234 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (35 >> 8) & 0xff, + 35 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (250 >> 8) & 0xff, + 250 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (38 >> 8) & 0xff, + 38 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (39 >> 8) & 0xff, + 39 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (257 >> 8) & 0xff, + 257 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (40 >> 8) & 0xff, + 40 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (41 >> 8) & 0xff, + 41 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (264 >> 8) & 0xff, + 264 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (42 >> 8) & 0xff, + 42 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (43 >> 8) & 0xff, + 43 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + /* class_tid: 1, , table: fkb_select.em_gen_template_alloc */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc 
= BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_TERM_FLOW >> 8) & 0xff, + BNXT_ULP_RF_IDX_TERM_FLOW & 0xff} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: proto_header_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "em_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_RECIPE_ID & 0xff} + }, + { + .description = "wc_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_RECIPE_ID & 0xff} + }, + /* class_tid: 1, , table: fkb_select.em_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (969 >> 8) & 0xff, + 969 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (970 >> 8) & 0xff, + 970 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (971 >> 8) & 0xff, + 971 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (972 >> 8) & 0xff, + 972 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (976 >> 8) & 0xff, + 976 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (979 >> 8) & 0xff, + 979 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (983 >> 8) & 0xff, + 983 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (213 >> 8) & 0xff, + 213 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (991 >> 8) & 0xff, + 991 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (995 >> 8) & 0xff, + 995 & 0xff, + 
(3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (214 >> 8) & 0xff, + 214 & 0xff} + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1001 >> 8) & 0xff, + 1001 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (215 >> 8) & 0xff, + 215 & 0xff} + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1007 >> 8) & 0xff, + 1007 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (216 >> 8) & 0xff, + 216 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1013 >> 8) & 0xff, + 1013 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (217 >> 8) & 0xff, + 217 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, 
+ .field_opr1 = { + (1019 >> 8) & 0xff, + 1019 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (218 >> 8) & 0xff, + 218 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = 
BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1025 >> 8) & 0xff, + 1025 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (219 >> 8) & 0xff, + 219 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1031 >> 8) & 0xff, + 1031 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (220 >> 8) & 0xff, + 220 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + 
(1037 >> 8) & 0xff, + 1037 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (221 >> 8) & 0xff, + 221 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1041 >> 8) & 0xff, + 1041 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (222 >> 8) & 0xff, + 222 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1049 >> 8) & 0xff, + 1049 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (223 >> 8) & 0xff, + 223 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1055 >> 8) & 0xff, + 1055 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (224 >> 8) & 0xff, + 224 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1063 >> 8) & 0xff, + 1063 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (225 >> 8) & 0xff, + 225 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1079 >> 8) & 0xff, + 1079 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (228 >> 8) & 0xff, + 228 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1087 >> 8) & 0xff, + 1087 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (229 >> 8) & 0xff, + 229 & 0xff} + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1099 >> 8) & 0xff, + 1099 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (232 >> 8) & 0xff, + 232 & 0xff} + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1111 >> 8) & 0xff, + 1111 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (235 
>> 8) & 0xff, + 235 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1123 >> 8) & 0xff, + 1123 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (238 >> 8) & 0xff, + 238 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1135 >> 8) & 0xff, + 1135 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (241 >> 8) & 0xff, + 241 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1147 >> 8) & 0xff, + 1147 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (244 >> 8) & 0xff, + 244 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1159 >> 8) & 0xff, + 1159 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (247 >> 8) & 0xff, + 247 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1171 >> 8) & 0xff, + 1171 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (250 >> 8) & 0xff, + 250 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (251 >> 8) & 0xff, + 251 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: em_flow_conflict_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* class_tid: 1, , table: em.ingress_generic_template */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: wm.ingress_generic_template */ + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 2, , table: jump_index_table.alloc */ + /* class_tid: 2, , table: flow_chain_cache.write */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "metadata", + .field_bit_size 
= 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_JUMP_META >> 8) & 0xff, + BNXT_ULP_RF_IDX_JUMP_META & 0xff} + }, + /* class_tid: 2, , table: fkb_select.wc_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1621 >> 8) & 0xff, + 1621 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1623 >> 8) & 0xff, + 1623 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1624 >> 8) & 0xff, + 1624 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1625 >> 8) & 0xff, + 1625 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + 
.field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1626 >> 8) & 0xff, + 1626 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1629 >> 8) & 0xff, + 1629 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (322 >> 8) & 0xff, + 322 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1634 >> 8) & 0xff, + 1634 & 
0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1638 >> 8) & 0xff, + 1638 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (323 >> 8) & 0xff, + 323 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1646 >> 8) & 0xff, + 1646 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1650 >> 8) & 0xff, + 1650 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (324 >> 8) & 
0xff, + 324 & 0xff} + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1656 >> 8) & 0xff, + 1656 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (325 >> 8) & 0xff, + 325 & 0xff} + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1662 >> 8) & 0xff, + 1662 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (326 >> 8) & 0xff, + 326 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1668 >> 8) & 0xff, + 1668 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (327 >> 8) & 0xff, + 327 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1674 >> 8) & 0xff, + 1674 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (328 >> 8) & 0xff, + 328 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1680 >> 8) & 0xff, + 1680 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { 
+ 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (329 >> 8) & 0xff, + 329 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1686 >> 8) & 0xff, + 1686 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (330 >> 8) & 0xff, + 330 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1692 >> 8) & 0xff, + 1692 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (331 >> 8) & 0xff, + 331 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1696 >> 8) & 0xff, + 1696 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (332 >> 8) & 0xff, + 332 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1702 >> 8) & 0xff, + 1702 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (333 >> 8) & 0xff, + 333 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1708 >> 8) & 0xff, + 1708 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (334 >> 8) & 0xff, + 334 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1716 >> 8) & 0xff, + 1716 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (335 >> 8) & 0xff, + 335 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1732 >> 8) & 0xff, + 1732 & 0xff, 
+ (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (338 >> 8) & 0xff, + 338 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1740 >> 8) & 0xff, + 1740 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (339 >> 8) & 0xff, + 339 & 0xff} + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1756 >> 8) & 0xff, + 1756 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (342 >> 8) & 0xff, + 342 & 0xff} + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1772 >> 8) & 0xff, + 1772 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (345 >> 8) & 0xff, + 345 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + 
.field_opr1 = { + (1788 >> 8) & 0xff, + 1788 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (348 >> 8) & 0xff, + 348 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1804 >> 8) & 0xff, + 1804 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (351 >> 8) & 0xff, + 351 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1820 >> 8) & 0xff, + 1820 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (354 >> 8) & 0xff, + 354 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (355 >> 8) & 0xff, + 355 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1827 >> 8) & 0xff, + 1827 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (356 >> 8) & 0xff, + 356 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (357 >> 8) & 0xff, + 357 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (1834 >> 8) & 0xff, + 1834 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (358 >> 8) & 0xff, + 358 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (359 >> 8) & 0xff, + 359 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + /* class_tid: 2, , table: fkb_select.em_gen_template_alloc */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description 
= "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size 
= 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_TERM_FLOW >> 8) & 0xff, + BNXT_ULP_RF_IDX_TERM_FLOW & 0xff} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: proto_header_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_KEY_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_KEY_ID_0 & 0xff} + }, + { + .description = "em_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_RECIPE_ID & 0xff} + }, + { + .description = "wc_recipe_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_WC_RECIPE_ID >> 8) & 0xff, + BNXT_ULP_RF_IDX_WC_RECIPE_ID & 0xff} + }, + /* class_tid: 2, , table: fkb_select.em_gen_template */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2541 >> 8) & 0xff, + 2541 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + 
.field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2542 >> 8) & 0xff, + 2542 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2543 >> 8) & 0xff, + 2543 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2544 >> 8) & 0xff, + 2544 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2547 >> 8) & 0xff, + 2547 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2550 >> 8) & 0xff, + 2550 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2554 >> 8) & 0xff, + 2554 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (530 >> 8) & 0xff, + 530 & 0xff} + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2562 >> 8) & 0xff, + 2562 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2566 >> 8) & 0xff, + 2566 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (531 >> 8) & 0xff, + 531 & 0xff} + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2572 >> 8) & 0xff, + 2572 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (532 >> 8) & 0xff, + 532 & 0xff} + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2578 >> 8) & 0xff, + 2578 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (533 >> 8) & 0xff, + 533 & 0xff} + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2584 >> 8) & 0xff, + 2584 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + 
.field_opr3 = { + (534 >> 8) & 0xff, + 534 & 0xff} + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2590 >> 8) & 0xff, + 2590 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (535 >> 8) & 0xff, + 535 & 0xff} + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2596 >> 8) & 0xff, + 2596 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (536 >> 8) & 0xff, + 536 & 0xff} + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2602 >> 8) & 0xff, + 2602 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (537 >> 8) & 0xff, + 537 & 0xff} + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2608 >> 8) & 0xff, + 2608 & 0xff, + (2 >> 8) & 0xff, + 2 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (538 >> 8) & 0xff, + 538 & 0xff} + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2612 >> 8) & 0xff, + 2612 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (539 >> 8) & 0xff, + 539 & 0xff} + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 
= { + (2618 >> 8) & 0xff, + 2618 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (540 >> 8) & 0xff, + 540 & 0xff} + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2624 >> 8) & 0xff, + 2624 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (541 >> 8) & 0xff, + 541 & 0xff} + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2632 >> 8) & 0xff, + 2632 & 0xff, + (4 >> 8) & 0xff, + 
4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (542 >> 8) & 0xff, + 542 & 0xff} + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2648 >> 8) & 0xff, + 2648 & 0xff, + (4 >> 8) & 0xff, + 4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (545 >> 8) & 0xff, + 545 & 0xff} + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2656 >> 8) & 0xff, + 2656 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (546 >> 8) & 0xff, + 546 & 0xff} + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2668 >> 8) & 0xff, + 2668 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (549 >> 8) & 0xff, + 549 & 0xff} + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2680 >> 8) & 0xff, + 2680 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (552 >> 8) & 0xff, + 552 & 0xff} + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2692 >> 8) & 0xff, + 2692 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (555 >> 8) & 0xff, + 555 & 0xff} + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2704 >> 8) & 0xff, + 2704 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (558 >> 8) & 0xff, + 558 & 0xff} + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2716 >> 8) & 0xff, + 2716 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (561 >> 8) & 0xff, + 561 & 0xff} + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2728 >> 8) & 0xff, + 2728 & 0xff, + (3 >> 8) & 0xff, + 3 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (564 >> 8) & 0xff, + 564 & 0xff} + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_TERNARY_LIST, + .field_src1 = BNXT_ULP_FIELD_SRC_LIST_AND, + .field_opr1 = { + (2740 >> 8) & 0xff, + 2740 & 0xff, + (1 >> 8) & 0xff, + 1 & 0xff}, + .field_src2 
= BNXT_ULP_FIELD_SRC_NEXT, + .field_opr2 = { + (567 >> 8) & 0xff, + 567 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_NEXT, + .field_opr3 = { + (568 >> 8) & 0xff, + 568 & 0xff} + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: em_flow_conflict_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* 
class_tid: 2, , table: em.egress_generic_template */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: wm.egress_generic_template */ + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: int_full_act_record.0 */ 
+ { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: port_table.ing_wr_0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.parent.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "phy_port", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "port_is_pf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr.roce", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF & 0xff} + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + 
{ + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 3, , table: profile_tcam.prof_func_catch_all */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: parif_def_arec_ptr.ing_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_err_arec_ptr.ing_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR & 0xff} + }, + /* class_tid: 3, , table: int_full_act_record.egr_0 */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + 
.field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_VPORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: port_table.egr_wr_0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drv_func.parent.mac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "phy_port", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "port_is_pf", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_arec_ptr.roce", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: ilt_tbl.egr_vfr */ + { + .description = "ilt_destination", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fwd_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_FWD_OP_BYPASS_LKUP} + }, + { + .description = "en_ilt_dest", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_action", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "en_bd_meta", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 23, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , 
table: l2_cntxt_tcam_cache.egr_wr_vfr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 3, , table: parif_def_arec_ptr.egr_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_err_arec_ptr.egr_0 */ + { + .description = "act_rec_ptr", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 4, , table: mod_record.vf_2_vfr_egr */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_VF_2_VFR_META_VAL >> 8) & 0xff, + ULP_THOR_SYM_VF_2_VFR_META_VAL & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: int_full_act_record.vf_2_vfr_loopback */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: parif_def_arec_ptr.vf_egr */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + /* class_tid: 4, , table: parif_def_err_arec_ptr.vf_egr */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_glb_act_rec_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_LOOPBACK_PARIF} + }, + /* class_tid: 4, , table: profile_tcam.prof_func_catch_all */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROF_FUNC_ID_0 & 0xff} + }, + /* class_tid: 4, , table: int_full_act_record.vf_2_vfr_ing */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: int_full_act_record.drop_action */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_2_vfr_ing.0 */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_VF_2_VFR_PROF_FUNC_ID & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + 
(BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vfr_2_vf_ing.0 */ + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ANY_2_VF_PROF_FUNC_ID & 0xff} + }, + { + .description = "ctxt_meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "def_ctxt_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR & 0xff} + }, + { + .description = "ctxt_opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_CTXT_OPCODE_NORMAL_FLOW} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_1 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_1 & 0xff} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + /* class_tid: 4, , table: fkb_select.vfr_em */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", 
+ .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description 
= "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: fkb_select.vf_em */ + { + .description = "l2_cntxt_id.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "spif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "svif.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "lcos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rcyc_cnt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "loopback.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivd.en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_tcpts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tuntype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tflags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tids.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxts.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tctxt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tqos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "terr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_l2type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_sa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_nvt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ovt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivd.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivv.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_ivt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_etype.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_selcmp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_prot.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_fid.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_qos.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_nonext.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_esp.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_auth.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_dest.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_rthdr.en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_hop.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ieh_1frag.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_df.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_l3err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_l4type.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_src.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dst.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_flags.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_seq.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_ack.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_win.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_pa.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_opt.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tcpts.en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_tsval.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_txecr.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_err.en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: profile_tcam.vf_2_vfr.0 */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: profile_tcam.vfr_2_vf.0 */ + { + .description = "wc_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_1 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_KEY_ID_1 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1 >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_VFR_EM_PROF_ID_1 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: profile_tcam_cache.vfr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_profile_id", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_key_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: ilt_tbl.vfr_ing */ + { + .description = "ilt_destination", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_DROP_AREC_PTR & 0xff} + }, + { + .description = "fwd_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_FWD_OP_NORMAL_FLOW} + }, + { + .description = "en_ilt_dest", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_action", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_meta", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 23, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: em.vf_2_vfr.0 */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "opcode", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: ilt_tbl.vfr_egr */ + { + .description = "ilt_destination", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "fwd_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_FWD_OP_BYPASS_LKUP} + }, + { + .description = "en_ilt_dest", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_action", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "en_bd_meta", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 23, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: ilt_tbl.vf_egr */ + { + .description = "ilt_destination", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + { + .description = "fwd_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 
ULP_THOR_SYM_FWD_OP_NORMAL_FLOW} + }, + { + .description = "en_ilt_dest", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_action", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "en_bd_meta", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_THOR_SYM_LOOPBACK_PARIF} + }, + { + .description = "reserved", + .field_bit_size = 23, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: mod_record.vfr_2_vf_egr */ + { + .description = "metadata_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "rem_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rem_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ivlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rep_add_ovlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ttl_update", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tun_md_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_dmac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_smac_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv6_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_sip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_dip_ipv4_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_sport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l4_dport_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_PORT_TABLE, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff, + (BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA >> 8) & 0xff, + BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA & 0xff} + }, + { + .description = "metadata_rsvd", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_op", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "metadata_prof", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* 
class_tid: 4, , table: int_full_act_record.vfr_egr */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_PTR & 0xff} + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_THOR_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_THOR_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: int_full_act_record.vfr_2_vf.ing0 */ + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mod_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd1", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rsvd0", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "stats_op", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "stats_ptr", + .field_bit_size = 16, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_VNIC & 0xff} + }, + { + .description = "use_default", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "cond_copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vlan_del_rpt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 4, , table: em.vfr_2_vf.0 */ + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "data", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "opcode", + 
.field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meta_prof", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ctxt_data", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_thor_class_ident_list[] = { + /* class_tid: 1, , table: port_table.rd */ + { + .description = "default_arec_ptr", + .regfile_idx = BNXT_ULP_RF_IDX_DEFAULT_AREC_PTR, + .ident_bit_size = 16, + .ident_bit_pos = 137 + }, + { + .description = "drv_func.parent.mac", + .regfile_idx = BNXT_ULP_RF_IDX_DRV_FUNC_PARENT_MAC, + .ident_bit_size = 48, + .ident_bit_pos = 80 + }, + { + .description = "phy_port", + .regfile_idx = BNXT_ULP_RF_IDX_PHY_PORT, + .ident_bit_size = 8, + .ident_bit_pos = 128 + }, + /* class_tid: 1, , table: l2_cntxt_tcam_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + { + .description = "l2_cntxt_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + { + .description = "prof_func_id", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 62 + }, + /* class_tid: 1, , table: tunnel_cache.f1_f2_rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 1, , table: l2_cntxt_tcam.f1_f2_alloc_l2_cntxt */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + }, + /* class_tid: 1, , table: flow_chain_cache.group_check */ + { + .description = "metadata", + .regfile_idx 
= BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* class_tid: 1, , table: flow_chain_l2_cntxt.group_check */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* class_tid: 1, , table: l2_cntxt_tcam.chain_entry */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + }, + /* class_tid: 1, , table: mac_addr_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 1, , table: l2_cntxt_tcam.allocate_l2_context */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + }, + /* class_tid: 1, , table: proto_header_cache.rd */ + { + .description = "em_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 50 + }, + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 42 + }, + { + .description = "em_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 74 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 66 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 58 + }, + { + .description = "wc_recipe_id", + .regfile_idx = 
BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 90 + }, + /* class_tid: 1, , table: hdr_overlap_cache.overlap_check */ + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 40 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 32 + }, + /* class_tid: 1, , table: profile_tcam.allocate_wc_profile */ + { + .description = "wc_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_WC_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 6 + }, + /* class_tid: 1, , table: profile_tcam.gen_template */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 23 + }, + /* class_tid: 1, , table: em_flow_conflict_cache.rd */ + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: l2_cntxt_tcam_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + { + .description = "prof_func_id", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 62 + }, + /* class_tid: 2, , table: flow_chain_cache.group_check */ + { + .description = "metadata", + .regfile_idx = BNXT_ULP_RF_IDX_JUMP_META, + .ident_bit_size = 16, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: proto_header_cache.rd */ + { + .description = "em_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 50 + }, + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + 
.ident_bit_pos = 42 + }, + { + .description = "em_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 74 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 66 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 58 + }, + { + .description = "wc_recipe_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_RECIPE_ID, + .ident_bit_size = 16, + .ident_bit_pos = 90 + }, + /* class_tid: 2, , table: hdr_overlap_cache.overlap_check */ + { + .description = "wc_key_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_KEY_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 40 + }, + { + .description = "wc_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: profile_tcam.allocate_wc_profile */ + { + .description = "wc_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_WC_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_WC_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 6 + }, + /* class_tid: 2, , table: profile_tcam.gen_template */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 23 + }, + /* class_tid: 2, , table: em_flow_conflict_cache.rd */ + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 32 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = 
TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + }, + { + .description = "prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 7, + .ident_bit_pos = 0 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.drv_func_prof_func_alloc */ + { + .description = "prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 7, + .ident_bit_pos = 0 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + .description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + }, + { + .description = "prof_func_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_PROF_FUNC, + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 7, + .ident_bit_pos = 0 + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.get_drv_func_prof_func */ + { + .description = "prof_func_id", + .regfile_idx = BNXT_ULP_RF_IDX_PROF_FUNC_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 62 + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 29 + } +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c new file mode 100644 index 000000000000..313eb5356cf3 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_act.c @@ -0,0 +1,6772 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header act list */ +struct bnxt_ulp_mapper_tmpl_info ulp_wh_plus_act_tmpl_list[] = { + /* act_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 0, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 0, + .cond_nums = 0 } + }, + /* act_tid: 2, ingress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 5, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 0, + .cond_nums = 12 } + }, + /* act_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 7, + .start_tbl_idx = 5, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 17, + .cond_nums = 0 } + }, + /* act_tid: 4, ingress */ + [4] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 7, + .start_tbl_idx = 12, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 0, + .cond_nums = 2 } + }, + /* act_tid: 5, ingress */ + [5] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 5, + .start_tbl_idx = 19, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 2, + .cond_nums = 2 } + }, + /* act_tid: 6, ingress */ + [6] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 1, + .start_tbl_idx = 24, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 40, + .cond_nums = 0 } + }, + /* act_tid: 7, egress */ + [7] = { + .device_name = 
BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 1, + .start_tbl_idx = 25, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 40, + .cond_nums = 0 } + }, + /* act_tid: 8, egress */ + [8] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 5, + .start_tbl_idx = 26, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 40, + .cond_nums = 0 } + }, + /* act_tid: 9, egress */ + [9] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 7, + .start_tbl_idx = 31, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_LIST_OR, + .cond_start_idx = 4, + .cond_nums = 2 } + }, + /* act_tid: 10, egress */ + [10] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 6, + .start_tbl_idx = 38, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 62, + .cond_nums = 3 } + }, + /* act_tid: 11, egress */ + [11] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 3, + .start_tbl_idx = 44, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 71, + .cond_nums = 0 } + }, + /* act_tid: 12, egress */ + [12] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 0, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 73, + .cond_nums = 0 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_act_tbl_list[] = { + { /* act_tid: 2, , table: shared_mirror_record.rd */ + .description = "shared_mirror_record.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 12, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + 
.gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 0, + .blob_key_bit_size = 4, + .key_bit_size = 4, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 1 + }, + { /* act_tid: 2, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 13, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 0, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 2, , table: int_vtag_encap_record.0 */ + .description = "int_vtag_encap_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_8B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 14, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 8, + .result_start_idx = 1, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 2, , table: 
int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 15, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 12, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* act_tid: 2, , table: ext_full_act_record.0 */ + .description = "ext_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 16, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 38, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 3, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 17, + .cond_nums = 0 }, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* act_tid: 3, , table: mirror_tbl.alloc */ + .description = "mirror_tbl.alloc", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 17, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 75, + .result_bit_size = 32, + .result_num_fields = 6 + }, + { /* act_tid: 3, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 17, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 81, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 3, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + 
.resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 18, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 82, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* act_tid: 3, , table: ext_full_act_record.0 */ + .description = "ext_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 18, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 108, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 3, , table: mirror_tbl.wr */ + .description = "mirror_tbl.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 18, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MIRROR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 145, + .result_bit_size = 32, + .result_num_fields = 6 + }, + { /* act_tid: 3, , table: shared_mirror_record.wr */ + .description = "shared_mirror_record.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TBL_TYPE_MIRROR_CONFIG, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 18, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .key_start_idx = 1, + .blob_key_bit_size = 4, + .key_bit_size = 4, + .key_num_fields = 1, + .result_start_idx = 151, + .result_bit_size = 36, + .result_num_fields = 2 + }, + { /* act_tid: 4, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 26, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 4, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 27, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 153, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 4, , table: act_modify_ipv4_src.0 */ + .description = "act_modify_ipv4_src.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 28, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 154, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* act_tid: 4, , table: act_modify_ipv4_dst.0 */ + .description = "act_modify_ipv4_dst.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 29, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 155, + .result_bit_size = 32, + 
.result_num_fields = 1 + }, + { /* act_tid: 4, , table: int_encap_mac_record.0 */ + .description = "int_encap_mac_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 30, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 16, + .result_start_idx = 156, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 4, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 30, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 167, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* act_tid: 4, , table: ext_full_act_record.0 */ + .description = "ext_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 31, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 193, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 5, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 35, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 230, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 5, , table: vnic_interface_rss_config.0 */ + .description = "vnic_interface_rss_config.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_RSS, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 36, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 231, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 5, , table: vnic_interface_queue_config.0 */ + 
.description = "vnic_interface_queue_config.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 37, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_RSS_VNIC, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 231, + .result_bit_size = 0, + .result_num_fields = 0 + }, + { /* act_tid: 5, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 38, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 231, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* act_tid: 5, , table: int_full_act_record.1 */ + .description = "int_full_act_record.1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 40, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + 
.tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 257, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* act_tid: 6, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .true_message = "Wh not supporting meter", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 40, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 7, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .true_message = "Reject: wh+ not supporting gen template", + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 40, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 8, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 40, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 283, + 
.result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 8, , table: int_vtag_encap_record.0 */ + .description = "int_vtag_encap_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 41, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 8, + .result_start_idx = 284, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 8, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 43, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 295, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* act_tid: 8, , table: ext_full_act_record.no_tag */ + .description = "ext_full_act_record.no_tag", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 44, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 321, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 8, , table: ext_full_act_record.one_tag */ + .description = "ext_full_act_record.one_tag", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 46, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 358, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 9, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 56, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 9, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 57, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 395, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 9, , table: act_modify_ipv4_src.0 */ + .description = "act_modify_ipv4_src.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 58, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 396, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* act_tid: 9, , table: act_modify_ipv4_dst.0 */ + .description = "act_modify_ipv4_dst.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_MODIFY_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 59, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 397, + .result_bit_size = 32, + 
.result_num_fields = 1 + }, + { /* act_tid: 9, , table: int_encap_mac_record.dummy */ + .description = "int_encap_mac_record.dummy", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_16B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 60, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 16, + .result_start_idx = 398, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* act_tid: 9, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 60, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 409, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* act_tid: 9, , table: ext_full_act_record.0 */ + .description = "ext_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + 
.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 61, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 435, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 11 + }, + { /* act_tid: 10, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 65, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 472, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 10, , table: sp_smac_ipv4.0 */ + .description = "sp_smac_ipv4.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV4, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 66, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 16, + .result_start_idx = 473, + .result_bit_size = 0, + .result_num_fields = 0, + 
.encap_num_fields = 2 + }, + { /* act_tid: 10, , table: sp_smac_ipv6.0 */ + .description = "sp_smac_ipv6.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_SP_SMAC_IPV6, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 67, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_SP_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 24, + .result_start_idx = 475, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 2 + }, + { /* act_tid: 10, , table: int_tun_encap_record.0 */ + .description = "int_tun_encap_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_64B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 68, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 64, + .result_start_idx = 477, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 30 + }, + { /* act_tid: 10, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 69, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 507, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* act_tid: 10, , table: ext_full_act_record_vxlan.0 */ + .description = "ext_full_act_record_vxlan.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_EXT, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 70, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 533, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 30 + }, + { /* act_tid: 11, , table: control.reject */ + .description = "control.reject", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1023, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 71, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* act_tid: 11, , table: int_flow_counter_tbl.0 */ + .description = "int_flow_counter_tbl.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_STATS_64, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT, + .direction = TF_DIR_TX, + .execute_info = { + 
.cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 71, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 589, + .result_bit_size = 64, + .result_num_fields = 1 + }, + { /* act_tid: 11, , table: int_full_act_record.0 */ + .description = "int_full_act_record.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 72, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 590, + .result_bit_size = 128, + .result_num_fields = 26 + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_wh_plus_act_cond_oper_list[] = { + /* cond_reject: wh_plus, act_tid: 4 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 18, + .cond_nums = 3 + }, + /* cond_reject: wh_plus, act_tid: 4 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 21, + .cond_nums = 5 + }, + /* cond_reject: wh_plus, act_tid: 5 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 32, + .cond_nums = 1 + }, + /* cond_reject: wh_plus, act_tid: 5 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 33, + .cond_nums = 2 + }, + /* cond_reject: wh_plus, act_tid: 9 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 48, 
+ .cond_nums = 3 + }, + /* cond_reject: wh_plus, act_tid: 9 */ + { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR, + .cond_start_idx = 51, + .cond_nums = 5 + } +}; + +struct bnxt_ulp_mapper_cond_info ulp_wh_plus_act_cond_list[] = { + /* cond_reject: wh_plus, act_tid: 2 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_VLAN_VID + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_VLAN_PCP + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_METER + }, + /* cond_execute: act_tid: 2, shared_mirror_record.rd:12*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 2, int_flow_counter_tbl.0:13*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 2, int_vtag_encap_record.0:14*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + 
.cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 2, int_full_act_record.0:15*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 2, ext_full_act_record.0:16*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + /* cond_execute: act_tid: 3, int_flow_counter_tbl.0:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_reject: wh_plus, act_tid: 4 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_DST + }, + /* cond_reject: wh_plus, act_tid: 4 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_DST + }, + /* cond_execute: act_tid: 4, control.0:26*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 4, int_flow_counter_tbl.0:27*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 4, act_modify_ipv4_src.0:28*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_SRC + }, + /* cond_execute: act_tid: 4, act_modify_ipv4_dst.0:29*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = 
BNXT_ULP_ACT_BIT_SET_IPV4_DST + }, + /* cond_execute: act_tid: 4, int_full_act_record.0:30*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 4, ext_full_act_record.0:31*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + /* cond_reject: wh_plus, act_tid: 5 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_reject: wh_plus, act_tid: 5 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* cond_execute: act_tid: 5, int_flow_counter_tbl.0:35*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 5, vnic_interface_rss_config.0:36*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 5, vnic_interface_queue_config.0:37*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + /* cond_execute: act_tid: 5, int_full_act_record.0:38*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_QUEUE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_RSS + }, + /* cond_execute: act_tid: 8, int_flow_counter_tbl.0:40*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 8, int_vtag_encap_record.0:41*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 8, int_full_act_record.0:43*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 8, ext_full_act_record.no_tag:44*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_execute: act_tid: 8, ext_full_act_record.one_tag:46*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_PUSH_VLAN + }, + /* cond_reject: wh_plus, act_tid: 9 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_TP_DST + }, + /* cond_reject: wh_plus, act_tid: 9 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV6_DST + }, + /* cond_execute: act_tid: 9, control.0:56*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: act_tid: 9, int_flow_counter_tbl.0:57*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 9, act_modify_ipv4_src.0:58*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_SRC + }, + /* cond_execute: act_tid: 9, act_modify_ipv4_dst.0:59*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_IPV4_DST + }, + /* cond_execute: act_tid: 9, 
int_full_act_record.0:60*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 9, ext_full_act_record.0:61*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + /* cond_reject: wh_plus, act_tid: 10 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_DST + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SET_MAC_SRC + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_SHARED_SAMPLE + }, + /* cond_execute: act_tid: 10, int_flow_counter_tbl.0:65*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 10, sp_smac_ipv4.0:66*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG + }, + /* cond_execute: act_tid: 10, sp_smac_ipv6.0:67*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG + }, + /* cond_execute: act_tid: 10, int_tun_encap_record.0:68*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 10, int_full_act_record.0:69*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: act_tid: 10, ext_full_act_record_vxlan.0:70*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + /* cond_execute: act_tid: 11, int_flow_counter_tbl.0:71*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_ACT_BIT_IS_SET, + .cond_operand = BNXT_ULP_ACT_BIT_COUNT + }, + /* cond_execute: act_tid: 11, int_full_act_record.0:72*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + } +}; + +struct bnxt_ulp_mapper_key_info ulp_wh_plus_act_key_info_list[] = { + /* act_tid: 2, , table: shared_mirror_record.rd */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + 
.field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE & 0xff} + } + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .field_info_mask = { + .description = "shared_index", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "shared_index", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_PTR_0 & 0xff} + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_wh_plus_act_key_ext_list[] = { +}; + +struct bnxt_ulp_mapper_field_info ulp_wh_plus_act_result_field_list[] = { + /* act_tid: 2, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 2, , table: int_vtag_encap_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN & 0xff} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP & 0xff} + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID & 0xff} + }, + /* act_tid: 2, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 
0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TUN}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_NONE} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + 
(uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff} + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SHARED_SAMPLE & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_RF, + .field_opr2 = { + (BNXT_ULP_RF_IDX_MIRROR_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_ID_0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 2, , table: ext_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 
0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_VXLAN_DECAP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TUN}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_NONE} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 
12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff} + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MIRROR_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MIRROR_ID_0 & 0xff} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: mirror_tbl.alloc */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ign_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} 
+ }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: ext_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff} + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + 1} + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + 
.description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: mirror_tbl.wr */ + { + .description = "act_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "copy", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ign_drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 3, , table: shared_mirror_record.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "mirror_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2_POST, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + (1 >> 8) & 0xff, + 1 & 0xff} + }, + /* act_tid: 4, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 4, , table: act_modify_ipv4_src.0 */ + { + .description = "ipv4_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff} + }, + /* act_tid: 4, , table: act_modify_ipv4_dst.0 */ + { + .description = "ipv4_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff} + }, + /* act_tid: 4, , table: int_encap_mac_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = 
"ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 4, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TL2}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_THRU_L2} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 4, , table: ext_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR & 0xff} + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TL2}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_THRU_L2} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 5, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 5, , table: vnic_interface_rss_config.0 */ + /* act_tid: 5, , table: vnic_interface_queue_config.0 */ + /* act_tid: 5, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RSS_VNIC >> 8) & 0xff, + BNXT_ULP_RF_IDX_RSS_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 5, , table: int_full_act_record.1 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VNIC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 8, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 8, , table: int_vtag_encap_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN & 0xff} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP & 0xff} + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID & 0xff} + }, + /* act_tid: 8, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL 
>> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 8, , table: ext_full_act_record.no_tag */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 
0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + 
.field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 8, , table: ext_full_act_record.one_tag */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + 
(BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_POP_VLAN & 0xff} + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_DROP >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_DROP & 0xff} + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN & 0xff} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP & 0xff} + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID & 0xff} + }, + /* act_tid: 9, , table: int_flow_counter_tbl.0 */ + { + .description = 
"count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 9, , table: act_modify_ipv4_src.0 */ + { + .description = "ipv4_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC & 0xff} + }, + /* act_tid: 9, , table: act_modify_ipv4_dst.0 */ + { + .description = "ipv4_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST & 0xff} + }, + /* act_tid: 9, , table: int_encap_mac_record.dummy */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 9, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + 
.field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TL2}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_THRU_L2} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 9, , table: ext_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_ENCAP_MAC_PTR & 0xff} + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 >> 8) & 
0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_DST_PTR_0 & 0xff} + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_DST & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_DST >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_DST & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_MODIFY_IPV4_SRC_PTR_0 & 0xff} + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_SET_TP_SRC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr2 = { + (BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_DEC_TTL & 0xff} + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_ACT_T_DEC_TTL >> 8) & 0xff, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL & 0xff} + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_DECAP_FUNC_THRU_TL2}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_DECAP_FUNC_THRU_L2} + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 10, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 10, , table: sp_smac_ipv4.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv4_src_addr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV4_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_SADDR & 0xff} + }, + /* act_tid: 10, , table: sp_smac_ipv6.0 */ + { + .description = "smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_SMAC & 0xff} + }, + { + .description = "ipv6_src_addr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_IPV6_SADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_SADDR & 0xff} + }, + /* act_tid: 10, , table: int_tun_encap_record.0 */ + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + 
(BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff}, + .field_src3 = 
BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 
32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + 
.description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_UDP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_UDP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 
0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 10, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = 
"tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 10, , table: ext_full_act_record_vxlan.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "flow_cntr_ext", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + 
.field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VALID_YES} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", 
+ .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE & 0xff} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L2_EN_YES} + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE & 0xff} + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_L4_TYPE_UDP_CSUM} + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_TUN_TYPE_VXLAN} + }, + { + .description = "enc_eth_dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr1 = { + (BNXT_ULP_ENC_FIELD_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_ETH_DMAC & 0xff} + }, + { + .description = "enc_o_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_o_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_O_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_O_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_tag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TCI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TCI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_i_vlan_type", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 56) & 0xff, 
+ ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OI_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_I_VLAN_TYPE >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_I_VLAN_TYPE & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ihl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_IHL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_IHL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_tos", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + 
.field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_TOS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TOS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_pkt_id", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_PKT_ID >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PKT_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_frag", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_FRAG >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_FRAG & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV4_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_PROTO & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv4_daddr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = 
{ + (BNXT_ULP_ENC_FIELD_IPV4_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV4_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_vtc", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_zero", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ZERO, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_proto", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 
32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_PROTO >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_PROTO & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_ttl", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_TTL >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_TTL & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_ipv6_daddr", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_IPV6 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_IPV6_DADDR >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_IPV6_DADDR & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + 
.description = "enc_udp_sport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_UDP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_UDP_SPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_SPORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_udp_dport", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_UDP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_UDP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_UDP_DPORT >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_UDP_DPORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_flags", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN 
>> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_FLAGS >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_FLAGS & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_rsvd0", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD0 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = "enc_vxlan_vni", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_VNI >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_VNI & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + { + .description = 
"enc_vxlan_rsvd1", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_ENC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_T_VXLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ENC_FIELD, + .field_opr2 = { + (BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 >> 8) & 0xff, + BNXT_ULP_ENC_FIELD_VXLAN_RSVD1 & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_SKIP + }, + /* act_tid: 11, , table: int_flow_counter_tbl.0 */ + { + .description = "count", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* act_tid: 11, , table: int_full_act_record.0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_FLOW_CNTR_PTR_0 & 0xff} + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 56) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 48) & 0xff, + 
((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 40) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 32) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 24) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 16) & 0xff, + ((uint64_t)BNXT_ULP_ACT_BIT_COUNT >> 8) & 0xff, + (uint64_t)BNXT_ULP_ACT_BIT_COUNT & 0xff} + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ACT_PROP, + .field_opr1 = { + (BNXT_ULP_ACT_PROP_IDX_VPORT >> 8) & 0xff, + BNXT_ULP_ACT_PROP_IDX_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_wh_plus_act_ident_list[] = { + /* act_tid: 2, , table: shared_mirror_record.rd */ + { + .description = "mirror_id", + .regfile_idx = BNXT_ULP_RF_IDX_MIRROR_ID_0, + .ident_bit_size = 4, + .ident_bit_pos = 32 + } +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c new file mode 100644 index 000000000000..a22599989892 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/generic_templates/ulp_template_db_wh_plus_class.c @@ -0,0 +1,14567 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2024 Broadcom + * All rights reserved. + */ + +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_tbl.h" + +/* Mapper templates for header class list */ +struct bnxt_ulp_mapper_tmpl_info ulp_wh_plus_class_tmpl_list[] = { + /* class_tid: 1, ingress */ + [1] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 18, + .start_tbl_idx = 0, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 0, + .cond_nums = 1 } + }, + /* class_tid: 2, egress */ + [2] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 15, + .start_tbl_idx = 18, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 24, + .cond_nums = 1 } + }, + /* class_tid: 3, ingress */ + [3] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 22, + .start_tbl_idx = 33, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 35, + .cond_nums = 0 } + }, + /* class_tid: 4, egress */ + [4] = { + .device_name = BNXT_ULP_DEVICE_ID_WH_PLUS, + .num_tbls = 19, + .start_tbl_idx = 55, + .reject_info = { + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_FALSE, + .cond_start_idx = 41, + .cond_nums = 0 } + } +}; + +struct bnxt_ulp_mapper_tbl_info ulp_wh_plus_class_tbl_list[] = { + { /* class_tid: 1, , table: l2_cntxt_tcam_cache.rd */ + .description = "l2_cntxt_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 5, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 1, + .cond_nums = 
1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 0, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 0, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: mac_addr_cache.rd */ + .description = "mac_addr_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 2, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 1, + .blob_key_bit_size = 161, + .key_bit_size = 161, + .key_num_fields = 8, + .ident_start_idx = 1, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 2, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: l2_cntxt_tcam.0 */ + .description = "l2_cntxt_tcam.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 9, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 0, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 2, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: mac_addr_cache.wr */ + .description = "mac_addr_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 22, + .blob_key_bit_size = 161, + .key_bit_size = 161, + .key_num_fields = 8, + .result_start_idx = 13, + .result_bit_size = 69, + .result_num_fields = 5 + }, + { /* class_tid: 1, , table: profile_tcam_cache.rd */ + .description = "profile_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 3, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = 
BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 30, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .ident_start_idx = 3, + .ident_nums = 3 + }, + { /* class_tid: 1, , table: control.1 */ + .description = "control.1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 3, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 1, , table: control.2 */ + .description = "control.2", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 5, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 4, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 1, , table: profile_tcam.ipv4 */ + .description = "profile_tcam.ipv4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 3, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 5, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + 
.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 33, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 43, + .result_start_idx = 18, + .result_bit_size = 38, + .result_num_fields = 17, + .ident_start_idx = 6, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: profile_tcam.ipv6 */ + .description = "profile_tcam.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 7, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 76, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 43, + .result_start_idx = 35, + .result_bit_size = 38, + .result_num_fields = 17, + .ident_start_idx = 7, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: profile_tcam.ipv4_vxlan */ + .description = "profile_tcam.ipv4_vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 9, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + 
.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 119, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 43, + .result_start_idx = 52, + .result_bit_size = 38, + .result_num_fields = 17, + .ident_start_idx = 8, + .ident_nums = 1 + }, + { /* class_tid: 1, , table: profile_tcam_cache.wr */ + .description = "profile_tcam_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 11, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 162, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .result_start_idx = 69, + .result_bit_size = 122, + .result_num_fields = 5 + }, + { /* class_tid: 1, , table: em.ipv4 */ + .description = "em.ipv4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 11, + .cond_nums = 3 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 165, + .blob_key_bit_size = 176, + .key_bit_size = 176, + .key_num_fields = 10, + .result_start_idx = 74, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 1, , table: eem.ipv4 */ + .description = "eem.ipv4", + .resource_func = 
BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 14, + .cond_nums = 3 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 175, + .blob_key_bit_size = 448, + .key_bit_size = 448, + .key_num_fields = 10, + .result_start_idx = 83, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 1, , table: em.ipv6 */ + .description = "em.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 17, + .cond_nums = 3 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 185, + .blob_key_bit_size = 416, + .key_bit_size = 416, + .key_num_fields = 11, + .result_start_idx = 92, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 1, , table: eem.ipv6 */ + .description = "eem.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 20, + .cond_nums = 3 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 196, + .blob_key_bit_size = 448, + 
.key_bit_size = 448, + .key_num_fields = 11, + .result_start_idx = 101, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 1, , table: em.vxlan */ + .description = "em.vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 23, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 207, + .blob_key_bit_size = 200, + .key_bit_size = 200, + .key_num_fields = 11, + .result_start_idx = 110, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 1, , table: eem.vxlan */ + .description = "eem.vxlan", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 24, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 218, + .blob_key_bit_size = 448, + .key_bit_size = 448, + .key_num_fields = 11, + .result_start_idx = 119, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 2, , table: l2_cntxt_tcam_cache.rd */ + .description = "l2_cntxt_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 5, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + 
.cond_start_idx = 25, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 229, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 9, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: mac_addr_cache.rd */ + .description = "mac_addr_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 26, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 230, + .blob_key_bit_size = 161, + .key_bit_size = 161, + .key_num_fields = 8, + .ident_start_idx = 10, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: control.0 */ + .description = "control.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 26, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: l2_cntxt_tcam.0 */ + .description = "l2_cntxt_tcam.0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + 
.tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 238, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 128, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 11, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: mac_addr_cache.wr */ + .description = "mac_addr_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 251, + .blob_key_bit_size = 161, + .key_bit_size = 161, + .key_num_fields = 8, + .result_start_idx = 141, + .result_bit_size = 69, + .result_num_fields = 5 + }, + { /* class_tid: 2, , table: profile_tcam_cache.rd */ + .description = "profile_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 27, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + 
.key_start_idx = 259, + .blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .ident_start_idx = 12, + .ident_nums = 3 + }, + { /* class_tid: 2, , table: control.gen_tbl_miss */ + .description = "control.gen_tbl_miss", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 27, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 2, , table: control.conflict_check */ + .description = "control.conflict_check", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 4, + .cond_false_goto = 1023, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 28, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .func_info = { + .func_opc = BNXT_ULP_FUNC_OPC_EQ, + .func_src1 = BNXT_ULP_FUNC_SRC_REGFILE, + .func_opr1 = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .func_src2 = BNXT_ULP_FUNC_SRC_COMP_FIELD, + .func_opr2 = BNXT_ULP_CF_IDX_FLOW_SIG_ID, + .func_dst_opr = BNXT_ULP_RF_IDX_CC } + }, + { /* class_tid: 2, , table: profile_tcam.ipv4 */ + .description = "profile_tcam.ipv4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 2, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 29, + .cond_nums = 1 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = 
BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 262, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 43, + .result_start_idx = 146, + .result_bit_size = 38, + .result_num_fields = 17, + .ident_start_idx = 15, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: profile_tcam.ipv6 */ + .description = "profile_tcam.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_PROF_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 30, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 305, + .blob_key_bit_size = 81, + .key_bit_size = 81, + .key_num_fields = 43, + .result_start_idx = 163, + .result_bit_size = 38, + .result_num_fields = 17, + .ident_start_idx = 16, + .ident_nums = 1 + }, + { /* class_tid: 2, , table: profile_tcam_cache.wr */ + .description = "profile_tcam_cache.wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 30, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 348, + 
.blob_key_bit_size = 14, + .key_bit_size = 14, + .key_num_fields = 3, + .result_start_idx = 180, + .result_bit_size = 122, + .result_num_fields = 5 + }, + { /* class_tid: 2, , table: em.ipv4 */ + .description = "em.ipv4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 30, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 351, + .blob_key_bit_size = 176, + .key_bit_size = 176, + .key_num_fields = 10, + .result_start_idx = 185, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 2, , table: eem.ipv4 */ + .description = "eem.ipv4", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 32, + .cond_nums = 2 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 361, + .blob_key_bit_size = 448, + .key_bit_size = 448, + .key_num_fields = 10, + .result_start_idx = 194, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 2, , table: em.ipv6 */ + .description = "em.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_INTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 34, + .cond_nums = 1 }, + .key_recipe_opcode = 
BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 371, + .blob_key_bit_size = 416, + .key_bit_size = 416, + .key_num_fields = 11, + .result_start_idx = 203, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 2, , table: eem.ipv6 */ + .description = "eem.ipv6", + .resource_func = BNXT_ULP_RESOURCE_FUNC_EM_TABLE, + .resource_type = TF_MEM_EXTERNAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 35, + .cond_nums = 0 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES, + .key_start_idx = 382, + .blob_key_bit_size = 448, + .key_bit_size = 448, + .key_num_fields = 11, + .result_start_idx = 212, + .result_bit_size = 64, + .result_num_fields = 9 + }, + { /* class_tid: 3, , table: int_full_act_record.ing_0 */ + .description = "int_full_act_record.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 35, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 221, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* class_tid: 3, , table: 
l2_cntxt_tcam_cache.ing_rd */ + .description = "l2_cntxt_tcam_cache.ing_rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 35, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 393, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 17, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.ing_0 */ + .description = "control.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 35, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + .description = "l2_cntxt_tcam.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 36, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = 
BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 394, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 247, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 17, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + .description = "l2_cntxt_tcam_cache.ing_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 36, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 407, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 260, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: parif_def_lkup_arec_ptr.ing_0 */ + .description = "parif_def_lkup_arec_ptr.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 36, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 265, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_arec_ptr.ing_0 */ + .description = "parif_def_arec_ptr.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = 
TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 36, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 266, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_err_arec_ptr.ing_0 */ + .description = "parif_def_err_arec_ptr.ing_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 36, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 267, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: control.egr_0 */ + .description = "control.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 6, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 36, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP + }, + { /* class_tid: 3, , table: int_full_act_record.egr_vfr */ + .description = "int_full_act_record.egr_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, 
+ .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 37, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 268, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd_vfr */ + .description = "l2_cntxt_tcam_cache.egr_rd_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 37, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 408, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 18, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.egr_1 */ + .description = "control.egr_1", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 37, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_bypass.egr_vfr */ + .description = "l2_cntxt_tcam_bypass.egr_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + 
.execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 38, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 409, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 294, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 18, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr_vfr */ + .description = "l2_cntxt_tcam_cache.egr_wr_vfr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 38, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 422, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 307, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.rd */ + .description = "l2_cntxt_tcam_cache.rd", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 38, + .cond_nums = 0 }, + .tbl_opcode = 
BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 423, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 18, + .ident_nums = 0 + }, + { /* class_tid: 3, , table: control.egr_2 */ + .description = "control.egr_2", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 38, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + .description = "l2_cntxt_tcam.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 39, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 424, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 312, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 18, + .ident_nums = 1 + }, + { /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + .description = "l2_cntxt_tcam_cache.egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + 
BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 39, + .cond_nums = 2 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 437, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 325, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 3, , table: int_full_act_record.egr_0 */ + .description = "int_full_act_record.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 330, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* class_tid: 3, , table: parif_def_lkup_arec_ptr.egr_0 */ + .description = "parif_def_lkup_arec_ptr.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = 
BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 356, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_arec_ptr.egr_0 */ + .description = "parif_def_arec_ptr.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 357, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 3, , table: parif_def_err_arec_ptr.egr_0 */ + .description = "parif_def_err_arec_ptr.egr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD, + .tbl_operand = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 358, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: int_full_act_record.loopback */ + .description = "int_full_act_record.loopback", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = 
BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE, + .tbl_operand = BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_NOP, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 359, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_rd_egr */ + .description = "l2_cntxt_tcam_cache.vf_rd_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 41, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 438, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 19, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.vf_0 */ + .description = "control.vf_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 41, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + .description = "l2_cntxt_tcam.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + 
.cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 439, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 385, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 19, + .ident_nums = 1 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + .description = "l2_cntxt_tcam_cache.vf_egr_wr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 452, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 398, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: parif_def_lkup_arec_ptr.vf_egr */ + .description = "parif_def_lkup_arec_ptr.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_CONST, + .tbl_operand = 
ULP_WP_SYM_LOOPBACK_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 403, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: parif_def_arec_ptr.vf_egr */ + .description = "parif_def_arec_ptr.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_CONST, + .tbl_operand = ULP_WP_SYM_LOOPBACK_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 404, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: parif_def_err_arec_ptr.vf_egr */ + .description = "parif_def_err_arec_ptr.vf_egr", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IF_TABLE, + .resource_type = TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_IF_TBL_OPC_WR_CONST, + .tbl_operand = ULP_WP_SYM_LOOPBACK_PARIF, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .result_start_idx = 405, + .result_bit_size = 32, + .result_num_fields = 1 + }, + { /* class_tid: 4, , table: int_full_act_record.vf_ing */ + .description = "int_full_act_record.vf_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + 
.cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_PUSH_AND_SET_VFR_FLAG, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 406, + .result_bit_size = 128, + .result_num_fields = 26, + .encap_num_fields = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vf_ing */ + .description = "l2_cntxt_tcam_bypass.vf_ing", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 453, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 432, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 20, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_rd_egr0 */ + .description = "l2_cntxt_tcam_cache.vfr_rd_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 42, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_READ, + .gen_tbl_lkup_type 
= BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 466, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .ident_start_idx = 20, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: control.vfr_0 */ + .description = "control.vfr_0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 3, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND, + .cond_start_idx = 42, + .cond_nums = 1 }, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_egr0 */ + .description = "l2_cntxt_tcam_bypass.vfr_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_LOW, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE, + .fdb_operand = BNXT_ULP_RF_IDX_RID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 467, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 445, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 20, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + .description = "l2_cntxt_tcam_cache.vfr_wr_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM, + .direction = 
TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_GENERIC_TBL_OPC_WRITE, + .gen_tbl_lkup_type = BNXT_ULP_GENERIC_TBL_LKUP_TYPE_INDEX, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .key_start_idx = 480, + .blob_key_bit_size = 8, + .key_bit_size = 8, + .key_num_fields = 1, + .result_start_idx = 458, + .result_bit_size = 70, + .result_num_fields = 5 + }, + { /* class_tid: 4, , table: int_vtag_encap_record.vfr_egr0 */ + .description = "int_vtag_encap_record.vfr_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_ACT_ENCAP_8B, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_ENCAP_PTR_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .record_size = 8, + .result_start_idx = 463, + .result_bit_size = 0, + .result_num_fields = 0, + .encap_num_fields = 11 + }, + { /* class_tid: 4, , table: int_full_act_record.vfr_egr0 */ + .description = "int_full_act_record.vfr_egr0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION, + .direction = TF_DIR_TX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = 
BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 474, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* class_tid: 4, , table: int_full_act_record.vfr_ing0 */ + .description = "int_full_act_record.vfr_ing0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + .resource_type = TF_TBL_TYPE_FULL_ACT_RECORD, + .resource_sub_type = + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .track_type = CFA_TRACK_TYPE_SID, + .result_start_idx = 500, + .result_bit_size = 128, + .result_num_fields = 26 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_dtagged_ing0 */ + .description = "l2_cntxt_tcam_bypass.vfr_dtagged_ing0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 1, + .cond_false_goto = 1, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 481, + 
.blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 526, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 20, + .ident_nums = 0 + }, + { /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_stagged_ing0 */ + .description = "l2_cntxt_tcam_bypass.vfr_stagged_ing0", + .resource_func = BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE, + .resource_type = TF_TCAM_TBL_TYPE_L2_CTXT_TCAM_HIGH, + .direction = TF_DIR_RX, + .execute_info = { + .cond_true_goto = 0, + .cond_false_goto = 0, + .cond_list_opcode = BNXT_ULP_COND_LIST_OPC_TRUE, + .cond_start_idx = 43, + .cond_nums = 0 }, + .tbl_opcode = BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE, + .tbl_operand = BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0, + .key_recipe_opcode = BNXT_ULP_KEY_RECIPE_OPC_NOP, + .fdb_opcode = BNXT_ULP_FDB_OPC_PUSH_FID, + .pri_opcode = BNXT_ULP_PRI_OPC_CONST, + .pri_operand = 0, + .mark_db_opcode = BNXT_ULP_MARK_DB_OPC_NOP, + .critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO, + .track_type = CFA_TRACK_TYPE_SID, + .key_start_idx = 494, + .blob_key_bit_size = 167, + .key_bit_size = 167, + .key_num_fields = 13, + .result_start_idx = 539, + .result_bit_size = 64, + .result_num_fields = 13, + .ident_start_idx = 20, + .ident_nums = 0 + } +}; + +struct bnxt_ulp_mapper_cond_list_info ulp_wh_plus_class_cond_oper_list[] = { +}; + +struct bnxt_ulp_mapper_cond_info ulp_wh_plus_class_cond_list[] = { + /* cond_reject: wh_plus, class_tid: 1 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + /* cond_execute: class_tid: 1, l2_cntxt_tcam_cache.rd:1*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_DMAC + }, + /* cond_execute: class_tid: 1, control.0:2*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, control.1:3*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = 
BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 1, control.2:4*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: class_tid: 1, profile_tcam.ipv4:5*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, profile_tcam.ipv6:7*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, profile_tcam.ipv4_vxlan:9*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: class_tid: 1, em.ipv4:11*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, eem.ipv4:14*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, em.ipv6:17*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, eem.ipv6:20*/ + { + .cond_opcode = 
BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV6 + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET, + .cond_operand = BNXT_ULP_HDR_BIT_T_VXLAN + }, + /* cond_execute: class_tid: 1, em.vxlan:23*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_reject: wh_plus, class_tid: 2 */ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_WC_MATCH + }, + /* cond_execute: class_tid: 2, l2_cntxt_tcam_cache.rd:25*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET, + .cond_operand = BNXT_ULP_GLB_HF_ID_O_ETH_SMAC + }, + /* cond_execute: class_tid: 2, control.0:26*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 2, control.gen_tbl_miss:27*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 2, control.conflict_check:28*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_CC + }, + /* cond_execute: class_tid: 2, profile_tcam.ipv4:29*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: class_tid: 2, em.ipv4:30*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: class_tid: 2, eem.ipv4:32*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_IS_SET, + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_HDR_BIT_IS_SET, + .cond_operand = BNXT_ULP_HDR_BIT_O_IPV4 + }, + /* cond_execute: class_tid: 2, em.ipv6:34*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET, + }, + /* cond_execute: class_tid: 3, control.ing_0:35*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: 
class_tid: 3, control.egr_0:36*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_IS_SET, + .cond_operand = BNXT_ULP_CF_IDX_VFR_MODE + }, + /* cond_execute: class_tid: 3, control.egr_1:37*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, control.egr_2:38*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 3, l2_cntxt_tcam_cache.egr_wr:39*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_CF_NOT_SET, + .cond_operand = BNXT_ULP_CF_IDX_VFR_MODE + }, + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.vf_0:41*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + }, + /* cond_execute: class_tid: 4, control.vfr_0:42*/ + { + .cond_opcode = BNXT_ULP_COND_OPC_RF_IS_SET, + .cond_operand = BNXT_ULP_RF_IDX_GENERIC_TBL_MISS + } +}; + +struct bnxt_ulp_mapper_key_info ulp_wh_plus_class_key_info_list[] = { + /* class_tid: 1, , table: l2_cntxt_tcam_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 1, , table: mac_addr_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} 
+ }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description 
= "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: l2_cntxt_tcam.0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_VTAG_NUM >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_VTAG_NUM & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: mac_addr_cache.wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, 
+ .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 1, , table: profile_tcam_cache.rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + 
(BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_SIG_ID & 0xff} + } + }, + /* class_tid: 1, , table: profile_tcam.ipv4 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 40) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_TCP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_L4_HDR_TYPE_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_L4_HDR_TYPE_UDP} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, 
+ .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L2_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, 
+ { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: profile_tcam.ipv6 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_TCP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_L4_HDR_TYPE_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_L4_HDR_TYPE_UDP} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_TYPE_IPV6} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L2_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { 
+ .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: profile_tcam.ipv4_vxlan */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L4_HDR_TYPE_UDP} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description 
= "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO 
+ } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TL4_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TL3_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = 
"tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TL2_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + 
BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 1, , table: profile_tcam_cache.wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, 
+ .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_SIG_ID & 0xff} + } + }, + /* class_tid: 1, , table: em.ipv4 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + 
.description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + 
(BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + 
.field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: eem.ipv4 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 275, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 275, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 
0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + 
BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: em.ipv6 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + 
.field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: eem.ipv6 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + 
.field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + 
}, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: em.vxlan */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (4789 >> 8) & 0xff, + 4789 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 17} + } + }, + { + .field_info_mask = { + .description = "tl3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "tl3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "tl2.src", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2.src", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 1, , table: eem.vxlan */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 251, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 251, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "tl4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (4789 >> 8) & 0xff, + 4789 & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 17} + } + }, + { + .field_info_mask = { + .description = "tl3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "tl3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "tl3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2.src", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2.src", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_id", + .field_bit_size = 24, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: l2_cntxt_tcam_cache.rd */ + { + 
.field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + /* class_tid: 2, , table: mac_addr_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + 
.field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 2, , table: l2_cntxt_tcam.0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO 
+ }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description 
= "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_VTAG_NUM >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_O_VTAG_NUM & 0xff} + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 2, , table: mac_addr_cache.wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_SVIF_INDEX >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_SVIF_INDEX & 0xff} + } + }, + 
{ + .field_info_mask = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + }, + .field_info_spec = { + .description = "tun_hdr", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_TUN_HDR_TYPE_NONE} + } + }, + { + .field_info_mask = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "one_tag", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 48) & 0xff, + 
((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_OO_VLAN & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_HF, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_OO_VLAN_VID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_OO_VLAN_VID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + .field_info_spec = { + .description = "mac_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "etype", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tbl_scope", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_filter_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + } + }, + /* class_tid: 2, , table: profile_tcam_cache.rd */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_SIG_ID & 0xff} + } + }, + /* class_tid: 2, , table: profile_tcam.ipv4 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_TCP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_L4_HDR_TYPE_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_L4_HDR_TYPE_UDP} + } + }, + { + .field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = 
"l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size 
= 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L2_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { 
+ .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 2, , table: profile_tcam.ipv6 */ + { + .field_info_mask = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_ONES, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_HDR_BIT, + .field_opr1 = { + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 56) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 48) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 40) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 32) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 24) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 16) & 0xff, + ((uint64_t)BNXT_ULP_HDR_BIT_O_TCP >> 8) & 0xff, + (uint64_t)BNXT_ULP_HDR_BIT_O_TCP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_L4_HDR_TYPE_TCP}, + .field_src3 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr3 = { + ULP_WP_SYM_L4_HDR_TYPE_UDP} + } + }, + { + 
.field_info_mask = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + }, + .field_info_spec = { + .description = "l4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = 
"l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_TYPE_IPV6} + } + }, + { + .field_info_mask = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L3_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_ONE_VTAG >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_ONE_VTAG & 0xff} + } + }, + { + .field_info_mask = 
{ + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_L2_HDR_VALID_YES} + } + }, + { + .field_info_mask = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_flags", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec 
= { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_err", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tun_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_is_udp_tcp", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl4_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl4_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_dst", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_ipv6_cmp_src", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_isIP", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl3_hdr_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl3_hdr_valid", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl3_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_two_vtags", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_vtag_present", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_uc_mc_bc", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_hdr_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "tl2_hdr_valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "hrec_next", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "reserved", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "agg_error", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_0", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "pkt_type_1", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 2, , table: profile_tcam_cache.wr */ + { + .field_info_mask = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "recycle_cnt", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = 
BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr2 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr3 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "hdr_sig_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_HDR_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_HDR_SIG_ID & 0xff} + } + }, + /* class_tid: 2, , table: em.ipv4 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + 
.field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = 
"em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: eem.ipv4 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 275, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 275, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 
>> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: em.ipv6 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.dmac", + 
.field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 2, , table: eem.ipv6 */ + { + .field_info_mask = { + .description = "spare", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "spare", + .field_bit_size = 35, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "local_cos", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.dst", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_DST_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l4.src", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L4_SRC_PORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l3.prot", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CF, + .field_opr2 = { + (BNXT_ULP_CF_IDX_O_L3_PROTO_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.dst", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + .field_info_spec = { + .description = "l3.src", + .field_bit_size = 128, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2.smac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + .field_info_spec = { + .description = "l2.dmac", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_HF, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + } + }, + { + .field_info_mask = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, 
+ .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .field_info_mask = { + .description = "svif", + 
.field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_rd_vfr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_bypass.egr_vfr */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = 
"l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr_vfr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 
0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.rd */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + 
BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_rd_egr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + 
.description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + 
.field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vf_ing */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + 
.field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_rd_egr0 */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_egr0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + 
}, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_dtagged_ing0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = 
{ + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 2} + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + 
.field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_stagged_ing0 */ + { + .field_info_mask = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff, + 0xff} + }, + .field_info_spec = { + .description = "l2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + } + }, + { + .field_info_mask = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "l2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac0_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "svif", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_SVIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_SVIF & 0xff} + } + }, + { + .field_info_mask = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "sparif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ivlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_ovlan_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "mac1_addr", + .field_bit_size = 48, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ONES, + .field_opr1 = { + 0xff} + }, + .field_info_spec = { + .description = "l2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + }, + { + .field_info_mask = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tl2_num_vtags", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "tun_hdr_type", + 
.field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "tun_hdr_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + .field_info_spec = { + .description = "key_type", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } + }, + { + .field_info_mask = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + .field_info_spec = { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + } + } +}; + +struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_key_ext_list[] = { +}; + +struct bnxt_ulp_mapper_field_info ulp_wh_plus_class_result_field_list[] = { + /* class_tid: 1, , table: l2_cntxt_tcam.0 */ + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff} + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + 
(BNXT_ULP_CF_IDX_PHY_PORT_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF & 0xff} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: mac_addr_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam.ipv4 */ + { + .description = "wc_key_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.0", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.1", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "em_key_mask.2", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + { + .description = "em_key_mask.3", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + { + .description = "em_key_mask.4", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + 
.field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff} + }, + { + .description = "em_key_mask.5", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff} + }, + { + .description = "em_key_mask.6", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff} + }, + { + .description = "em_key_mask.7", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.8", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.9", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam.ipv6 */ + { + .description = "wc_key_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.0", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.1", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.2", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_SMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_SMAC & 0xff} + }, + { + .description = "em_key_mask.3", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + { + .description = "em_key_mask.4", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + 
BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + { + .description = "em_key_mask.5", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff} + }, + { + .description = "em_key_mask.6", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff} + }, + { + .description = "em_key_mask.7", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff} + }, + { + .description = "em_key_mask.8", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.9", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 7} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam.ipv4_vxlan */ + { + .description = "wc_key_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.0", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.1", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.2", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.3", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.4", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.5", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.6", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.7", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.8", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.9", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 20} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 1, , table: profile_tcam_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + 
(BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* class_tid: 1, , table: em.ipv4 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 1, , table: 
eem.ipv4 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ACTION_REC_SIZE >> 8) & 0xff, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE & 0xff} + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (173 >> 8) & 0xff, + 173 & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 1, , table: em.ipv6 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 1, , table: eem.ipv6 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ACTION_REC_SIZE >> 8) & 0xff, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE & 0xff} + }, + { + .description = "key_size", + 
.field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (413 >> 8) & 0xff, + 413 & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 1, , table: em.vxlan */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 1, , table: eem.vxlan */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ACTION_REC_SIZE >> 8) & 0xff, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE & 0xff} + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (197 >> 8) & 0xff, + 197 & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 2, , table: l2_cntxt_tcam.0 */ + { + .description = 
"l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_L2_PROF_FUNC_ID & 0xff} + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP >> 8) & 0xff, + BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr2 = { + ULP_WP_SYM_LOOPBACK_PARIF}, + .field_src3 = BNXT_ULP_FIELD_SRC_CF, + .field_opr3 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_SP_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_SP_PTR & 0xff} + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: mac_addr_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: profile_tcam.ipv4 */ + { + .description = "wc_key_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.0", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.1", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + { + .description = "em_key_mask.2", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_SRC_ADDR & 0xff} + }, + { + .description = "em_key_mask.3", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_DST_ADDR & 0xff} + }, + { + .description = "em_key_mask.4", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV4_PROTO_ID & 0xff} + }, + { + .description = "em_key_mask.5", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff} + }, + { + .description = 
"em_key_mask.6", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff} + }, + { + .description = "em_key_mask.7", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.8", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.9", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 4} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: profile_tcam.ipv6 */ + { + .description = "wc_key_id", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "wc_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.0", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "em_key_mask.1", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_ETH_DMAC >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_ETH_DMAC & 0xff} + }, + { + .description = "em_key_mask.2", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.3", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_SRC_ADDR & 0xff} + }, + { + .description = "em_key_mask.4", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_DST_ADDR & 0xff} + }, + { + .description = "em_key_mask.5", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr1 = { + (BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_IPV6_PROTO_ID & 0xff} + }, + { + .description = "em_key_mask.6", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_SRC_PORT & 0xff}, + 
.field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_SRC_PORT & 0xff} + }, + { + .description = "em_key_mask.7", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_O_L4 >> 8) & 0xff, + BNXT_ULP_CF_IDX_O_L4 & 0xff}, + .field_src2 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr2 = { + (BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_UDP_DST_PORT & 0xff}, + .field_src3 = BNXT_ULP_FIELD_SRC_FIELD_BIT, + .field_opr3 = { + (BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT >> 8) & 0xff, + BNXT_ULP_GLB_HF_ID_O_TCP_DST_PORT & 0xff} + }, + { + .description = "em_key_mask.8", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_mask.9", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "em_key_id", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 7} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "em_search_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pl_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 2, , table: profile_tcam_cache.wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, 
+ BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "profile_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0 & 0xff} + }, + { + .description = "em_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_EM_PROFILE_ID_0 & 0xff} + }, + { + .description = "wc_profile_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_sig_id", + .field_bit_size = 64, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_FLOW_SIG_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_FLOW_SIG_ID & 0xff} + }, + /* class_tid: 2, , table: em.ipv4 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + 
.field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 2, , table: eem.ipv4 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ACTION_REC_SIZE >> 8) & 0xff, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE & 0xff} + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (173 >> 8) & 0xff, + 173 & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 2, , table: em.ipv6 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 2, , table: eem.ipv6 */ + { + .description = "act_rec_ptr", + .field_bit_size = 33, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "ext_flow_cntr", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_int", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "act_rec_size", + .field_bit_size = 5, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ACTION_REC_SIZE >> 8) & 0xff, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE & 0xff} + }, + { + .description = "key_size", + .field_bit_size = 9, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (413 >> 8) & 0xff, + 413 & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "strength", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 3} + }, + { + .description = "l1_cacheable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "valid", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + /* class_tid: 3, , table: int_full_act_record.ing_0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + 
(BNXT_ULP_CF_IDX_PHY_PORT_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_PARIF & 0xff} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.ing_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description 
= "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: parif_def_lkup_arec_ptr.ing_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_arec_ptr.ing_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_err_arec_ptr.ing_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: int_full_act_record.egr_vfr */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_VPORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam_bypass.egr_vfr */ + { + .description = "act_record_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_LOOPBACK_PARIF} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr_vfr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { 
+ .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_PARIF >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF & 0xff} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: l2_cntxt_tcam_cache.egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: int_full_act_record.egr_0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + 
.field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = 
BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_PHY_PORT_VPORT >> 8) & 0xff, + BNXT_ULP_CF_IDX_PHY_PORT_VPORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 3, , table: parif_def_lkup_arec_ptr.egr_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + 
BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_arec_ptr.egr_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 3, , table: parif_def_err_arec_ptr.egr_0 */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + /* class_tid: 4, , table: int_full_act_record.loopback */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 
= BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_WP_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_WP_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "prof_func_id", + .field_bit_size = 7, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_PROF_FUNC_ID & 0xff} + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_LOOPBACK_PARIF} + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = 
BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vf_egr_wr */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_ID_0 & 0xff} + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: parif_def_lkup_arec_ptr.vf_egr */ + { + .description = "act_rec_ptr", + 
.field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + /* class_tid: 4, , table: parif_def_arec_ptr.vf_egr */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + /* class_tid: 4, , table: parif_def_err_arec_ptr.vf_egr */ + { + .description = "act_rec_ptr", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_GLB_RF, + .field_opr1 = { + (BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR >> 8) & 0xff, + BNXT_ULP_GLB_RF_IDX_GLB_LB_AREC_PTR & 0xff} + }, + /* class_tid: 4, , table: int_full_act_record.vf_ing */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DRV_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_DRV_FUNC_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + 
.field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vf_ing */ + { + .description = "act_record_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_egr0 */ + { + .description = "act_record_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "reserved", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_cache.vfr_wr_egr0 */ + { + .description = "rid", + .field_bit_size = 32, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_RID >> 8) & 0xff, + BNXT_ULP_RF_IDX_RID & 0xff} + }, + { + .description = "l2_cntxt_tcam_index", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_L2_CNTXT_TCAM_INDEX_0 & 0xff} + }, + { + .description = "l2_cntxt_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_property_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "prof_func_id", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: int_vtag_encap_record.vfr_egr0 */ + { + .description = "ecv_valid", 
+ .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "ecv_custom_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_vtag_type", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + ULP_WP_SYM_ECV_VTAG_TYPE_ADD_1_ENCAP_PRI} + }, + { + .description = "ecv_l2_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l3_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_l4_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "ecv_tun_type", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_tpid", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 0x81, + 0x00} + }, + { + .description = "vtag_pcp", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_de", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vtag_vid", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_DEV_PORT_ID >> 8) & 0xff, + BNXT_ULP_CF_IDX_DEV_PORT_ID & 0xff} + }, + /* class_tid: 4, , table: int_full_act_record.vfr_egr0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = 
"age_enable", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_ENCAP_PTR_0 >> 8) & 0xff, + BNXT_ULP_RF_IDX_ENCAP_PTR_0 & 0xff} + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + (ULP_WP_SYM_LOOPBACK_PORT >> 8) & 0xff, + ULP_WP_SYM_LOOPBACK_PORT & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: int_full_act_record.vfr_ing0 */ + { + .description = "flow_cntr_ptr", + .field_bit_size = 14, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "age_enable", + 
.field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "agg_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "rate_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "flow_cntr_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_key", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_mir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcpflags_match", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "encap_ptr", + .field_bit_size = 11, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "dst_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_dst_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "src_ip_ptr", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tcp_src_port", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "meter_id", + .field_bit_size = 10, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + 
.description = "tl3_rdir", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tl3_ttl_dec", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "decap_func", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "vnic_or_vport", + .field_bit_size = 12, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CF, + .field_opr1 = { + (BNXT_ULP_CF_IDX_VF_FUNC_VNIC >> 8) & 0xff, + BNXT_ULP_CF_IDX_VF_FUNC_VNIC & 0xff} + }, + { + .description = "pop_vlan", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "meter", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "mirror", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "drop", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "hit", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "type", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_dtagged_ing0 */ + { + .description = "act_record_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + .field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + 
.description = "reserved", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + /* class_tid: 4, , table: l2_cntxt_tcam_bypass.vfr_stagged_ing0 */ + { + .description = "act_record_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_RF, + 
.field_opr1 = { + (BNXT_ULP_RF_IDX_MAIN_ACTION_PTR >> 8) & 0xff, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR & 0xff} + }, + { + .description = "reserved", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "l2_byp_lkup_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "parif", + .field_bit_size = 4, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_pri", + .field_bit_size = 8, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_pri", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "allowed_tpid", + .field_bit_size = 6, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "default_tpid", + .field_bit_size = 3, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "bd_act_en", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "sp_rec_ptr", + .field_bit_size = 16, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "byp_sp_lkup", + .field_bit_size = 1, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_CONST, + .field_opr1 = { + 1} + }, + { + .description = "pri_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + }, + { + .description = "tpid_anti_spoof_ctl", + .field_bit_size = 2, + .field_opc = BNXT_ULP_FIELD_OPC_SRC1, + .field_src1 = BNXT_ULP_FIELD_SRC_ZERO + } +}; + +struct bnxt_ulp_mapper_ident_info ulp_wh_plus_class_ident_list[] = { + /* class_tid: 1, , 
table: l2_cntxt_tcam_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 1, , table: mac_addr_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 1, , table: l2_cntxt_tcam.0 */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + }, + /* class_tid: 1, , table: profile_tcam_cache.rd */ + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 42 + }, + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 58 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* class_tid: 1, , table: profile_tcam.ipv4 */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 28 + }, + /* class_tid: 1, , table: profile_tcam.ipv6 */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 28 + }, + /* class_tid: 1, , table: profile_tcam.ipv4_vxlan */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 28 + }, + /* class_tid: 2, , table: 
l2_cntxt_tcam_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 2, , table: mac_addr_cache.rd */ + { + .description = "l2_cntxt_id", + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 42 + }, + /* class_tid: 2, , table: l2_cntxt_tcam.0 */ + { + .description = "l2_cntxt_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_HIGH, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + }, + /* class_tid: 2, , table: profile_tcam_cache.rd */ + { + .description = "em_profile_id", + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 42 + }, + { + .description = "flow_sig_id", + .regfile_idx = BNXT_ULP_RF_IDX_FLOW_SIG_ID, + .ident_bit_size = 64, + .ident_bit_pos = 58 + }, + { + .description = "profile_tcam_index", + .regfile_idx = BNXT_ULP_RF_IDX_PROFILE_TCAM_INDEX_0, + .ident_bit_size = 10, + .ident_bit_pos = 32 + }, + /* class_tid: 2, , table: profile_tcam.ipv4 */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 28 + }, + /* class_tid: 2, , table: profile_tcam.ipv6 */ + { + .description = "em_profile_id", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_EM_PROF, + .regfile_idx = BNXT_ULP_RF_IDX_EM_PROFILE_ID_0, + .ident_bit_size = 8, + .ident_bit_pos = 28 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.ing_0 */ + { + .description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + }, + /* class_tid: 3, , table: l2_cntxt_tcam.egr_0 */ + { + 
.description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + }, + /* class_tid: 4, , table: l2_cntxt_tcam.vf_egr */ + { + .description = "l2_cntxt_id_low", + .resource_func = BNXT_ULP_RESOURCE_FUNC_IDENTIFIER, + .ident_type = TF_IDENT_TYPE_L2_CTXT_LOW, + .regfile_idx = BNXT_ULP_RF_IDX_L2_CNTXT_ID_0, + .ident_bit_size = 10, + .ident_bit_pos = 0 + } +}; diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.c new file mode 100644 index 000000000000..416e6a209c13 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2023 Broadcom + * All rights reserved. + */ + +#include "tf_core.h" +#include "ulp_mapper.h" +#include "ulp_alloc_tbl.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Retrieve the allocator table initialization parameters for the tbl_idx */ +static const struct bnxt_ulp_allocator_tbl_params* +ulp_allocator_tbl_params_get(struct bnxt_ulp_context *ulp_ctx, u32 tbl_idx) +{ + const struct bnxt_ulp_allocator_tbl_params *alloc_tbl; + struct bnxt_ulp_device_params *dparms; + u32 dev_id; + + if (tbl_idx >= BNXT_ULP_ALLOCATOR_TBL_MAX_SZ) { + netdev_dbg(ulp_ctx->bp->dev, "Allocator table out of bounds %d\n", + tbl_idx); + return NULL; + } + + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) + return NULL; + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get device parms\n"); + return NULL; + } + + alloc_tbl = &dparms->allocator_tbl_params[tbl_idx]; + return alloc_tbl; +} + +/* + * Initialize the allocator table list + * + * mapper_data [in] Pointer to the mapper data and the allocator table is + * part of it 
+ * + * returns 0 on success + */ +int +ulp_allocator_tbl_list_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + const struct bnxt_ulp_allocator_tbl_params *tbl; + struct ulp_allocator_tbl_entry *entry; + u32 idx, pool_size; + + /* Allocate the generic tables. */ + for (idx = 0; idx < BNXT_ULP_ALLOCATOR_TBL_MAX_SZ; idx++) { + tbl = ulp_allocator_tbl_params_get(ulp_ctx, idx); + if (!tbl) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get alloc table parm %d\n", + idx); + return -EINVAL; + } + entry = &mapper_data->alloc_tbl[idx]; + + /* Allocate memory for result data and key data */ + if (tbl->num_entries != 0) { + /* assign the name */ + entry->alloc_tbl_name = tbl->name; + entry->num_entries = tbl->num_entries; + pool_size = BITALLOC_SIZEOF(tbl->num_entries); + /* allocate the big chunk of memory */ + entry->ulp_bitalloc = vzalloc(pool_size); + if (!entry->ulp_bitalloc) + return -ENOMEM; + if (bnxt_ba_init(entry->ulp_bitalloc, entry->num_entries, true)) + return -ENOMEM; + } else { + netdev_dbg(ulp_ctx->bp->dev, "%s:Unused alloc tbl entry is %d\n", + tbl->name, idx); + continue; + } + } + return 0; +} + +/* + * Free the allocator table list + * + * mapper_data [in] Pointer to the mapper data and the generic table is + * part of it + * + * returns 0 on success + */ +int +ulp_allocator_tbl_list_deinit(struct bnxt_ulp_mapper_data *mapper_data) +{ + struct ulp_allocator_tbl_entry *entry; + u32 idx; + + /* iterate the generic table. 
*/ + for (idx = 0; idx < BNXT_ULP_ALLOCATOR_TBL_MAX_SZ; idx++) { + entry = &mapper_data->alloc_tbl[idx]; + if (entry->ulp_bitalloc) { + vfree(entry->ulp_bitalloc); + entry->ulp_bitalloc = NULL; + } + } + /* success */ + return 0; +} + +/* + * utility function to calculate the table idx + * + * res_sub_type [in] - Resource sub type + * dir [in] - Direction + * + * returns None + */ +static int +ulp_allocator_tbl_idx_calculate(u32 res_sub_type, u32 dir) +{ + int tbl_idx; + + /* Validate for direction */ + if (dir >= TF_DIR_MAX) { + netdev_dbg(NULL, "invalid argument %x\n", dir); + return -EINVAL; + } + tbl_idx = (res_sub_type << 1) | (dir & 0x1); + if (tbl_idx >= BNXT_ULP_ALLOCATOR_TBL_MAX_SZ) { + netdev_dbg(NULL, "invalid table index %x\n", tbl_idx); + return -EINVAL; + } + return tbl_idx; +} + +/* + * allocate a index from allocator + * + * mapper_data [in] Pointer to the mapper data and the allocator table is + * part of it + * + * returns index on success or negative number on failure + */ +int +ulp_allocator_tbl_list_alloc(struct bnxt_ulp_mapper_data *mapper_data, + u32 res_sub_type, u32 dir, + int *alloc_id) +{ + struct ulp_allocator_tbl_entry *entry; + int idx; + + idx = ulp_allocator_tbl_idx_calculate(res_sub_type, dir); + if (idx < 0) + return -EINVAL; + + entry = &mapper_data->alloc_tbl[idx]; + if (!entry->ulp_bitalloc || !entry->num_entries) { + netdev_dbg(NULL, "invalid table index %x\n", idx); + return -EINVAL; + } + *alloc_id = bnxt_ba_alloc(entry->ulp_bitalloc); + + if (*alloc_id < 0) { + netdev_dbg(NULL, "unable to alloc index %x\n", idx); + return -ENOMEM; + } + return 0; +} + +/* + * free a index in allocator + * + * mapper_data [in] Pointer to the mapper data and the allocator table is + * part of it + * + * returns error + */ +int +ulp_allocator_tbl_list_free(struct bnxt *bp, + struct bnxt_ulp_mapper_data *mapper_data, + u32 res_sub_type, u32 dir, + int index) +{ + struct ulp_allocator_tbl_entry *entry; + int rc; + int idx; + + idx = 
ulp_allocator_tbl_idx_calculate(res_sub_type, dir); + if (idx < 0) + return -EINVAL; + + entry = &mapper_data->alloc_tbl[idx]; + if (!entry->ulp_bitalloc || !entry->num_entries) { + netdev_dbg(bp->dev, "invalid table index %x\n", idx); + return -EINVAL; + } + if (index < 0 || index > entry->num_entries) { + netdev_dbg(bp->dev, "invalid alloc index %x\n", index); + return -EINVAL; + } + rc = bnxt_ba_free(entry->ulp_bitalloc, index); + if (rc < 0) { + netdev_dbg(bp->dev, "%s:unable to free index %x\n", + entry->alloc_tbl_name, index); + return -EINVAL; + } + return 0; +} +#endif /* defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.h new file mode 100644 index 000000000000..bb467d015567 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_alloc_tbl.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_ALLOC_TBL_H_ +#define _ULP_ALLOC_TBL_H_ + +#include "bitalloc.h" + +/* Structure to pass the allocator table values across APIs */ +struct ulp_allocator_tbl_entry { + const char *alloc_tbl_name; + u16 num_entries; + struct bitalloc *ulp_bitalloc; +}; + +/* Forward declaration */ +struct bnxt_ulp_mapper_data; +struct ulp_flow_db_res_params; + +/* + * Initialize the allocator table list + * + * @ulp_ctx: - Pointer to the ulp context + * @mapper_data: Pointer to the mapper data and the generic table is + * part of it + * + * returns 0 on success + */ +int +ulp_allocator_tbl_list_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data); + +/* + * Free the allocator table list + * + * @mapper_data: Pointer to the mapper data and the generic table is + * part of it + * + * returns 0 on success + */ +int +ulp_allocator_tbl_list_deinit(struct bnxt_ulp_mapper_data *mapper_data); + +/* + * allocate a index from allocator + * + * @mapper_data: Pointer to the mapper data and the allocator table is + * part of it + * + * returns index on success or negative number on failure + */ +int +ulp_allocator_tbl_list_alloc(struct bnxt_ulp_mapper_data *mapper_data, + u32 res_sub_type, u32 dir, + int *alloc_id); + +/* + * free a index in allocator + * + * @mapper_data: Pointer to the mapper data and the allocator table is + * part of it + * + * returns error + */ +int +ulp_allocator_tbl_list_free(struct bnxt *bp, + struct bnxt_ulp_mapper_data *mapper_data, + u32 res_sub_type, u32 dir, + int index); +#endif /* _ULP_ALLOC_TBL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_def_rules.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_def_rules.c new file mode 100644 index 000000000000..9094bc3119fc --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_def_rules.c @@ -0,0 +1,745 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_common.h" +#include "ulp_template_struct.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_db_field.h" +#include "ulp_utils.h" +#include "ulp_port_db.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +struct bnxt_ulp_def_param_handler { + int (*vfr_func)(struct bnxt_ulp_context *ulp_ctx, + struct ulp_tlv_param *param, + struct bnxt_ulp_mapper_parms *mapper_params); +}; + +static int +ulp_set_vf_roce_en_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, u32 port_id, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 vf_roce; + int rc; + + rc = ulp_port_db_vf_roce_get(ulp_ctx, port_id, &vf_roce); + if (rc) + return rc; + + ULP_COMP_FLD_IDX_WR(mapper_params, BNXT_ULP_CF_IDX_VF_ROCE_EN, + vf_roce); + return 0; +} + +static int +ulp_set_udcc_en_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, u32 port_id, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u8 udcc = 0; + int rc; + + rc = ulp_port_db_udcc_get(ulp_ctx, port_id, &udcc); + if (rc) + return rc; + + ULP_COMP_FLD_IDX_WR(mapper_params, BNXT_ULP_CF_IDX_UDCC_EN, + udcc); + return 0; +} + +static int +ulp_set_svif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, + u32 ifindex, u8 svif_type, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 svif; + u8 idx; + int rc; + + rc = ulp_port_db_svif_get(ulp_ctx, ifindex, svif_type, &svif); + if (rc) + return rc; + + if (svif_type == BNXT_ULP_PHY_PORT_SVIF) + idx = BNXT_ULP_CF_IDX_PHY_PORT_SVIF; + else if (svif_type == BNXT_ULP_DRV_FUNC_SVIF) + idx = BNXT_ULP_CF_IDX_DRV_FUNC_SVIF; + else + idx = BNXT_ULP_CF_IDX_VF_FUNC_SVIF; + + ULP_COMP_FLD_IDX_WR(mapper_params, idx, svif); + + return 0; +} + +static int +ulp_set_spif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, + u32 ifindex, u8 spif_type, + struct bnxt_ulp_mapper_parms 
*mapper_params) +{ + u16 spif; + u8 idx; + int rc; + + rc = ulp_port_db_spif_get(ulp_ctx, ifindex, spif_type, &spif); + if (rc) + return rc; + + if (spif_type == BNXT_ULP_PHY_PORT_SPIF) + idx = BNXT_ULP_CF_IDX_PHY_PORT_SPIF; + else if (spif_type == BNXT_ULP_DRV_FUNC_SPIF) + idx = BNXT_ULP_CF_IDX_DRV_FUNC_SPIF; + else + idx = BNXT_ULP_CF_IDX_VF_FUNC_SPIF; + + ULP_COMP_FLD_IDX_WR(mapper_params, idx, spif); + + return 0; +} + +static int +ulp_set_parif_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, + u32 ifindex, u8 parif_type, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 parif; + u8 idx; + int rc; + + rc = ulp_port_db_parif_get(ulp_ctx, ifindex, parif_type, &parif); + if (rc) + return rc; + + if (parif_type == BNXT_ULP_PHY_PORT_PARIF) + idx = BNXT_ULP_CF_IDX_PHY_PORT_PARIF; + else if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) + idx = BNXT_ULP_CF_IDX_DRV_FUNC_PARIF; + else + idx = BNXT_ULP_CF_IDX_VF_FUNC_PARIF; + + ULP_COMP_FLD_IDX_WR(mapper_params, idx, parif); + + return 0; +} + +static int +ulp_set_vport_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, u32 ifindex, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 vport; + int rc; + + rc = ulp_port_db_vport_get(ulp_ctx, ifindex, &vport); + if (rc) + return rc; + + ULP_COMP_FLD_IDX_WR(mapper_params, BNXT_ULP_CF_IDX_PHY_PORT_VPORT, + vport); + return 0; +} + +static int +ulp_set_vnic_in_comp_fld(struct bnxt_ulp_context *ulp_ctx, + u32 ifindex, u8 vnic_type, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 vnic; + u8 idx; + int rc; + + rc = ulp_port_db_default_vnic_get(ulp_ctx, ifindex, vnic_type, &vnic); + if (rc) + return rc; + + if (vnic_type == BNXT_ULP_DRV_FUNC_VNIC) + idx = BNXT_ULP_CF_IDX_DRV_FUNC_VNIC; + else + idx = BNXT_ULP_CF_IDX_VF_FUNC_VNIC; + + ULP_COMP_FLD_IDX_WR(mapper_params, idx, vnic); + + return 0; +} + +static int +ulp_set_vlan_in_act_prop(struct bnxt_ulp_context *ulp_ctx, u16 port_id, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + struct ulp_tc_act_prop *act_prop = 
mapper_params->act_prop; + + if (ULP_BITMAP_ISSET(mapper_params->act_bitmap->bits, + BNXT_ULP_ACT_BIT_SET_VLAN_VID)) { + netdev_dbg(ulp_ctx->bp->dev, + "VLAN already set, multiple VLANs unsupported\n"); + return BNXT_TF_RC_ERROR; + } + + port_id = cpu_to_be16(port_id); + + ULP_BITMAP_SET(mapper_params->act_bitmap->bits, + BNXT_ULP_ACT_BIT_SET_VLAN_VID); + + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG], + &port_id, sizeof(port_id)); + + return 0; +} + +static int +ulp_set_mark_in_act_prop(struct bnxt_ulp_context *ulp_ctx, u16 port_id, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + if (ULP_BITMAP_ISSET(mapper_params->act_bitmap->bits, + BNXT_ULP_ACT_BIT_MARK)) { + netdev_dbg(ulp_ctx->bp->dev, + "MARK already set, multiple MARKs unsupported\n"); + return BNXT_TF_RC_ERROR; + } + + ULP_COMP_FLD_IDX_WR(mapper_params, BNXT_ULP_CF_IDX_DEV_PORT_ID, + port_id); + + return 0; +} + +static int +ulp_df_dev_port_handler(struct bnxt_ulp_context *ulp_ctx, + struct ulp_tlv_param *param, + struct bnxt_ulp_mapper_parms *mapper_params) +{ + u16 port_id; + u32 ifindex; + int rc; + + port_id = (((u16)param->value[0]) << 8) | (u16)param->value[1]; + + rc = ulp_port_db_dev_port_to_ulp_index(ulp_ctx, port_id, &ifindex); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Invalid port id %d\n", port_id); + return BNXT_TF_RC_ERROR; + } + + /* Set port SVIF */ + rc = ulp_set_svif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_PHY_PORT_SVIF, + mapper_params); + if (rc) + return rc; + + /* Set DRV Func SVIF */ + rc = ulp_set_svif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_DRV_FUNC_SVIF, + mapper_params); + if (rc) + return rc; + + /* Set VF Func SVIF */ + rc = ulp_set_svif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_VF_FUNC_SVIF, + mapper_params); + if (rc) + return rc; + + /* Set port SPIF */ + rc = ulp_set_spif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_PHY_PORT_SPIF, + mapper_params); + if (rc) + return rc; + + /* Set DRV Func SPIF */ + rc = ulp_set_spif_in_comp_fld(ulp_ctx, ifindex, 
BNXT_ULP_DRV_FUNC_SPIF, + mapper_params); + if (rc) + return rc; + + /* Set VF Func SPIF */ + rc = ulp_set_spif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_DRV_FUNC_SPIF, + mapper_params); + if (rc) + return rc; + + /* Set port PARIF */ + rc = ulp_set_parif_in_comp_fld(ulp_ctx, ifindex, + BNXT_ULP_PHY_PORT_PARIF, mapper_params); + if (rc) + return rc; + + /* Set DRV Func PARIF */ + rc = ulp_set_parif_in_comp_fld(ulp_ctx, ifindex, + BNXT_ULP_DRV_FUNC_PARIF, mapper_params); + if (rc) + return rc; + + /* Set VF Func PARIF */ + rc = ulp_set_parif_in_comp_fld(ulp_ctx, ifindex, BNXT_ULP_VF_FUNC_PARIF, + mapper_params); + if (rc) + return rc; + + /* Set uplink VNIC */ + rc = ulp_set_vnic_in_comp_fld(ulp_ctx, ifindex, true, mapper_params); + if (rc) + return rc; + + /* Set VF VNIC */ + rc = ulp_set_vnic_in_comp_fld(ulp_ctx, ifindex, false, mapper_params); + if (rc) + return rc; + + /* Set VPORT */ + rc = ulp_set_vport_in_comp_fld(ulp_ctx, ifindex, mapper_params); + if (rc) + return rc; + + /* Set VLAN */ + rc = ulp_set_vlan_in_act_prop(ulp_ctx, port_id, mapper_params); + if (rc) + return rc; + + /* Set MARK */ + rc = ulp_set_mark_in_act_prop(ulp_ctx, port_id, mapper_params); + if (rc) + return rc; + + return 0; +} + +struct bnxt_ulp_def_param_handler ulp_def_handler_tbl[] = { + [BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID] = { + .vfr_func = ulp_df_dev_port_handler } +}; + +/* Function to create default rules for the following paths + * 1) Device PORT to App + * 2) App to Device PORT + * 3) VF Representor to VF + * 4) VF to VF Representor + * + * @bp : Ptr to bnxt structure. + * @param_list: Ptr to a list of parameters (Currently, only ifindex). + * @ulp_class_tid: Class template ID number. + * @flow_id: Ptr to flow identifier. + * + * Returns 0 on success or negative number on failure. 
+ */ +int +ulp_default_flow_create(struct bnxt *bp, + struct ulp_tlv_param *param_list, + u32 ulp_class_tid, + u16 port_id, + u32 *flow_id) +{ + struct bnxt_ulp_mapper_parms mapper_params = { 0 }; + struct ulp_tc_hdr_bitmap act = { 0 }; + struct ulp_tc_hdr_field *hdr_field; + struct ulp_tc_act_prop act_prop = {{ 0 }}; + struct bnxt_ulp_context *ulp_ctx; + u32 type, ulp_flags = 0, fid; + u64 *comp_fld; + int rc = 0; + + hdr_field = vzalloc(sizeof(*hdr_field) * BNXT_ULP_PROTO_HDR_MAX); + if (!hdr_field) + return -ENOMEM; + + comp_fld = vzalloc(sizeof(u64) * BNXT_ULP_CF_IDX_LAST); + if (!comp_fld) { + rc = -ENOMEM; + goto err1; + } + + mapper_params.hdr_field = hdr_field; + mapper_params.act_bitmap = &act; + mapper_params.act_prop = &act_prop; + mapper_params.comp_fld = comp_fld; + mapper_params.class_tid = ulp_class_tid; + mapper_params.flow_type = BNXT_ULP_FDB_TYPE_DEFAULT; + mapper_params.port_id = bp->pf.fw_fid; + + ulp_ctx = bp->ulp_ctx; + if (!ulp_ctx) { + netdev_dbg(bp->dev, + "ULP context is not initialized. 
Failed to create dflt flow.\n"); + rc = -EINVAL; + goto err1; + } + + /* update the vf rep flag */ + if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(ulp_ctx, &ulp_flags)) { + netdev_dbg(bp->dev, "Error in getting ULP context flags\n"); + rc = -EINVAL; + goto err1; + } + if (ULP_VF_REP_IS_ENABLED(ulp_flags)) + ULP_COMP_FLD_IDX_WR(&mapper_params, + BNXT_ULP_CF_IDX_VFR_MODE, 1); + + type = param_list->type; + while (type < BNXT_ULP_DF_PARAM_TYPE_LAST) { + if (ulp_def_handler_tbl[type].vfr_func) { + rc = ulp_def_handler_tbl[type].vfr_func(ulp_ctx, + param_list, + &mapper_params); + if (rc) { + netdev_dbg(bp->dev, + "Failed to create default flow.\n"); + goto err1; + } + } + + param_list++; + type = param_list->type; + } + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, + port_id, + &mapper_params.func_id)) { + netdev_dbg(bp->dev, "conversion of port to func id failed\n"); + goto err1; + } + + /* update the VF meta function id */ + ULP_COMP_FLD_IDX_WR(&mapper_params, BNXT_ULP_CF_IDX_VF_META_FID, + BNXT_ULP_META_VF_FLAG | mapper_params.func_id); + + /* Set VF_ROCE */ + rc = ulp_set_vf_roce_en_in_comp_fld(ulp_ctx, port_id, &mapper_params); + if (rc) + goto err1; + + /* Set UDCC */ + rc = ulp_set_udcc_en_in_comp_fld(ulp_ctx, port_id, &mapper_params); + if (rc) + goto err1; + + netdev_dbg(bp->dev, "Creating default flow with template id: %u\n", + ulp_class_tid); + + /* Protect flow creation */ + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + rc = ulp_flow_db_fid_alloc(ulp_ctx, mapper_params.flow_type, + mapper_params.func_id, &fid); + if (rc) { + netdev_dbg(bp->dev, "Unable to allocate flow table entry\n"); + goto err2; + } + + mapper_params.flow_id = fid; + rc = ulp_mapper_flow_create(ulp_ctx, &mapper_params, NULL); + if (rc) + goto err3; + + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + *flow_id = fid; + return 0; + +err3: + ulp_flow_db_fid_free(ulp_ctx, mapper_params.flow_type, fid); +err2: + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); +err1: + 
vfree(hdr_field); + vfree(comp_fld); + netdev_dbg(bp->dev, "Failed to create default flow.\n"); + return rc; +} + +/* Function to destroy default rules for the following paths + * 1) Device PORT to App + * 2) App to Device PORT + * 3) VF Representor to VF + * 4) VF to VF Representor + * + * @bp : Ptr to bnxt structure. + * @flow_id: Flow identifier. + * + * Returns 0 on success or negative number on failure. + */ +int +ulp_default_flow_destroy(struct bnxt *bp, u32 flow_id) +{ + struct bnxt_ulp_context *ulp_ctx; + int rc = 0; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + return -EINVAL; + } + + if (!flow_id) { + netdev_dbg(bp->dev, "invalid flow id zero\n"); + return rc; + } + + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + rc = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_DEFAULT, + flow_id, NULL); + if (rc) + netdev_dbg(bp->dev, "Failed to destroy flow.\n"); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return rc; +} + +void +bnxt_ulp_destroy_df_rules(struct bnxt *bp, bool global) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_df_rule_info *info; + u16 fid; + + if (!BNXT_TRUFLOW_EN(bp) || + bnxt_dev_is_vf_rep(bp->dev)) + return; + + if (!ulp_ctx || !ulp_ctx->cfg_data) + return; + + /* PF's tx_cfa_action is used to hint the adapter about + * which action record pointer to use when sending the + * packet out of the port (software path of Truflow). + * If this is not cleared then the adapter will try to + * use a stale action record pointer which will black hole + * the packets. tx_cfa_action is set during the creation + * of ULP default rules. 
+ */ + bp->tx_cfa_action = 0; + + /* Delete default rules per port */ + if (!global) { + fid = bp->pf.fw_fid; + info = &ulp_ctx->cfg_data->df_rule_info[fid]; + if (!info->valid) + return; + + ulp_default_flow_destroy(bp, + info->def_port_flow_id); + memset(info, 0, sizeof(struct bnxt_ulp_df_rule_info)); + return; + } + + /* Delete default rules for all ports */ + for (fid = 0; fid < TC_MAX_ETHPORTS; fid++) { + info = &ulp_ctx->cfg_data->df_rule_info[fid]; + if (!info->valid) + continue; + + ulp_default_flow_destroy(bp, + info->def_port_flow_id); + memset(info, 0, sizeof(struct bnxt_ulp_df_rule_info)); + } +} + +static int +bnxt_create_port_app_df_rule(struct bnxt *bp, u8 flow_type, + u32 *flow_id) +{ + u16 fid = bp->pf.fw_fid; + struct ulp_tlv_param param_list[] = { + { + .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID, + .length = 2, + .value = {(fid >> 8) & 0xff, fid & 0xff} + }, + { + .type = BNXT_ULP_DF_PARAM_TYPE_LAST, + .length = 0, + .value = {0} + } + }; + + if (!flow_type) { + *flow_id = 0; + return 0; + } + + return ulp_default_flow_create(bp, param_list, flow_type, + fid, flow_id); +} + +int +bnxt_ulp_create_df_rules(struct bnxt *bp) +{ + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_df_rule_info *info; + int rc = 0; + u8 fid; + + if (!BNXT_TRUFLOW_EN(bp) || bnxt_dev_is_vf_rep(bp->dev) || !ulp_ctx) + return 0; + + fid = bp->pf.fw_fid; + info = &ulp_ctx->cfg_data->df_rule_info[fid]; + + rc = bnxt_create_port_app_df_rule(bp, + BNXT_ULP_DF_TPL_DEFAULT_UPLINK_PORT, + &info->def_port_flow_id); + if (rc) { + netdev_dbg(bp->dev, + "Failed to create port to app default rule\n"); + return rc; + } + + /* If the template already set the bd_action, skip this. + * This is handled differently between Thor and Thor2. 
+ */ + if (!BNXT_CHIP_P7(bp) || !bp->tx_cfa_action) { + rc = ulp_default_flow_db_cfa_action_get(ulp_ctx, + info->def_port_flow_id, + &bp->tx_cfa_action); + } + + if (rc) + bp->tx_cfa_action = 0; + + netdev_dbg(bp->dev, "Default flow id %d Tx cfa action is 0x%x\n", + info->def_port_flow_id, bp->tx_cfa_action); + info->valid = true; + return 0; +} + +#ifdef CONFIG_VF_REPS + +static int +bnxt_create_port_vfr_default_rule(struct bnxt *bp, + u8 flow_type, + u16 vfr_port_id, + u32 *flow_id) +{ + struct ulp_tlv_param param_list[] = { + { + .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID, + .length = 2, + .value = {(vfr_port_id >> 8) & 0xff, vfr_port_id & 0xff} + }, + { + .type = BNXT_ULP_DF_PARAM_TYPE_LAST, + .length = 0, + .value = {0} + } + }; + return ulp_default_flow_create(bp, param_list, flow_type, + vfr_port_id, flow_id); +} + +int +bnxt_ulp_create_vfr_default_rules(void *vf_rep) +{ + struct bnxt_ulp_vfr_rule_info *info; + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_vf_rep *vfr = vf_rep; + struct bnxt *bp = vfr->bp; + u16 vfr_port_id; + int rc; + + if (!bp) + return -EINVAL; + + ulp_ctx = bp->ulp_ctx; + vfr_port_id = bp->pf.vf[vfr->vf_idx].fw_fid; + + info = bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(ulp_ctx, vfr_port_id); + if (!info) { + netdev_dbg(bp->dev, "Failed to get vfr ulp context\n"); + return -EINVAL; + } + + if (info->valid) { + netdev_dbg(bp->dev, "VFR already allocated\n"); + return -EINVAL; + } + + memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info)); + rc = bnxt_create_port_vfr_default_rule(bp, BNXT_ULP_DF_TPL_DEFAULT_VFR, + vfr_port_id, + &info->vfr_flow_id); + if (rc) { + netdev_dbg(bp->dev, "Failed to create VFR default rule\n"); + goto error; + } + + /* If the template already set the bd action, skip this. 
+ * This is handled differently between Thor and Thor2 + */ + if (!BNXT_CHIP_P7(bp) || !vfr->tx_cfa_action) { + rc = ulp_default_flow_db_cfa_action_get(ulp_ctx, + info->vfr_flow_id, + &vfr->tx_cfa_action); + + if (rc) { + netdev_dbg(bp->dev, "Failed to get the tx cfa action\n"); + goto error; + } + } + netdev_dbg(bp->dev, "VFR: Default flow id %d Tx cfa action is 0x%x\n", + info->vfr_flow_id, vfr->tx_cfa_action); + + /* Update the other details */ + info->valid = true; + info->parent_port_id = bp->pf.vf[vfr->vf_idx].fw_fid; + + return 0; + +error: + if (info->vfr_flow_id) + ulp_default_flow_destroy(bp, info->vfr_flow_id); + + return rc; +} + +int +bnxt_ulp_delete_vfr_default_rules(void *vf_rep) +{ + struct bnxt_ulp_vfr_rule_info *info; + struct bnxt_ulp_context *ulp_ctx; + struct bnxt_vf_rep *vfr = vf_rep; + struct bnxt *bp = vfr->bp; + u16 vfr_port_id; + + if (!bp || !BNXT_TRUFLOW_EN(bp)) + return 0; + + ulp_ctx = bp->ulp_ctx; + vfr_port_id = bp->pf.vf[vfr->vf_idx].fw_fid; + info = bnxt_ulp_cntxt_ptr2_ulp_vfr_info_get(ulp_ctx, vfr_port_id); + if (!info) { + netdev_dbg(bp->dev, "Failed to get vfr ulp context\n"); + return -EINVAL; + } + + if (!info->valid) { + netdev_dbg(bp->dev, "VFR already freed\n"); + return -EINVAL; + } + ulp_default_flow_destroy(bp, info->vfr_flow_id); + vfr->tx_cfa_action = 0; + memset(info, 0, sizeof(struct bnxt_ulp_vfr_rule_info)); + + return 0; +} + +#else + +int +bnxt_ulp_create_vfr_default_rules(void *vf_rep) +{ + return -EINVAL; +} + +int +bnxt_ulp_delete_vfr_default_rules(void *vf_rep) +{ + return -EINVAL; +} + +#endif +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.c new file mode 100644 index 000000000000..f13c5276bb9f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.c @@ -0,0 +1,589 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights 
reserved. + */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p5.h" +#include "bnxt_tf_common.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "tf_tbl.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +static const struct bnxt_ulp_fc_core_ops * +bnxt_ulp_fc_ops_get(struct bnxt_ulp_context *ctxt) +{ + const struct bnxt_ulp_fc_core_ops *func_ops; + enum bnxt_ulp_device_id dev_id; + int rc; + + rc = bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id); + if (rc) + return NULL; + + switch (dev_id) { + case BNXT_ULP_DEVICE_ID_THOR2: + func_ops = &ulp_fc_tfc_core_ops; + break; + case BNXT_ULP_DEVICE_ID_THOR: + case BNXT_ULP_DEVICE_ID_WH_PLUS: + func_ops = &ulp_fc_tf_core_ops; + break; + default: + func_ops = NULL; + break; + } + return func_ops; +} + +static int +ulp_fc_mgr_shadow_mem_alloc(struct bnxt_ulp_context *ulp_ctx, + struct hw_fc_mem_info *parms, int size) +{ + /* Allocate memory*/ + if (!parms) + return -EINVAL; + + parms->mem_va = kzalloc(L1_CACHE_ALIGN(size), GFP_KERNEL); + if (!parms->mem_va) + return -ENOMEM; + + parms->mem_pa = (void *)__pa(parms->mem_va); + return 0; +} + +static void +ulp_fc_mgr_shadow_mem_free(struct hw_fc_mem_info *parms) +{ + kfree(parms->mem_va); +} + +/** + * Allocate and Initialize all Flow Counter Manager resources for this ulp + * context. + * + * @ctxt: The ulp context for the Flow Counter manager. 
+ * + */ +int +ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt) +{ + u32 dev_id, sw_acc_cntr_tbl_sz, hw_fc_mem_info_sz; + const struct bnxt_ulp_fc_core_ops *fc_ops; + struct bnxt_ulp_device_params *dparms; + struct bnxt_ulp_fc_info *ulp_fc_info; + uint32_t flags = 0; + int i, rc; + + if (!ctxt) { + netdev_dbg(NULL, "Invalid ULP CTXT\n"); + return -EINVAL; + } + + if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) { + netdev_dbg(ctxt->bp->dev, "Failed to get device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(ctxt->bp->dev, "Failed to device parms\n"); + return -EINVAL; + } + + /* update the features list */ + if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_SW_AGG) + flags = ULP_FLAG_FC_SW_AGG_EN; + if (dparms->dev_features & BNXT_ULP_DEV_FT_STAT_PARENT_AGG) + flags |= ULP_FLAG_FC_PARENT_AGG_EN; + + fc_ops = bnxt_ulp_fc_ops_get(ctxt); + if (!fc_ops) { + netdev_dbg(ctxt->bp->dev, "Failed to get the counter ops\n"); + return -EINVAL; + } + + ulp_fc_info = kzalloc(sizeof(*ulp_fc_info), GFP_KERNEL); + if (!ulp_fc_info) + goto error; + + ulp_fc_info->fc_ops = fc_ops; + ulp_fc_info->flags = flags; + + mutex_init(&ulp_fc_info->fc_lock); + + /* Add the FC info tbl to the ulp context. 
*/ + bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, ulp_fc_info); + + ulp_fc_info->num_counters = dparms->flow_count_db_entries; + if (!ulp_fc_info->num_counters) { + /* No need for software counters, call fw directly */ + netdev_dbg(ctxt->bp->dev, "Sw flow counter support not enabled\n"); + return 0; + } + + /* no need to allocate sw aggregation memory if agg is disabled */ + if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)) + return 0; + + sw_acc_cntr_tbl_sz = sizeof(struct sw_acc_counter) * + dparms->flow_count_db_entries; + + for (i = 0; i < TF_DIR_MAX; i++) { + ulp_fc_info->sw_acc_tbl[i] = kzalloc(sw_acc_cntr_tbl_sz, + GFP_KERNEL); + if (!ulp_fc_info->sw_acc_tbl[i]) + goto error; + } + + hw_fc_mem_info_sz = sizeof(u64) * dparms->flow_count_db_entries; + + for (i = 0; i < TF_DIR_MAX; i++) { + rc = ulp_fc_mgr_shadow_mem_alloc(ctxt, + &ulp_fc_info->shadow_hw_tbl[i], + hw_fc_mem_info_sz); + if (rc) + goto error; + } + + ulp_fc_mgr_thread_start(ctxt); + + return 0; + +error: + ulp_fc_mgr_deinit(ctxt); + + return -ENOMEM; +} + +/** + * Release all resources in the Flow Counter Manager for this ulp context + * + * @ctxt: The ulp context for the Flow Counter manager + * + */ +int +ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + struct hw_fc_mem_info *shd_info; + int i; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + + if (!ulp_fc_info) + return -EINVAL; + + if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN) + ulp_fc_mgr_thread_cancel(ctxt); + + mutex_destroy(&ulp_fc_info->fc_lock); + + if (ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN) { + for (i = 0; i < TF_DIR_MAX; i++) + kfree(ulp_fc_info->sw_acc_tbl[i]); + + for (i = 0; i < TF_DIR_MAX; i++) { + shd_info = &ulp_fc_info->shadow_hw_tbl[i]; + ulp_fc_mgr_shadow_mem_free(shd_info); + } + } + + kfree(ulp_fc_info); + + /* Safe to ignore on deinit */ + (void)bnxt_ulp_cntxt_ptr2_fc_info_set(ctxt, NULL); + + return 0; +} + +/** + * Check if the alarm thread that walks through the 
flows is started + * + * @ctxt: The ulp context for the flow counter manager + * + */ +bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + + if (ulp_fc_info) + return !!(ulp_fc_info->flags & ULP_FLAG_FC_THREAD); + + return false; +} + +/** + * Setup the Flow counter timer thread that will fetch/accumulate raw counter + * data from the chip's internal flow counters + * + * @ctxt: The ulp context for the flow counter manager + * + */ +void +ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + struct delayed_work *work = &ctxt->cfg_data->fc_work; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + + INIT_DELAYED_WORK(work, ulp_fc_mgr_alarm_cb); + schedule_delayed_work(work, msecs_to_jiffies(1000)); + if (ulp_fc_info) + ulp_fc_info->flags |= ULP_FLAG_FC_THREAD; +} + +/** + * Cancel the alarm handler + * + * @ctxt: The ulp context for the flow counter manager + * + */ +void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + cancel_delayed_work_sync(&ctxt->cfg_data->fc_work); + if (ulp_fc_info) + ulp_fc_info->flags &= ~ULP_FLAG_FC_THREAD; +} + +/** + * Alarm handler that will issue the TF-Core API to fetch + * data from the chip's internal flow counters + * + * @ctxt: The ulp context for the flow counter manager + * + */ + +void +ulp_fc_mgr_alarm_cb(struct work_struct *work) +{ + u32 dev_id, hw_cntr_id = 0, num_entries = 0; + struct bnxt_ulp_device_params *dparms; + struct bnxt_ulp_fc_info *ulp_fc_info; + struct delayed_work *fc_work = NULL; + struct bnxt_ulp_data *cfg_data; + struct bnxt_ulp_context *ctxt; + struct bnxt *bp; + enum tf_dir dir; + unsigned int j; + int rc = 0; + void *tfp; + + cfg_data = container_of(work, struct bnxt_ulp_data, fc_work.work); + fc_work = &cfg_data->fc_work; + + 
bnxt_ulp_cntxt_lock_acquire(); + ctxt = bnxt_ulp_cntxt_entry_lookup(cfg_data); + if (!ctxt) + goto err; + if (!ctxt->cfg_data) + goto err; + + bp = ctxt->bp; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + if (!ulp_fc_info) + goto err; + + if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) { + netdev_dbg(ctxt->bp->dev, "Failed to get dev_id from ulp\n"); + goto err; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(ctxt->bp->dev, "Failed to device parms\n"); + goto err; + } + + /* Take the fc_lock to ensure no flow is destroyed + * during the bulk get + */ + mutex_lock(&ulp_fc_info->fc_lock); + + if (!ulp_fc_info->num_entries) { + mutex_unlock(&ulp_fc_info->fc_lock); + goto err; + } + + num_entries = dparms->flow_count_db_entries / 2; + for (dir = 0; dir < TF_DIR_MAX; dir++) { + for (j = 0; j < num_entries; j++) { + if (!ulp_fc_info->sw_acc_tbl[dir][j].valid) + continue; + hw_cntr_id = ulp_fc_info->sw_acc_tbl[dir][j].hw_cntr_id; + tfp = ctxt->ops->ulp_tfp_get(ctxt, + ulp_fc_info->sw_acc_tbl[dir][j].session_type); + if (!tfp) { + mutex_unlock(&ulp_fc_info->fc_lock); + netdev_dbg(bp->dev, + "Failed to get the truflow pointer\n"); + goto err; + } + rc = ulp_get_single_flow_stat(ctxt, tfp, ulp_fc_info, dir, + hw_cntr_id, dparms); + if (rc) + break; + } + } + + mutex_unlock(&ulp_fc_info->fc_lock); + +err: + bnxt_ulp_cntxt_lock_release(); + if (fc_work) + schedule_delayed_work(fc_work, msecs_to_jiffies(1000)); +} + +/** + * Set the starting index that indicates the first HW flow + * counter ID + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @start_idx: The HW flow counter ID + * + */ +bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + + if (ulp_fc_info) + return ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set; + + return false; +} + +/** + * 
Set the starting index that indicates the first HW flow + * counter ID + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @start_idx: The HW flow counter ID + * + */ +int ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 start_idx) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + + if (!ulp_fc_info) + return -EIO; + + if (!ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set) { + ulp_fc_info->shadow_hw_tbl[dir].start_idx = start_idx; + ulp_fc_info->shadow_hw_tbl[dir].start_idx_is_set = true; + } + + return 0; +} + +/** + * Set the corresponding SW accumulator table entry based on + * the difference between this counter ID and the starting + * counter ID. Also, keep track of num of active counter enabled + * flows. + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @hw_cntr_id: The HW flow counter ID + * + */ +int ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 hw_cntr_id, enum bnxt_ulp_session_type session_type) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + u32 sw_cntr_idx; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + if (!ulp_fc_info) + return -EIO; + + if (!ulp_fc_info->num_counters) + return 0; + + mutex_lock(&ulp_fc_info->fc_lock); + sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = true; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = hw_cntr_id; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = session_type; + ulp_fc_info->num_entries++; + mutex_unlock(&ulp_fc_info->fc_lock); + + return 0; +} + +/** + * Reset the corresponding SW accumulator table entry based on + * the difference between this counter ID and the starting + * counter ID. 
+ * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @hw_cntr_id: The HW flow counter ID + * + */ +int ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 hw_cntr_id) +{ + struct bnxt_ulp_fc_info *ulp_fc_info; + u32 sw_cntr_idx; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + if (!ulp_fc_info) + return -EIO; + + if (!ulp_fc_info->num_counters) + return 0; + + mutex_lock(&ulp_fc_info->fc_lock); + sw_cntr_idx = hw_cntr_id - ulp_fc_info->shadow_hw_tbl[dir].start_idx; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].valid = false; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].hw_cntr_id = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].session_type = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pc_flow_idx = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].pkt_count_last_polled = 0; + ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx].byte_count_last_polled = 0; + ulp_fc_info->num_entries--; + mutex_unlock(&ulp_fc_info->fc_lock); + + return 0; +} + +/** + * Fill packets & bytes with the values obtained and + * accumulated locally. 
+ * + * @ctxt: The ulp context for the flow counter manager + * + * @flow_id: The HW flow ID + * + * @packets: + * @bytes: + * @lastused: + * @resource_hndl: if not null return the hw counter index + * + */ + +int ulp_tf_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, + u32 flow_id, + u64 *packets, u64 *bytes, + unsigned long *lastused, + u64 *resource_hndl) +{ + const struct bnxt_ulp_fc_core_ops *fc_ops; + struct sw_acc_counter *sw_acc_tbl_entry; + struct bnxt_ulp_fc_info *ulp_fc_info; + struct ulp_flow_db_res_params params; + u32 hw_cntr_id = 0, sw_cntr_idx = 0; + bool found_cntr_resource = false; + u32 nxt_resource_index = 0; + enum tf_dir dir; + int rc = 0; + + ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ctxt); + if (!ulp_fc_info) + return -ENODEV; + + fc_ops = ulp_fc_info->fc_ops; + + mutex_lock(&ctxt->cfg_data->flow_db_lock); + do { + rc = ulp_flow_db_resource_get(ctxt, + BNXT_ULP_FDB_TYPE_REGULAR, + flow_id, + &nxt_resource_index, + ¶ms); + if (params.resource_func == + BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE && + (params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT || + params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT || + params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC)) { + found_cntr_resource = true; + break; + } + if (params.resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) { + found_cntr_resource = true; + break; + } + } while (!rc && nxt_resource_index); + + if (rc || !found_cntr_resource) + goto exit; + + dir = params.direction; + if (resource_hndl) + *resource_hndl = params.resource_hndl; + + if (!(ulp_fc_info->flags & ULP_FLAG_FC_SW_AGG_EN)) { + rc = fc_ops->ulp_flow_stat_get(ctxt, ¶ms, + packets, bytes); + goto exit; + } + hw_cntr_id = params.resource_hndl; + + if (params.resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) { + hw_cntr_id = params.resource_hndl; + if (!ulp_fc_info->num_counters) { + rc = 
fc_ops->ulp_flow_stat_get(ctxt, ¶ms, + packets, bytes); + goto exit; + } + + /* TODO: + * Think about optimizing with try_lock later + */ + mutex_lock(&ulp_fc_info->fc_lock); + sw_cntr_idx = hw_cntr_id - + ulp_fc_info->shadow_hw_tbl[dir].start_idx; + sw_acc_tbl_entry = &ulp_fc_info->sw_acc_tbl[dir][sw_cntr_idx]; + if (sw_acc_tbl_entry->pkt_count) { + *packets = sw_acc_tbl_entry->pkt_count; + *bytes = sw_acc_tbl_entry->byte_count; + sw_acc_tbl_entry->pkt_count = 0; + sw_acc_tbl_entry->byte_count = 0; + *lastused = jiffies; + } + mutex_unlock(&ulp_fc_info->fc_lock); + } else if (params.resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) { + rc = fc_ops->ulp_flow_stat_get(ctxt, ¶ms, packets, bytes); + } else { + rc = -EINVAL; + } + +exit: + mutex_unlock(&ctxt->cfg_data->flow_db_lock); + return rc; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.h new file mode 100644 index 000000000000..3621a40b4d6c --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr.h @@ -0,0 +1,195 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_FC_MGR_H_ +#define _ULP_FC_MGR_H_ + +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "ulp_flow_db.h" +#include "tf_core.h" + +#define ULP_FLAG_FC_THREAD BIT(0) +#define ULP_FLAG_FC_SW_AGG_EN BIT(1) +#define ULP_FLAG_FC_PARENT_AGG_EN BIT(2) +#define ULP_FC_TIMER 100/* Timer freq in Sec Flow Counters */ + +/* Macros to extract packet/byte counters from a 64-bit flow counter. 
*/ +#define FLOW_CNTR_BYTE_WIDTH 36 +#define FLOW_CNTR_BYTE_MASK (((u64)1 << FLOW_CNTR_BYTE_WIDTH) - 1) + +#define FLOW_CNTR_PKTS(v, d) (((v) & (d)->packet_count_mask) >> \ + (d)->packet_count_shift) +#define FLOW_CNTR_BYTES(v, d) (((v) & (d)->byte_count_mask) >> \ + (d)->byte_count_shift) + +#define FLOW_CNTR_PKTS_MAX(d) (((u64)1 << (64 - (d)->packet_count_shift)) - 1) +#define FLOW_CNTR_BYTES_MAX(d) (((u64)1 << (d)->packet_count_shift) - 1) + +#define FLOW_CNTR_PC_FLOW_VALID 0x1000000 + +struct bnxt_ulp_fc_core_ops { + int + (*ulp_flow_stat_get)(struct bnxt_ulp_context *ctxt, + struct ulp_flow_db_res_params *res, + u64 *packets, u64 *bytes); + int + (*ulp_flow_stats_accum_update)(struct bnxt_ulp_context *ctxt, + struct bnxt_ulp_fc_info *ulp_fc_info, + struct bnxt_ulp_device_params *dparms); +}; + +struct sw_acc_counter { + u64 pkt_count; + u64 pkt_count_last_polled; + u64 byte_count; + u64 byte_count_last_polled; + bool valid; + u32 hw_cntr_id; + u32 pc_flow_idx; + enum bnxt_ulp_session_type session_type; +}; + +struct hw_fc_mem_info { + void *mem_va; /* mem_va, pointer to the allocated memory. */ + void *mem_pa; /* mem_pa, physical address of the allocated memory. 
*/ + u32 start_idx; + bool start_idx_is_set; +}; + +struct bnxt_ulp_fc_info { + struct sw_acc_counter *sw_acc_tbl[TF_DIR_MAX]; + struct hw_fc_mem_info shadow_hw_tbl[TF_DIR_MAX]; + u32 flags; + u32 num_entries; + struct mutex fc_lock; /* Serialize flow counter thread operations */ + u32 num_counters; + const struct bnxt_ulp_fc_core_ops *fc_ops; +}; + +int +ulp_fc_mgr_init(struct bnxt_ulp_context *ctxt); + +/* Release all resources in the flow counter manager for this ulp context + * + * @ctxt: The ulp context for the flow counter manager + */ +int +ulp_fc_mgr_deinit(struct bnxt_ulp_context *ctxt); + +/* Setup the Flow counter timer thread that will fetch/accumulate raw counter + * data from the chip's internal flow counters + * + * @ctxt: The ulp context for the flow counter manager + */ +void +ulp_fc_mgr_thread_start(struct bnxt_ulp_context *ctxt); + +/* Alarm handler that will issue the TF-Core API to fetch + * data from the chip's internal flow counters + * + * @ctxt: The ulp context for the flow counter manager + */ +void +ulp_fc_mgr_alarm_cb(struct work_struct *work); + +/* Cancel the alarm handler + * + * @ctxt: The ulp context for the flow counter manager + * + */ +void ulp_fc_mgr_thread_cancel(struct bnxt_ulp_context *ctxt); + +/* Set the starting index that indicates the first HW flow + * counter ID + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @start_idx: The HW flow counter ID + * + */ +int ulp_fc_mgr_start_idx_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 start_idx); + +/* Set the corresponding SW accumulator table entry based on + * the difference between this counter ID and the starting + * counter ID. Also, keep track of num of active counter enabled + * flows. 
+ * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @hw_cntr_id: The HW flow counter ID + * + */ +int ulp_fc_mgr_cntr_set(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 hw_cntr_id, + enum bnxt_ulp_session_type session_type); +/* Reset the corresponding SW accumulator table entry based on + * the difference between this counter ID and the starting + * counter ID. + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + * @hw_cntr_id: The HW flow counter ID + * + */ +int ulp_fc_mgr_cntr_reset(struct bnxt_ulp_context *ctxt, enum tf_dir dir, + u32 hw_cntr_id); +/* Check if the starting HW counter ID value is set in the + * flow counter manager. + * + * @ctxt: The ulp context for the flow counter manager + * + * @dir: The direction of the flow + * + */ +bool ulp_fc_mgr_start_idx_isset(struct bnxt_ulp_context *ctxt, enum tf_dir dir); + +/* Check if the alarm thread that walks through the flows is started + * + * @ctxt: The ulp context for the flow counter manager + * + */ +bool ulp_fc_mgr_thread_isstarted(struct bnxt_ulp_context *ctxt); + +/* Fill packets & bytes with the values obtained and + * accumulated locally. 
+ * + * @ctxt: The ulp context for the flow counter manager + * + * @flow_id: The HW flow ID + * + * @packets: + * @bytes: + * @lastused: + * @resource_hndl: if not null returns the hw counter id + * + */ +int ulp_tf_fc_mgr_query_count_get(struct bnxt_ulp_context *ctxt, + u32 flow_id, + u64 *packets, u64 *bytes, + unsigned long *lastused, + u64 *resource_hndl); + +int ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt, + struct tf *tfp, + struct bnxt_ulp_fc_info *fc_info, + enum tf_dir dir, + u32 hw_cntr_id, + struct bnxt_ulp_device_params *dparms); + +int ulp_tf_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt, + struct ulp_flow_db_res_params *res, + u64 *packets, u64 *bytes); + +extern const struct bnxt_ulp_fc_core_ops ulp_fc_tf_core_ops; +extern const struct bnxt_ulp_fc_core_ops ulp_fc_tfc_core_ops; +#endif /* _ULP_FC_MGR_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p5.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p5.c new file mode 100644 index 000000000000..a41ac97d2b58 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p5.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p5.h" +#include "bnxt_tf_common.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "tf_tbl.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +int +ulp_tf_fc_tf_flow_stat_get(struct bnxt_ulp_context *ctxt, + struct ulp_flow_db_res_params *res, + u64 *packets, u64 *bytes) +{ + struct tf_get_tbl_entry_parms parms = { 0 }; + struct bnxt_ulp_device_params *dparms; + struct bnxt *bp; + u32 dev_id = 0; + struct tf *tfp; + u64 stats = 0; + int rc = 0; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ctxt, + ulp_flow_db_shared_session_get(res)); + if (!tfp) + return -EINVAL; + + bp = tfp->bp; + + if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) { + netdev_dbg(ctxt->bp->dev, "Failed to get device id\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(bp->dev, "Failed to device parms\n"); + return -EINVAL; + } + parms.dir = res->direction; + parms.type = TF_TBL_TYPE_ACT_STATS_64; + parms.idx = res->resource_hndl; + parms.data_sz_in_bytes = sizeof(u64); + parms.data = (u8 *)&stats; + rc = tf_get_tbl_entry(tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, + "Get failed for id:0x%x rc:%d\n", + parms.idx, rc); + return rc; + } + + *packets = FLOW_CNTR_PKTS(stats, dparms); + *bytes = FLOW_CNTR_BYTES(stats, dparms); + + return rc; +} + +int +ulp_get_single_flow_stat(struct bnxt_ulp_context *ctxt, + struct tf *tfp, + struct bnxt_ulp_fc_info *fc_info, + enum tf_dir dir, + u32 hw_cntr_id, + struct bnxt_ulp_device_params *dparms) +{ + struct sw_acc_counter *sw_acc_tbl_entry = NULL; + struct tf_get_tbl_entry_parms parms = { 0 }; + struct bnxt *bp = tfp->bp; + u32 sw_cntr_indx = 0; + u64 stats = 0; + u64 delta_pkts = 0; + u64 delta_bytes = 0; + u64 cur_pkts = 0; + u64 cur_bytes = 0; + int rc = 0; + + parms.dir = dir; + parms.type = TF_TBL_TYPE_ACT_STATS_64; + 
parms.idx = hw_cntr_id; + /* TODO: + * Size of an entry needs to obtained from template + */ + parms.data_sz_in_bytes = sizeof(u64); + parms.data = (u8 *)&stats; + rc = tf_get_tbl_entry(tfp, &parms); + if (rc) { + netdev_dbg(bp->dev, + "Get failed for id:0x%x rc:%d\n", + parms.idx, rc); + return rc; + } + + /* TBD - Get PKT/BYTE COUNT SHIFT/MASK from Template */ + sw_cntr_indx = hw_cntr_id - fc_info->shadow_hw_tbl[dir].start_idx; + sw_acc_tbl_entry = &fc_info->sw_acc_tbl[dir][sw_cntr_indx]; + /* Some applications may accumulate the flow counters while some + * may not. In cases where the application is accumulating the counters + * the PMD need not do the accumulation itself and viceversa to report + * the correct flow counters. + */ + + cur_pkts = FLOW_CNTR_PKTS(stats, dparms); + cur_bytes = FLOW_CNTR_BYTES(stats, dparms); + + delta_pkts = ((cur_pkts - sw_acc_tbl_entry->pkt_count_last_polled) & + FLOW_CNTR_PKTS_MAX(dparms)); + delta_bytes = ((cur_bytes - sw_acc_tbl_entry->byte_count_last_polled) & + FLOW_CNTR_BYTES_MAX(dparms)); + + sw_acc_tbl_entry->pkt_count += delta_pkts; + sw_acc_tbl_entry->byte_count += delta_bytes; + + netdev_dbg(bp->dev, + " STATS_64 dir %d for id:0x%x cc:%llu tot:%llu lp:%llu dp:0x%llx\n", + dir, parms.idx, + cur_pkts, + sw_acc_tbl_entry->pkt_count, + sw_acc_tbl_entry->pkt_count_last_polled, + delta_pkts); + + /* Update the last polled */ + sw_acc_tbl_entry->pkt_count_last_polled = cur_pkts; + sw_acc_tbl_entry->byte_count_last_polled = cur_bytes; + + return rc; +} + +const struct bnxt_ulp_fc_core_ops ulp_fc_tf_core_ops = { + .ulp_flow_stat_get = ulp_tf_fc_tf_flow_stat_get, + .ulp_flow_stats_accum_update = NULL, +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p7.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p7.c new file mode 100644 index 000000000000..13720b78890d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_fc_mgr_p7.c @@ -0,0 
+1,108 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2021 Broadcom + * All rights reserved. + */ + +#include +#include +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p7.h" +#include "bnxt_tf_common.h" +#include "ulp_fc_mgr.h" +#include "ulp_flow_db.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "tfc.h" +#include "tfc_debug.h" +#include "tfc_action_handle.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +/* Need to create device parms for these values and handle + * alignment dynamically. + */ +#define ULP_FC_TFC_PKT_CNT_OFFS 0 +#define ULP_FC_TFC_BYTE_CNT_OFFS 1 +#define ULP_TFC_CNTR_READ_BYTES 32 +#define ULP_TFC_CNTR_ALIGN 32 +#define ULP_TFC_ACT_WORD_SZ 32 + +static int +ulp_tf_fc_tfc_update_accum_stats(struct bnxt_ulp_context *ctxt, + struct bnxt_ulp_fc_info *fc_info, + struct bnxt_ulp_device_params *dparms) +{ + /* Accumulation is not supported, just return success */ + return 0; +} + +static void *data; + +static int +ulp_tf_fc_tfc_flow_stat_get(struct bnxt_ulp_context *ctxt, + struct ulp_flow_db_res_params *res, + u64 *packets, u64 *bytes) +{ + u16 data_size = ULP_TFC_CNTR_READ_BYTES; + struct tfc_cmm_clr cmm_clr = { 0 }; + struct tfc_cmm_info cmm_info; + dma_addr_t pa_addr; + struct tfc *tfcp; + u16 word_size; + u64 *data64; + int rc = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(ctxt, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ctxt->bp->dev, "Failed to get tf object\n"); + return -EINVAL; + } + + if (!data) { + data = dma_alloc_coherent(&ctxt->bp->pdev->dev, ULP_TFC_CNTR_READ_BYTES, + &pa_addr, GFP_KERNEL); + if (!data) + return -EINVAL; + } + + /* Ensure that data is large enough to read words */ + word_size = (data_size + ULP_TFC_ACT_WORD_SZ - 1) / ULP_TFC_ACT_WORD_SZ; + if (word_size * ULP_TFC_ACT_WORD_SZ > data_size) { + netdev_dbg(ctxt->bp->dev, "Insufficient size %d for stat get\n", + data_size); + 
return -EINVAL; + } + + data64 = (u64 *)data; + cmm_info.rsubtype = CFA_RSUBTYPE_CMM_ACT; + cmm_info.act_handle = res->resource_hndl; + cmm_info.dir = (enum cfa_dir)res->direction; + /* Read and Clear the hw stat if requested */ + cmm_clr.clr = true; + cmm_clr.offset_in_byte = 0; + cmm_clr.sz_in_byte = sizeof(data64[ULP_FC_TFC_PKT_CNT_OFFS]) + + sizeof(data64[ULP_FC_TFC_BYTE_CNT_OFFS]); + rc = tfc_act_get(tfcp, &cmm_info, &cmm_clr, data, &word_size); + if (rc) { + netdev_dbg(ctxt->bp->dev, + "Failed to read stat memory hndl=%llu\n", + res->resource_hndl); + return rc; + } + if (data64[ULP_FC_TFC_PKT_CNT_OFFS]) + *packets = data64[ULP_FC_TFC_PKT_CNT_OFFS]; + + if (data64[ULP_FC_TFC_BYTE_CNT_OFFS]) + *bytes = data64[ULP_FC_TFC_BYTE_CNT_OFFS]; + + return rc; +} + +const struct bnxt_ulp_fc_core_ops ulp_fc_tfc_core_ops = { + .ulp_flow_stat_get = ulp_tf_fc_tfc_flow_stat_get, + .ulp_flow_stats_accum_update = ulp_tf_fc_tfc_update_accum_stats +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.c new file mode 100644 index 000000000000..083e7e1276d8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.c @@ -0,0 +1,1951 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "ulp_utils.h" +#include "ulp_template_struct.h" +#include "ulp_mapper.h" +#include "ulp_flow_db.h" +#include "ulp_fc_mgr.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#define ULP_FLOW_DB_RES_DIR_BIT 31 +#define ULP_FLOW_DB_RES_DIR_MASK 0x80000000 +#define ULP_FLOW_DB_RES_FUNC_BITS 28 +#define ULP_FLOW_DB_RES_FUNC_MASK 0x70000000 +#define ULP_FLOW_DB_RES_NXT_MASK 0x0FFFFFFF +#define ULP_FLOW_DB_RES_FUNC_UPPER 5 +#define ULP_FLOW_DB_RES_FUNC_NEED_LOWER 0x80 +#define ULP_FLOW_DB_RES_FUNC_LOWER_MASK 0x1F + +/* Macro to copy the nxt_resource_idx */ +#define ULP_FLOW_DB_RES_NXT_SET(dst, src) {(dst) |= ((src) &\ + ULP_FLOW_DB_RES_NXT_MASK); } +#define ULP_FLOW_DB_RES_NXT_RESET(dst) ((dst) &= ~(ULP_FLOW_DB_RES_NXT_MASK)) + +/** + * Helper function to set the bit in the active flows + * No validation is done in this function. + * + * @flow_db: Ptr to flow database + * @flow_type: - specify default or regular + * @idx: The index to bit to be set or reset. + * @flag: 1 to set and 0 to reset. 
+ * + * returns none + */ +static void +ulp_flow_db_active_flows_bit_set(struct bnxt_ulp_flow_db *flow_db, + enum bnxt_ulp_fdb_type flow_type, + u32 idx, + u32 flag) +{ + struct bnxt_ulp_flow_tbl *f_tbl = &flow_db->flow_tbl; + u32 a_idx = idx / ULP_INDEX_BITMAP_SIZE; + + if (flag) { + if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type == + BNXT_ULP_FDB_TYPE_RID) + ULP_INDEX_BITMAP_SET(f_tbl->active_reg_flows[a_idx], + idx); + if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type == + BNXT_ULP_FDB_TYPE_RID) + ULP_INDEX_BITMAP_SET(f_tbl->active_dflt_flows[a_idx], + idx); + } else { + if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR || flow_type == + BNXT_ULP_FDB_TYPE_RID) + ULP_INDEX_BITMAP_RESET(f_tbl->active_reg_flows[a_idx], + idx); + if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT || flow_type == + BNXT_ULP_FDB_TYPE_RID) + ULP_INDEX_BITMAP_RESET(f_tbl->active_dflt_flows[a_idx], + idx); + } +} + +/** + * Helper function to check if given fid is active flow. + * No validation being done in this function. + * + * @flow_db: Ptr to flow database + * @flow_type: - specify default or regular + * @idx: The index to bit to be set or reset. + * + * returns 1 on set or 0 if not set. 
+ */ +static int +ulp_flow_db_active_flows_bit_is_set(struct bnxt_ulp_flow_db *flow_db, + enum bnxt_ulp_fdb_type flow_type, + u32 idx) +{ + struct bnxt_ulp_flow_tbl *f_tbl = &flow_db->flow_tbl; + u32 a_idx = idx / ULP_INDEX_BITMAP_SIZE; + u32 reg, dflt; + + reg = ULP_INDEX_BITMAP_GET(f_tbl->active_reg_flows[a_idx], idx); + dflt = ULP_INDEX_BITMAP_GET(f_tbl->active_dflt_flows[a_idx], idx); + + switch (flow_type) { + case BNXT_ULP_FDB_TYPE_REGULAR: + return (reg && !dflt); + case BNXT_ULP_FDB_TYPE_DEFAULT: + return (!reg && dflt); + case BNXT_ULP_FDB_TYPE_RID: + return (reg && dflt); + default: + return 0; + } +} + +static inline enum tf_dir +ulp_flow_db_resource_dir_get(struct ulp_fdb_resource_info *res_info) +{ + return ((res_info->nxt_resource_idx & ULP_FLOW_DB_RES_DIR_MASK) >> + ULP_FLOW_DB_RES_DIR_BIT); +} + +static u8 +ulp_flow_db_resource_func_get(struct ulp_fdb_resource_info *res_info) +{ + u8 func; + + func = (((res_info->nxt_resource_idx & ULP_FLOW_DB_RES_FUNC_MASK) >> + ULP_FLOW_DB_RES_FUNC_BITS) << ULP_FLOW_DB_RES_FUNC_UPPER); + /* The resource func is split into upper and lower */ + if (func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) + return (func | res_info->resource_func_lower); + return func; +} + +/** + * Helper function to copy the resource params to resource info + * No validation being done in this function. 
+ * + * @resource_info: Ptr to resource information + * @params: The input params from the caller + * returns none + */ +static void +ulp_flow_db_res_params_to_info(struct ulp_fdb_resource_info *resource_info, + struct ulp_flow_db_res_params *params) +{ + u32 resource_func; + + resource_info->nxt_resource_idx |= ((params->direction << + ULP_FLOW_DB_RES_DIR_BIT) & + ULP_FLOW_DB_RES_DIR_MASK); + resource_func = (params->resource_func >> ULP_FLOW_DB_RES_FUNC_UPPER); + resource_info->nxt_resource_idx |= ((resource_func << + ULP_FLOW_DB_RES_FUNC_BITS) & + ULP_FLOW_DB_RES_FUNC_MASK); + + if (params->resource_func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) { + /* Break the resource func into two parts */ + resource_func = (params->resource_func & + ULP_FLOW_DB_RES_FUNC_LOWER_MASK); + resource_info->resource_func_lower = resource_func; + } + + /* Store the handle as 64bit only for EM table entries */ + if (params->resource_func != BNXT_ULP_RESOURCE_FUNC_EM_TABLE && + params->resource_func != BNXT_ULP_RESOURCE_FUNC_CMM_TABLE && + params->resource_func != BNXT_ULP_RESOURCE_FUNC_CMM_STAT) { + resource_info->resource_hndl = (u32)params->resource_hndl; + resource_info->key_data = params->key_data; + resource_info->resource_type = params->resource_type; + resource_info->resource_sub_type = params->resource_sub_type; + resource_info->fdb_flags = params->fdb_flags; + } else { + resource_info->resource_em_handle = params->resource_hndl; + resource_info->reserve_flag = params->reserve_flag; + } +} + +/** + * Helper function to copy the resource params to resource info + * No validation being done in this function. 
+ * + * @resource_info: Ptr to resource information + * @params: The output params to the caller + * + * returns none + */ +static void +ulp_flow_db_res_info_to_params(struct ulp_fdb_resource_info *resource_info, + struct ulp_flow_db_res_params *params) +{ + memset(params, 0, sizeof(struct ulp_flow_db_res_params)); + + /* use the helper function to get the resource func */ + params->direction = ulp_flow_db_resource_dir_get(resource_info); + params->resource_func = ulp_flow_db_resource_func_get(resource_info); + + if (params->resource_func == BNXT_ULP_RESOURCE_FUNC_EM_TABLE || + params->resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_TABLE || + params->resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) { + params->resource_hndl = resource_info->resource_em_handle; + params->reserve_flag = resource_info->reserve_flag; + } else if (params->resource_func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) { + params->resource_hndl = resource_info->resource_hndl; + params->key_data = resource_info->key_data; + params->resource_type = resource_info->resource_type; + params->resource_sub_type = resource_info->resource_sub_type; + params->fdb_flags = resource_info->fdb_flags; + } +} + +/** + * Helper function to allocate the flow table and initialize + * the stack for allocation operations. + * + * @flow_db: Ptr to flow database structure + * + * Returns 0 on success or negative number on failure. 
 */
/* NOTE(review): on a mid-function allocation failure this returns with the
 * earlier vzalloc()s still held; the caller (ulp_flow_db_init) releases them
 * via its error_free path, which ends in ulp_flow_db_dealloc_resource().
 */
static int
ulp_flow_db_alloc_resource(struct bnxt_ulp_context *ulp_ctxt,
			   struct bnxt_ulp_flow_db *flow_db)
{
	struct bnxt_ulp_flow_tbl *flow_tbl;
	u32 idx = 0;
	u32 size;

	flow_tbl = &flow_db->flow_tbl;

	/* One ulp_fdb_resource_info slot per resource. */
	size = sizeof(struct ulp_fdb_resource_info) * flow_tbl->num_resources;
	flow_tbl->flow_resources = vzalloc(size);

	if (!flow_tbl->flow_resources)
		return -ENOMEM;

	/* Free-list stack of resource indices. */
	size = sizeof(u32) * flow_tbl->num_resources;
	flow_tbl->flow_tbl_stack = vzalloc(size);
	if (!flow_tbl->flow_tbl_stack)
		return -ENOMEM;

	/* Active-flow bitmaps: one bit per flow, sized in bytes and rounded
	 * up to a multiple of 8 because the bitmaps are read as u64 words.
	 */
	size = (flow_tbl->num_flows / sizeof(u64)) + 1;
	size = ULP_BYTE_ROUND_OFF_8(size);
	flow_tbl->active_reg_flows = vzalloc(size);
	if (!flow_tbl->active_reg_flows)
		return -ENOMEM;

	flow_tbl->active_dflt_flows = vzalloc(size);
	if (!flow_tbl->active_dflt_flows)
		return -ENOMEM;

	/* Initialize the stack table. */
	for (idx = 0; idx < flow_tbl->num_resources; idx++)
		flow_tbl->flow_tbl_stack[idx] = idx;

	/* Ignore the first element in the list; index 0 is never a valid fid. */
	flow_tbl->head_index = 1;
	/* Tail points to the last entry in the list. */
	flow_tbl->tail_index = flow_tbl->num_resources - 1;
	return 0;
}

/**
 * Helper function to deallocate the flow table.
 *
 * @flow_db: Ptr to flow database structure
 *
 * Returns none.
 */
static void
ulp_flow_db_dealloc_resource(struct bnxt_ulp_flow_db *flow_db)
{
	struct bnxt_ulp_flow_tbl *flow_tbl = &flow_db->flow_tbl;

	/* Free all the allocated tables in the flow table.
	 * vfree(NULL) is a no-op, so a partially built table is safe here.
	 */
	vfree(flow_tbl->active_reg_flows);
	flow_tbl->active_reg_flows = NULL;

	vfree(flow_tbl->active_dflt_flows);
	flow_tbl->active_dflt_flows = NULL;

	vfree(flow_tbl->flow_tbl_stack);
	flow_tbl->flow_tbl_stack = NULL;

	vfree(flow_tbl->flow_resources);
	flow_tbl->flow_resources = NULL;
}

/**
 * Helper function to add function id to the flow table
 *
 * @flow_db: Ptr to flow table
 * @flow_id: The flow id of the flow
 * @func_id: The func_id to be set, for reset pass zero
 *
 * returns none
 */
static void
ulp_flow_db_func_id_set(struct bnxt_ulp_context *ulp_ctxt,
			struct bnxt_ulp_flow_db *flow_db,
			u32 flow_id,
			u32 func_id)
{
	/* set the function id in the function table */
	if (flow_id < flow_db->func_id_tbl_size)
		flow_db->func_id_tbl[flow_id] = func_id;
	else /* This should never happen */
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow id, flowdb corrupt\n");
}

/**
 * Initialize the parent-child database. Memory is allocated in this
 * call and assigned to the database
 *
 * @flow_db: [in] Ptr to flow table
 * @num_entries: [in] - number of entries to allocate
 *
 * Returns 0 on success or negative number on failure.
 */
static int
ulp_flow_db_parent_tbl_init(struct bnxt_ulp_flow_db *flow_db,
			    u32 num_entries)
{
	struct ulp_fdb_parent_child_db *p_db;
	u32 size, idx;

	/* Parent-child tracking is optional; zero entries disables it. */
	if (!num_entries)
		return 0;

	/* update the sizes for the allocation */
	p_db = &flow_db->parent_child_db;
	p_db->child_bitset_size = (flow_db->flow_tbl.num_flows /
				   sizeof(u64)) + 1; /* size in bytes */
	p_db->child_bitset_size = ULP_BYTE_ROUND_OFF_8(p_db->child_bitset_size);
	p_db->entries_count = num_entries;

	/* allocate the memory */
	p_db->parent_flow_tbl = vzalloc(sizeof(*p_db->parent_flow_tbl) * p_db->entries_count);
	if (!p_db->parent_flow_tbl)
		return -ENOMEM;

	size = p_db->child_bitset_size * p_db->entries_count;

	/* allocate the big chunk of memory to be statically carved into
	 * child_fid_bitset pointer.
	 */
	p_db->parent_flow_tbl_mem = vzalloc(size);
	if (!p_db->parent_flow_tbl_mem)
		return -ENOMEM;

	/* set the pointers in parent table to their offsets */
	for (idx = 0 ; idx < p_db->entries_count; idx++) {
		p_db->parent_flow_tbl[idx].child_fid_bitset =
			(u64 *)&p_db->parent_flow_tbl_mem[idx *
							  p_db->child_bitset_size];
	}
	/* success */
	return 0;
}

/*
 * Deinitialize the parent-child database. Memory is deallocated in
 * this call and all flows should have been purged before this
 * call.
 *
 * flow_db [in] Ptr to flow table
 *
 * Returns none
 */
static void
ulp_flow_db_parent_tbl_deinit(struct bnxt_ulp_flow_db *flow_db)
{
	/* free the memory related to parent child database */
	vfree(flow_db->parent_child_db.parent_flow_tbl_mem);
	flow_db->parent_child_db.parent_flow_tbl_mem = NULL;

	vfree(flow_db->parent_child_db.parent_flow_tbl);
	flow_db->parent_child_db.parent_flow_tbl = NULL;
}

/**
 * Initialize the flow database. Memory is allocated in this
 * call and assigned to the flow database.
 *
 * @ulp_ctxt: Ptr to ulp context
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt)
{
	struct bnxt_ulp_device_params *dparms;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	enum bnxt_ulp_flow_mem_type mtype;
	struct bnxt_ulp_flow_db *flow_db;
	u32 dev_id, num_flows;

	/* Get the dev specific number of flows that needed to be supported. */
	if (bnxt_ulp_cntxt_dev_id_get(ulp_ctxt, &dev_id)) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid device id\n");
		return -EINVAL;
	}

	dparms = bnxt_ulp_device_params_get(dev_id);
	if (!dparms) {
		netdev_dbg(ulp_ctxt->bp->dev, "could not fetch the device params\n");
		return -ENODEV;
	}

	flow_db = vzalloc(sizeof(*flow_db));
	if (!flow_db)
		return -ENOMEM;

	/* Attach the flow database to the ulp context. */
	bnxt_ulp_cntxt_ptr2_flow_db_set(ulp_ctxt, flow_db);

	/* Determine the number of flows based on EM type */
	if (bnxt_ulp_cntxt_mem_type_get(ulp_ctxt, &mtype))
		goto error_free;

	if (mtype == BNXT_ULP_FLOW_MEM_TYPE_INT)
		num_flows = dparms->int_flow_db_num_entries;
	else
		num_flows = dparms->ext_flow_db_num_entries;

	/* Populate the regular flow table limits. */
	flow_tbl = &flow_db->flow_tbl;
	flow_tbl->num_flows = num_flows + 1;
	flow_tbl->num_resources = ((num_flows + 1) *
				   dparms->num_resources_per_flow);

	/* Include the default flow table limits. */
	flow_tbl->num_flows += (BNXT_FLOW_DB_DEFAULT_NUM_FLOWS + 1);
	flow_tbl->num_resources += ((BNXT_FLOW_DB_DEFAULT_NUM_FLOWS + 1) *
				    BNXT_FLOW_DB_DEFAULT_NUM_RESOURCES);

	/* Allocate the resource for the flow table. */
	if (ulp_flow_db_alloc_resource(ulp_ctxt, flow_db))
		goto error_free;

	/* add 1 since we are not using index 0 for flow id */
	flow_db->func_id_tbl_size = flow_tbl->num_flows + 1;
	/* Allocate the function Id table */
	flow_db->func_id_tbl = vzalloc(flow_db->func_id_tbl_size * sizeof(u16));
	if (!flow_db->func_id_tbl)
		goto error_free;
	/* initialize the parent child database */
	if (ulp_flow_db_parent_tbl_init(flow_db, dparms->fdb_parent_flow_entries)) {
		netdev_dbg(ulp_ctxt->bp->dev, "Failed to allocate mem for parent child db\n");
		goto error_free;
	}

	/* All good so return. */
	netdev_dbg(ulp_ctxt->bp->dev, "FlowDB initialized with %d flows.\n",
		   flow_tbl->num_flows);
	return 0;
error_free:
	ulp_flow_db_deinit(ulp_ctxt);
	/* NOTE(review): every failure after the flow_db allocation is reported
	 * as -ENOMEM, even the mem_type_get() failure - consider propagating
	 * the original error code.
	 */
	return -ENOMEM;
}

/**
 * Deinitialize the flow database. Memory is deallocated in
 * this call and all flows should have been purged before this
 * call.
 *
 * @ulp_ctxt: Ptr to ulp context
 *
 * Returns 0 on success.
 */
int
ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt)
{
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db)
		return -EINVAL;

	/* Debug dump to confirm there are no active flows */
	ulp_flow_db_debug_dump(ulp_ctxt, 0);

	/* Detach the flow database from the ulp context. */
	bnxt_ulp_cntxt_ptr2_flow_db_set(ulp_ctxt, NULL);

	/* Free up all the memory. */
	ulp_flow_db_parent_tbl_deinit(flow_db);
	ulp_flow_db_dealloc_resource(flow_db);
	vfree(flow_db->func_id_tbl);
	vfree(flow_db);

	return 0;
}

/**
 * Allocate the flow database entry
 *
 * @ulp_ctxt: Ptr to ulp_context
 * @flow_type: - specify default or regular
 * @func_id: function id of the ingress port
 * @fid: The index to the flow entry
 *
 * returns 0 on success and negative on failure.
 */
int
ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt,
		      enum bnxt_ulp_fdb_type flow_type,
		      u16 func_id,
		      u32 *fid)
{
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;

	*fid = 0; /* Initialize fid to invalid value */
	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;
	/* check for max flows */
	if (flow_tbl->num_flows <= flow_tbl->head_index) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow database has reached max flows\n");
		return -ENOSPC;
	}

	/* fids are popped from the head, resources from the tail; the two
	 * must never meet.
	 */
	if (flow_tbl->tail_index <= (flow_tbl->head_index + 1)) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow database has reached max resources\n");
		return -ENOSPC;
	}
	*fid = flow_tbl->flow_tbl_stack[flow_tbl->head_index];
	flow_tbl->head_index++;

	/* Set the flow type */
	ulp_flow_db_active_flows_bit_set(flow_db, flow_type, *fid, 1);

	/* function id update is only valid for regular flow table */
	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
		ulp_flow_db_func_id_set(ulp_ctxt, flow_db, *fid, func_id);

	netdev_dbg(ulp_ctxt->bp->dev, "flow_id = %u:%u allocated\n", flow_type, *fid);
	/* return success */
	return 0;
}

/**
 * Allocate the flow database entry.
 * The params->critical_resource has to be set to 0 to allocate a new resource.
 *
 * @ulp_ctxt: Ptr to ulp_context
 * @flow_type: Specify it is regular or default flow
 * @fid: The index to the flow entry
 * @params: The contents to be copied into resource
 *
 * returns 0 on success and negative on failure.
 */
int
ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt,
			 enum bnxt_ulp_fdb_type flow_type,
			 u32 fid,
			 struct ulp_flow_db_res_params *params)
{
	struct ulp_fdb_resource_info *resource, *fid_resource;
	struct bnxt_ulp_fc_info *ulp_fc_info;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;
	u32 idx;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;
	/* check for max flows */
	if (fid >= flow_tbl->num_flows || !fid) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Invalid flow index fid %d num_flows %d\n",
			   fid, flow_tbl->num_flows);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist %x:%x\n", flow_type, fid);
		return -EINVAL;
	}

	/* check for max resource */
	if ((flow_tbl->head_index + 1) >= flow_tbl->tail_index) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow db has reached max resources\n");
		return -ENOSPC;
	}
	fid_resource = &flow_tbl->flow_resources[fid];

	if (params->critical_resource && fid_resource->resource_em_handle) {
		netdev_dbg(ulp_ctxt->bp->dev, "Ignore multiple critical resources\n");
		/* Ignore the multiple critical resources */
		params->critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO;
	}

	if (!params->critical_resource) {
		/* Not the critical_resource so allocate a resource
		 * from the tail of the free-list stack and link it at the
		 * head of the fid's resource chain.
		 */
		idx = flow_tbl->flow_tbl_stack[flow_tbl->tail_index];
		resource = &flow_tbl->flow_resources[idx];
		flow_tbl->tail_index--;

		/* Update the chain list of resource */
		ULP_FLOW_DB_RES_NXT_SET(resource->nxt_resource_idx,
					fid_resource->nxt_resource_idx);
		/* update the contents */
		ulp_flow_db_res_params_to_info(resource, params);
		ULP_FLOW_DB_RES_NXT_RESET(fid_resource->nxt_resource_idx);
		ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx,
					idx);
	} else {
		/* critical resource. Just update the fid resource */
		ulp_flow_db_res_params_to_info(fid_resource, params);
	}

	ulp_fc_info = bnxt_ulp_cntxt_ptr2_fc_info_get(ulp_ctxt);
	if (params->resource_type == TF_TBL_TYPE_ACT_STATS_64 &&
	    params->resource_sub_type ==
	    BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT &&
	    ulp_fc_info && ulp_fc_info->num_counters) {
		/* Store the first HW counter ID for this table */
		if (!ulp_fc_mgr_start_idx_isset(ulp_ctxt, params->direction))
			ulp_fc_mgr_start_idx_set(ulp_ctxt, params->direction,
						 params->resource_hndl);

		ulp_fc_mgr_cntr_set(ulp_ctxt, params->direction,
				    params->resource_hndl,
				    ulp_flow_db_shared_session_get(params));

		if (!ulp_fc_mgr_thread_isstarted(ulp_ctxt))
			ulp_fc_mgr_thread_start(ulp_ctxt);
	}

	/* all good, return success */
	return 0;
}

/**
 * Free the flow database entry.
 * The params->critical_resource has to be set to 1 to free the first resource.
 *
 * @ulp_ctxt: Ptr to ulp_context
 * @flow_type: Specify it is regular or default flow
 * @fid: The index to the flow entry
 * @params: The contents to be copied into params.
 *          Only the critical_resource needs to be set by the caller.
 *
 * Returns 0 on success and negative on failure.
 */
int
ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt,
			 enum bnxt_ulp_fdb_type flow_type,
			 u32 fid,
			 struct ulp_flow_db_res_params *params)
{
	struct ulp_fdb_resource_info *nxt_resource, *fid_resource;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;
	u32 nxt_idx = 0;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;
	/* check for max flows */
	if (fid >= flow_tbl->num_flows || !fid) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Invalid flow index fid %d num_flows %d\n",
			   fid, flow_tbl->num_flows);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist %x:%x\n", flow_type, fid);
		return -EINVAL;
	}

	fid_resource = &flow_tbl->flow_resources[fid];
	if (!params->critical_resource) {
		/* Not the critical resource so free the resource */
		ULP_FLOW_DB_RES_NXT_SET(nxt_idx,
					fid_resource->nxt_resource_idx);
		if (!nxt_idx) {
			/* reached end of resources */
			return -ENOENT;
		}
		nxt_resource = &flow_tbl->flow_resources[nxt_idx];

		/* connect the fid resource to the next resource */
		ULP_FLOW_DB_RES_NXT_RESET(fid_resource->nxt_resource_idx);
		ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx,
					nxt_resource->nxt_resource_idx);

		/* update the contents to be given to caller */
		ulp_flow_db_res_info_to_params(nxt_resource, params);

		/* Delete the nxt_resource */
		memset(nxt_resource, 0, sizeof(struct ulp_fdb_resource_info));

		/* add it to the free list */
		flow_tbl->tail_index++;
		if (flow_tbl->tail_index >= flow_tbl->num_resources) {
			netdev_dbg(ulp_ctxt->bp->dev, "FlowDB:Tail reached max\n");
			return -ENOENT;
		}
		flow_tbl->flow_tbl_stack[flow_tbl->tail_index] = nxt_idx;

	} else {
		/* Critical resource. copy the contents and exit; preserve the
		 * chain link so the remaining resources stay reachable.
		 */
		ulp_flow_db_res_info_to_params(fid_resource, params);
		ULP_FLOW_DB_RES_NXT_SET(nxt_idx,
					fid_resource->nxt_resource_idx);
		memset(fid_resource, 0, sizeof(struct ulp_fdb_resource_info));
		ULP_FLOW_DB_RES_NXT_SET(fid_resource->nxt_resource_idx,
					nxt_idx);
	}

	/* Now that the HW Flow counter resource is deleted, reset it's
	 * corresponding slot in the SW accumulation table in the Flow Counter
	 * manager
	 */
	if (params->resource_type == TF_TBL_TYPE_ACT_STATS_64 &&
	    params->resource_sub_type ==
	    BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT) {
		ulp_fc_mgr_cntr_reset(ulp_ctxt, params->direction,
				      params->resource_hndl);
	}

	/* all good, return success */
	return 0;
}

/**
 * Free the flow database entry
 *
 * @ulp_ctxt: Ptr to ulp_context
 * @flow_type: - specify default or regular
 * @fid: The index to the flow entry
 *
 * returns 0 on success and negative on failure.
 */
int
ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt,
		     enum bnxt_ulp_fdb_type flow_type,
		     u32 fid)
{
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;

	/* check for limits of fid */
	if (fid >= flow_tbl->num_flows || !fid) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Invalid flow index fid %d num_flows %d\n",
			   fid, flow_tbl->num_flows);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist %x:%x\n", flow_type, fid);
		return -EINVAL;
	}
	/* Push the fid back onto the free-list stack. */
	flow_tbl->head_index--;
	if (!flow_tbl->head_index) {
		netdev_dbg(ulp_ctxt->bp->dev, "FlowDB: Head Ptr is zero\n");
		return -ENOENT;
	}

	flow_tbl->flow_tbl_stack[flow_tbl->head_index] = fid;

	/* Clear the flows bitmap */
	ulp_flow_db_active_flows_bit_set(flow_db, flow_type, fid, 0);

	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
		ulp_flow_db_func_id_set(ulp_ctxt, flow_db, fid, 0);

	netdev_dbg(ulp_ctxt->bp->dev, "flow_id = %u:%u freed\n", flow_type, fid);
	/* all good, return success */
	return 0;
}

/**
 * Get the flow database entry details
 *
 * @ulp_ctxt: Ptr to ulp_context
 * @flow_type: - specify default or regular
 * @fid: The index to the flow entry
 * @nxt_idx: the index to the next entry (in/out; pass 0 to start at the
 *           fid's own resource, it is updated for the next iteration)
 * @params: The contents to be copied into params.
 *
 * returns 0 on success and negative on failure.
 */
int
ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt,
			 enum bnxt_ulp_fdb_type flow_type,
			 u32 fid,
			 u32 *nxt_idx,
			 struct ulp_flow_db_res_params *params)
{
	struct ulp_fdb_resource_info *nxt_resource, *fid_resource;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;

	/* check for limits of fid */
	if (fid >= flow_tbl->num_flows || !fid) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Invalid flow index fid %d num_flows %d\n",
			   fid, flow_tbl->num_flows);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist\n");
		return -EINVAL;
	}

	if (!*nxt_idx) {
		fid_resource = &flow_tbl->flow_resources[fid];
		ulp_flow_db_res_info_to_params(fid_resource, params);
		ULP_FLOW_DB_RES_NXT_SET(*nxt_idx,
					fid_resource->nxt_resource_idx);
	} else {
		nxt_resource = &flow_tbl->flow_resources[*nxt_idx];
		ulp_flow_db_res_info_to_params(nxt_resource, params);
		*nxt_idx = 0;
		ULP_FLOW_DB_RES_NXT_SET(*nxt_idx,
					nxt_resource->nxt_resource_idx);
	}

	/* all good, return success */
	return 0;
}

/**
 * Get the flow database entry iteratively
 *
 * @flow_db: Ptr to flow table
 * @flow_type: - specify default or regular
 * @fid: The index to the flow entry (in/out; updated to the next active fid)
 *
 * returns 0 on success and negative on failure.
 */
static int
ulp_flow_db_next_entry_get(struct bnxt_ulp_context *ulp_ctxt,
			   struct bnxt_ulp_flow_db *flow_db,
			   enum bnxt_ulp_fdb_type flow_type,
			   u32 *fid)
{
	struct bnxt_ulp_flow_tbl *flowtbl = &flow_db->flow_tbl;
	u32 idx, s_idx, mod_fid;
	u64 *active_flows;
	u32 lfid = *fid;
	u64 bs;

	if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
		active_flows = flowtbl->active_reg_flows;
	else if (flow_type == BNXT_ULP_FDB_TYPE_DEFAULT)
		active_flows = flowtbl->active_dflt_flows;
	else
		return -EINVAL;

	do {
		/* increment the flow id to find the next valid flow id */
		lfid++;
		if (lfid >= flowtbl->num_flows)
			return -ENOENT;
		idx = lfid / ULP_INDEX_BITMAP_SIZE;
		mod_fid = lfid % ULP_INDEX_BITMAP_SIZE;
		s_idx = idx;
		/* skip over all-zero bitmap words */
		while (!(bs = active_flows[idx])) {
			idx++;
			if ((idx * ULP_INDEX_BITMAP_SIZE) >= flowtbl->num_flows)
				return -ENOENT;
		}
		/* remove the previous bits in the bitset bs to find the
		 * next non zero bit in the bitset. This needs to be done
		 * only if the idx is same as the one you started.
		 * The bitmap is scanned MSB-first: __builtin_clzl() gives the
		 * bit offset of the next set bit within the word.
		 */
		if (s_idx == idx)
			bs &= (-1UL >> mod_fid);
		lfid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs);
		if (*fid >= lfid)
			return -ENOENT;
	} while (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type,
						      lfid));

	/* all good, return success */
	*fid = lfid;
	return 0;
}

/**
 * Flush all flows in the flow database.
 *
 * @ulp_ctx: Ptr to ulp context
 * @flow_type: - specify default or regular
 *
 * returns 0 on success or negative number on failure
 */
int
ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx,
			enum bnxt_ulp_fdb_type flow_type)
{
	struct bnxt_ulp_flow_db *flow_db;
	u32 fid = 0;

	if (!ulp_ctx)
		return -EINVAL;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx);
	if (!flow_db) {
		netdev_dbg(ulp_ctx->bp->dev, "Flow database not found\n");
		return -EINVAL;
	}

	mutex_lock(&ulp_ctx->cfg_data->flow_db_lock);
	while (!ulp_flow_db_next_entry_get(ulp_ctx, flow_db, flow_type, &fid))
		ulp_mapper_resources_free(ulp_ctx, flow_type, fid, NULL);
	mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);

	return 0;
}

/**
 * Flush all flows in the flow database that belong to a device function.
 *
 * @ulp_ctx: Ptr to ulp context
 * @func_id: - The port function id
 *
 * returns 0 on success or negative number on failure
 */
int
ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx,
				u16 func_id)
{
	struct bnxt_ulp_flow_db *flow_db;
	u32 flow_id = 0;

	if (!ulp_ctx || !func_id)
		return -EINVAL;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctx);
	if (!flow_db) {
		netdev_dbg(ulp_ctx->bp->dev, "Flow database not found\n");
		return -EINVAL;
	}

	mutex_lock(&ulp_ctx->cfg_data->flow_db_lock);
	while (!ulp_flow_db_next_entry_get(ulp_ctx, flow_db, BNXT_ULP_FDB_TYPE_REGULAR, &flow_id)) {
		if (flow_db->func_id_tbl[flow_id] == func_id)
			ulp_mapper_resources_free(ulp_ctx,
						  BNXT_ULP_FDB_TYPE_REGULAR,
						  flow_id, NULL);
	}
	mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock);

	return 0;
}

/**
 * Flush all flows in the flow database that are associated with the session.
 *
 * @ulp_ctx: Ptr to ulp context
 *
 * returns 0 on success or negative number on failure
 */
int
ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx)
{
	/* TBD: Tf core implementation of FW session flush shall change this
	 * implementation.
	 */
	return ulp_flow_db_flush_flows(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR);
}

/**
 * Check that flow id matches the function id or not
 *
 * @ulp_ctxt: Ptr to ulp context
 * @flow_id: flow id to be validated
 * @func_id: The func_id to check against; pass zero to skip the ownership
 *           check.
 *
 * returns 0 on success else errors
 */
int
ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctxt,
			       u32 flow_id,
			       u32 func_id)
{
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow database not found\n");
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, BNXT_ULP_FDB_TYPE_REGULAR, flow_id)) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow does not exist %x:%x\n",
			   BNXT_ULP_FDB_TYPE_REGULAR, flow_id);
		return -ENOENT;
	}

	/* check the function id in the function table */
	if (flow_id < flow_db->func_id_tbl_size && func_id &&
	    flow_db->func_id_tbl[flow_id] != func_id) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Function id %x does not own flow %x:%x\n",
			   func_id, BNXT_ULP_FDB_TYPE_REGULAR, flow_id);
		return -EINVAL;
	}

	return 0;
}

/**
 * Internal api to traverse the resource list within a flow
 * and match a resource based on resource func and resource
 * sub type. This api should be used only for resources that
 * are unique and do not have multiple instances of resource
 * func and sub type combination since it will return only
 * the first match.
 */
static int
ulp_flow_db_resource_params_get(struct bnxt_ulp_context *ulp_ctxt,
				enum bnxt_ulp_fdb_type flow_type,
				u32 flow_id,
				u32 resource_func,
				u32 res_subtype,
				struct ulp_flow_db_res_params *params)
{
	struct ulp_fdb_resource_info *fid_res;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;
	u32 res_id;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Flow database not found\n");
		return -EINVAL;
	}

	if (!params) {
		netdev_dbg(ulp_ctxt->bp->dev, "invalid argument\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;

	/* check for limits of fid */
	if (flow_id >= flow_tbl->num_flows || !flow_id) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Invalid flow index fid %d num_flows %d\n",
			   flow_id, flow_tbl->num_flows);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, flow_id)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist\n");
		return -EINVAL;
	}
	/* Iterate the resource to get the resource handle */
	res_id = flow_id;
	memset(params, 0, sizeof(struct ulp_flow_db_res_params));
	while (res_id) {
		fid_res = &flow_tbl->flow_resources[res_id];
		if (ulp_flow_db_resource_func_get(fid_res) == resource_func) {
			if (resource_func & ULP_FLOW_DB_RES_FUNC_NEED_LOWER) {
				/* match on sub type as well */
				if (res_subtype == fid_res->resource_sub_type) {
					ulp_flow_db_res_info_to_params(fid_res,
								       params);
					return 0;
				}

			} else if (resource_func ==
				   BNXT_ULP_RESOURCE_FUNC_EM_TABLE ||
				   resource_func ==
				   BNXT_ULP_RESOURCE_FUNC_CMM_TABLE ||
				   resource_func ==
				   BNXT_ULP_RESOURCE_FUNC_CMM_STAT) {
				/* these resource funcs match without sub type */
				ulp_flow_db_res_info_to_params(fid_res,
							       params);
				return 0;
			}
		}
		res_id = 0;
		ULP_FLOW_DB_RES_NXT_SET(res_id, fid_res->nxt_resource_idx);
	}
	return -ENOENT;
}

/**
 * Api to get the cfa action pointer from a flow.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @flow_id: flow id
 * @cfa_action: The resource handle stored in the flow database
 *
 * returns 0 on success
 */
int
ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctxt,
				   u32 flow_id,
				   u32 *cfa_action)
{
	u8 sub_typ = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION;
	struct ulp_flow_db_res_params params;
	int rc;

	rc = ulp_flow_db_resource_params_get(ulp_ctxt,
					     BNXT_ULP_FDB_TYPE_DEFAULT,
					     flow_id,
					     BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE,
					     sub_typ, &params);
	if (rc) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "CFA Action ptr not found for flow id %u\n",
			   flow_id);
		return -ENOENT;
	}
	*cfa_action = params.resource_hndl;
	return 0;
}

/* internal validation function for parent flow tbl */
static struct ulp_fdb_parent_info *
ulp_flow_db_pc_db_entry_get(struct bnxt_ulp_context *ulp_ctxt,
			    u32 pc_idx)
{
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return NULL;
	}

	/* check for max flows */
	if (pc_idx >= BNXT_ULP_MAX_TUN_CACHE_ENTRIES) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid tunnel index\n");
		return NULL;
	}

	/* No support for parent child db then just exit */
	if (!flow_db->parent_child_db.entries_count) {
		netdev_dbg(ulp_ctxt->bp->dev, "parent child db not supported\n");
		return NULL;
	}
	if (!flow_db->parent_child_db.parent_flow_tbl[pc_idx].valid) {
		netdev_dbg(ulp_ctxt->bp->dev, "Not a valid tunnel index\n");
		return NULL;
	}

	return &flow_db->parent_child_db.parent_flow_tbl[pc_idx];
}

/* internal validation function for parent flow tbl */
static struct bnxt_ulp_flow_db *
ulp_flow_db_parent_arg_validation(struct bnxt_ulp_context *ulp_ctxt,
				  u32 tun_idx)
{
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return NULL;
	}

	/* check for max flows */
	if (tun_idx >= BNXT_ULP_MAX_TUN_CACHE_ENTRIES) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid tunnel index\n");
		return NULL;
	}

	/* No support for parent child db then just exit */
	if (!flow_db->parent_child_db.entries_count) {
		netdev_dbg(ulp_ctxt->bp->dev, "parent child db not supported\n");
		return NULL;
	}

	return flow_db;
}

/**
 * Allocate the entry in the parent-child database
 *
 * @ulp_ctxt: [in] Ptr to ulp_context
 * @tun_idx: [in] The tunnel index of the flow entry
 *
 * returns index on success and negative on failure.
 */
static int
ulp_flow_db_pc_db_idx_alloc(struct bnxt_ulp_context *ulp_ctxt,
			    u32 tun_idx)
{
	struct ulp_fdb_parent_child_db *p_pdb;
	struct bnxt_ulp_flow_db *flow_db;
	u32 idx, free_idx = 0;

	/* validate the arguments */
	flow_db = ulp_flow_db_parent_arg_validation(ulp_ctxt, tun_idx);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "parent child db validation failed\n");
		return -EINVAL;
	}

	p_pdb = &flow_db->parent_child_db;
	for (idx = 0; idx < p_pdb->entries_count; idx++) {
		/* reuse an existing entry for this tunnel index */
		if (p_pdb->parent_flow_tbl[idx].valid &&
		    p_pdb->parent_flow_tbl[idx].tun_idx == tun_idx) {
			return idx;
		}
		/* remember the first free slot (stored 1-based so 0 means
		 * "none found yet")
		 */
		if (!p_pdb->parent_flow_tbl[idx].valid && !free_idx)
			free_idx = idx + 1;
	}
	/* no free slots */
	if (!free_idx) {
		netdev_dbg(ulp_ctxt->bp->dev, "parent child db is full\n");
		return -ENOMEM;
	}

	free_idx -= 1;
	/* set the Fid in the parent child */
	p_pdb->parent_flow_tbl[free_idx].tun_idx = tun_idx;
	p_pdb->parent_flow_tbl[free_idx].valid = 1;
	return free_idx;
}

/**
 * Free the entry in the parent-child database
 *
 * @pc_entry: [in] Ptr to parent child db entry
 *
 * returns none.
 */
static void
ulp_flow_db_pc_db_entry_free(struct bnxt_ulp_context *ulp_ctxt,
			     struct ulp_fdb_parent_info *pc_entry)
{
	struct bnxt_ulp_flow_db *flow_db;
	u64 *tmp_bitset;

	/* clear the child bitset */
	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (flow_db)
		memset(pc_entry->child_fid_bitset, 0,
		       flow_db->parent_child_db.child_bitset_size);

	/* clear the entry but keep the carved-out bitset pointer, which is
	 * owned by parent_flow_tbl_mem and must survive the reset
	 */
	tmp_bitset = pc_entry->child_fid_bitset;
	memset(pc_entry, 0, sizeof(struct ulp_fdb_parent_info));
	pc_entry->child_fid_bitset = tmp_bitset;
}

/**
 * Set or reset the parent flow in the parent-child database
 *
 * @ulp_ctxt: [in] Ptr to ulp_context
 * @pc_idx: [in] The index to parent child db
 * @parent_fid: [in] The flow id of the parent flow entry
 * @set_flag: [in] Use 1 for setting parent, 0 to reset
 *
 * returns zero on success and negative on failure.
 */
int
ulp_flow_db_pc_db_parent_flow_set(struct bnxt_ulp_context *ulp_ctxt,
				  u32 pc_idx,
				  u32 parent_fid,
				  u32 set_flag)
{
	struct ulp_fdb_parent_info *pc_entry;
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "parent child db validation failed\n");
		return -EINVAL;
	}

	/* check for fid validity */
	if (parent_fid >= flow_db->flow_tbl.num_flows || !parent_fid) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid parent flow index %x\n", parent_fid);
		return -EINVAL;
	}

	/* validate the arguments and parent child entry */
	pc_entry = ulp_flow_db_pc_db_entry_get(ulp_ctxt, pc_idx);
	if (!pc_entry) {
		netdev_dbg(ulp_ctxt->bp->dev, "failed to get the parent child entry\n");
		return -EINVAL;
	}

	if (set_flag) {
		pc_entry->parent_fid = parent_fid;
	} else {
		if (pc_entry->parent_fid != parent_fid)
			netdev_dbg(ulp_ctxt->bp->dev, "Panic: invalid parent id\n");
		pc_entry->parent_fid = 0;

		/* Free the parent child db entry if no user present */
		if (!pc_entry->f2_cnt)
			ulp_flow_db_pc_db_entry_free(ulp_ctxt, pc_entry);
	}
	return 0;
}

/**
 * Set or reset the child flow in the parent-child database
 *
 * @ulp_ctxt: [in] Ptr to ulp_context
 * @pc_idx: [in] The index to parent child db
 * @child_fid: [in] The flow id of the child flow entry
 * @set_flag: [in] Use 1 for setting child, 0 to reset
 *
 * returns zero on success and negative on failure.
 */
int
ulp_flow_db_pc_db_child_flow_set(struct bnxt_ulp_context *ulp_ctxt,
				 u32 pc_idx,
				 u32 child_fid,
				 u32 set_flag)
{
	struct ulp_fdb_parent_info *pc_entry;
	struct bnxt_ulp_flow_db *flow_db;
	struct bnxt *bp = ulp_ctxt->bp;
	u32 a_idx;
	u64 *t;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(bp->dev, "parent child db validation failed\n");
		return -EINVAL;
	}

	/* check for fid validity */
	if (child_fid >= flow_db->flow_tbl.num_flows || !child_fid) {
		netdev_dbg(bp->dev, "Invalid child flow index %x\n", child_fid);
		return -EINVAL;
	}

	/* validate the arguments and parent child entry */
	pc_entry = ulp_flow_db_pc_db_entry_get(ulp_ctxt, pc_idx);
	if (!pc_entry) {
		netdev_dbg(bp->dev, "failed to get the parent child entry\n");
		return -EINVAL;
	}

	a_idx = child_fid / ULP_INDEX_BITMAP_SIZE;
	t = pc_entry->child_fid_bitset;
	if (set_flag) {
		ULP_INDEX_BITMAP_SET(t[a_idx], child_fid);
		pc_entry->f2_cnt++;
	} else {
		ULP_INDEX_BITMAP_RESET(t[a_idx], child_fid);
		if (pc_entry->f2_cnt)
			pc_entry->f2_cnt--;
		/* last child gone and no parent: release the entry */
		if (!pc_entry->f2_cnt && !pc_entry->parent_fid)
			ulp_flow_db_pc_db_entry_free(ulp_ctxt, pc_entry);
	}
	return 0;
}

/**
 * Get the next child flow in the parent-child database
 *
 * @flow_db: [in] Ptr to flow db
 * @parent_idx: [in] The index of the parent flow entry
 * @child_fid: [in/out] The flow id of the child flow entry
 *
 * returns zero on success and negative on failure.
 * Pass child_fid as zero for first entry.
 */
int
ulp_flow_db_parent_child_flow_next_entry_get(struct bnxt_ulp_flow_db *flow_db,
					     u32 parent_idx,
					     u32 *child_fid)
{
	struct ulp_fdb_parent_child_db *p_pdb;
	u32 next_fid = *child_fid;
	u32 idx, s_idx, mod_fid;
	u64 *child_bitset;
	u64 bs;

	/* check for fid validity */
	p_pdb = &flow_db->parent_child_db;
	if (parent_idx >= p_pdb->entries_count ||
	    !p_pdb->parent_flow_tbl[parent_idx].parent_fid)
		return -EINVAL;

	child_bitset = p_pdb->parent_flow_tbl[parent_idx].child_fid_bitset;
	do {
		/* increment the flow id to find the next valid flow id */
		next_fid++;
		if (next_fid >= flow_db->flow_tbl.num_flows)
			return -ENOENT;
		idx = next_fid / ULP_INDEX_BITMAP_SIZE;
		mod_fid = next_fid % ULP_INDEX_BITMAP_SIZE;
		s_idx = idx;
		while (!(bs = child_bitset[idx])) {
			idx++;
			if ((idx * ULP_INDEX_BITMAP_SIZE) >=
			    flow_db->flow_tbl.num_flows)
				return -ENOENT;
		}
		/*
		 * remove the previous bits in the bitset bs to find the
		 * next non zero bit in the bitset. This needs to be done
		 * only if the idx is same as the one you started.
		 * Like the active-flow bitmaps, bits are MSB-first, hence
		 * __builtin_clzl().
		 */
		if (s_idx == idx)
			bs &= (-1UL >> mod_fid);
		next_fid = (idx * ULP_INDEX_BITMAP_SIZE) + __builtin_clzl(bs);
		if (*child_fid >= next_fid) {
			/* NOTE(review): netdev_dbg() is given a NULL net_device
			 * here; consider plumbing the bp->dev through instead.
			 */
			netdev_dbg(NULL, "Parent Child Database is corrupt\n");
			return -ENOENT;
		}
		idx = next_fid / ULP_INDEX_BITMAP_SIZE;
	} while (!ULP_INDEX_BITMAP_GET(child_bitset[idx], next_fid));
	*child_fid = next_fid;
	return 0;
}

/**
 * Set the counter accumulation in the parent flow
 *
 * @ulp_ctxt: [in] Ptr to ulp_context
 * @pc_idx: [in] The parent child index of the parent flow entry
 *
 * returns index on success and negative on failure.
 */
static int
ulp_flow_db_parent_flow_count_accum_set(struct bnxt_ulp_context *ulp_ctxt,
					u32 pc_idx)
{
	struct ulp_fdb_parent_child_db *p_pdb;
	struct bnxt_ulp_flow_db *flow_db;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* check for parent idx validity */
	p_pdb = &flow_db->parent_child_db;
	if (pc_idx >= p_pdb->entries_count ||
	    !p_pdb->parent_flow_tbl[pc_idx].parent_fid) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid parent child index %x\n", pc_idx);
		return -EINVAL;
	}

	p_pdb->parent_flow_tbl[pc_idx].counter_acc = 1;
	return 0;
}

/**
 * Orphan the child flow entry
 * This is called only for child flows that have
 * BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW resource
 *
 * @ulp_ctxt: [in] Ptr to ulp_context
 * @flow_type: [in] Specify it is regular or default flow
 * @fid: [in] The index to the flow entry
 *
 * Returns 0 on success and negative on failure.
 */
int
ulp_flow_db_child_flow_reset(struct bnxt_ulp_context *ulp_ctxt,
			     enum bnxt_ulp_fdb_type flow_type,
			     u32 fid)
{
	struct ulp_fdb_resource_info *fid_res;
	struct bnxt_ulp_flow_tbl *flow_tbl;
	struct bnxt_ulp_flow_db *flow_db;
	u32 res_id = 0;

	flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
	if (!flow_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (flow_type >= BNXT_ULP_FDB_TYPE_LAST) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow type\n");
		return -EINVAL;
	}

	flow_tbl = &flow_db->flow_tbl;
	/* check for max flows */
	if (fid >= flow_tbl->num_flows || !fid) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid flow index %x\n", fid);
		return -EINVAL;
	}

	/* check if the flow is active or not */
	if (!ulp_flow_db_active_flows_bit_is_set(flow_db, flow_type, fid)) {
		netdev_dbg(ulp_ctxt->bp->dev, "flow does not exist\n");
		return -EINVAL;
	}

	/* Iterate the resource to get the resource handle */
	res_id = fid;
	while (res_id) {
		fid_res = &flow_tbl->flow_resources[res_id];
		if (ulp_flow_db_resource_func_get(fid_res) ==
		    BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW) {
			/* invalidate the resource details */
			fid_res->resource_hndl = 0;
			return 0;
		}
		res_id = 0;
		ULP_FLOW_DB_RES_NXT_SET(res_id, fid_res->nxt_resource_idx);
	}
	/* failed
	 * NOTE(review): raw -1 rather than a -errno; the rest of this file
	 * returns -ENOENT when a resource is not found - consider aligning.
	 */
	return -1;
}

/**
 * Create parent flow in the parent flow tbl
 *
 * @parms: [in] Ptr to mapper params
 *
 * Returns 0 on success and negative on failure.
+ */ +int +ulp_flow_db_parent_flow_create(struct bnxt_ulp_mapper_parms *parms) +{ + u32 sub_typ = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT; + struct ulp_flow_db_res_params res_params; + struct ulp_flow_db_res_params fid_parms; + + int pc_idx; + + /* create or get the parent child database */ + pc_idx = ulp_flow_db_pc_db_idx_alloc(parms->ulp_ctx, parms->tun_idx); + if (pc_idx < 0) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in getting parent child db %x\n", + parms->tun_idx); + return -EINVAL; + } + + /* Update the parent fid */ + if (ulp_flow_db_pc_db_parent_flow_set(parms->ulp_ctx, pc_idx, + parms->flow_id, 1)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in setting parent fid %x\n", + parms->tun_idx); + return -EINVAL; + } + + /* Add the parent details in the resource list of the flow */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW; + fid_parms.resource_hndl = pc_idx; + fid_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO; + if (ulp_flow_db_resource_add(parms->ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + parms->flow_id, &fid_parms)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in adding flow res for flow id %x\n", + parms->flow_id); + return -1; + } + + /* check of the flow has internal counter accumulation enabled */ + if (!ulp_flow_db_resource_params_get(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_REGULAR, + parms->flow_id, + BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE, + sub_typ, + &res_params)) { + /* Enable the counter accumulation in parent entry */ + if (ulp_flow_db_parent_flow_count_accum_set(parms->ulp_ctx, + pc_idx)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in setting counter acc %x\n", + parms->flow_id); + return -1; + } + } + + return 0; +} + +/** + * Create child flow in the parent flow tbl + * + * @parms: [in] Ptr to mapper params + * + * Returns 0 on success and negative on failure. 
+ */ +int +ulp_flow_db_child_flow_create(struct bnxt_ulp_mapper_parms *parms) +{ + u32 sub_type = BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT; + struct ulp_flow_db_res_params fid_parms; + enum bnxt_ulp_resource_func res_fun; + struct ulp_flow_db_res_params res_p; + int rc, pc_idx; + + /* create or get the parent child database */ + pc_idx = ulp_flow_db_pc_db_idx_alloc(parms->ulp_ctx, parms->tun_idx); + if (pc_idx < 0) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in getting parent child db %x\n", + parms->tun_idx); + return -1; + } + + /* create the parent flow entry in parent flow table */ + rc = ulp_flow_db_pc_db_child_flow_set(parms->ulp_ctx, pc_idx, + parms->flow_id, 1); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in setting child fid %x\n", + parms->flow_id); + return rc; + } + + /* Add the parent details in the resource list of the flow */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW; + fid_parms.resource_hndl = pc_idx; + fid_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO; + rc = ulp_flow_db_resource_add(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_REGULAR, + parms->flow_id, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in adding flow res for flow id %x\n", + parms->flow_id); + return rc; + } + + /* check if internal count action included for this flow.*/ + res_fun = BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE; + rc = ulp_flow_db_resource_params_get(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_REGULAR, + parms->flow_id, + res_fun, + sub_type, + &res_p); + /* return success */ + return rc; +} + +/** + * Update the parent counters + * + * @ulp_ctxt: [in] Ptr to ulp_context + * @pc_idx: [in] The parent flow entry idx + * @packet_count: [in] - packet count + * @byte_count: [in] - byte count + * + * returns 0 on success + */ +int +ulp_flow_db_parent_flow_count_update(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, + u64 packet_count, + u64 byte_count) +{ + struct 
ulp_fdb_parent_info *pc_entry; + + /* validate the arguments and get parent child entry */ + pc_entry = ulp_flow_db_pc_db_entry_get(ulp_ctxt, pc_idx); + if (!pc_entry) { + netdev_dbg(ulp_ctxt->bp->dev, "failed to get the parent child entry\n"); + return -EINVAL; + } + + if (pc_entry->counter_acc) { + pc_entry->pkt_count += packet_count; + pc_entry->byte_count += byte_count; + } + return 0; +} + +/** + * Get the parent accumulation counters + * + * @ulp_ctxt: [in] Ptr to ulp_context + * @pc_idx: [in] The parent flow entry idx + * @packet_count: [out] - packet count + * @byte_count: [out] - byte count + * + * returns 0 on success + */ +int +ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, u64 *packet_count, + u64 *byte_count, u8 count_reset) +{ + struct ulp_fdb_parent_info *pc_entry; + + /* validate the arguments and get parent child entry */ + pc_entry = ulp_flow_db_pc_db_entry_get(ulp_ctxt, pc_idx); + if (!pc_entry) { + netdev_dbg(ulp_ctxt->bp->dev, "failed to get the parent child entry\n"); + return -EINVAL; + } + + if (pc_entry->counter_acc) { + *packet_count = pc_entry->pkt_count; + *byte_count = pc_entry->byte_count; + if (count_reset) { + pc_entry->pkt_count = 0; + pc_entry->byte_count = 0; + } + } + return 0; +} + +/** + * reset the parent accumulation counters + * + * @ulp_ctxt: [in] Ptr to ulp_context + * + * returns none + */ +void +ulp_flow_db_parent_flow_count_reset(struct bnxt_ulp_context *ulp_ctxt) +{ + struct bnxt_ulp_flow_db *flow_db; + struct ulp_fdb_parent_child_db *p_pdb; + u32 idx; + + /* validate the arguments */ + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + netdev_dbg(ulp_ctxt->bp->dev, "parent child db validation failed\n"); + return; + } + + p_pdb = &flow_db->parent_child_db; + for (idx = 0; idx < p_pdb->entries_count; idx++) { + if (p_pdb->parent_flow_tbl[idx].valid && + p_pdb->parent_flow_tbl[idx].counter_acc) { + p_pdb->parent_flow_tbl[idx].pkt_count = 0; + 
p_pdb->parent_flow_tbl[idx].byte_count = 0; + } + } +} + +/** Set the shared bit for the flow db entry + * @res: Ptr to fdb entry + * @shared: shared flag + * + * returns none + */ +void ulp_flow_db_shared_session_set(struct ulp_flow_db_res_params *res, + enum bnxt_ulp_session_type s_type) +{ + if (res && (s_type & BNXT_ULP_SESSION_TYPE_SHARED)) + res->fdb_flags |= ULP_FDB_FLAG_SHARED_SESSION; + else if (res && (s_type & BNXT_ULP_SESSION_TYPE_SHARED_WC)) + res->fdb_flags |= ULP_FDB_FLAG_SHARED_WC_SESSION; +} + +/** + * get the shared bit for the flow db entry + * + * @res: [in] Ptr to fdb entry + * + * returns session type + */ +enum bnxt_ulp_session_type +ulp_flow_db_shared_session_get(struct ulp_flow_db_res_params *res) +{ + enum bnxt_ulp_session_type stype = BNXT_ULP_SESSION_TYPE_DEFAULT; + + if (res && (res->fdb_flags & ULP_FDB_FLAG_SHARED_SESSION)) + stype = BNXT_ULP_SESSION_TYPE_SHARED; + else if (res && (res->fdb_flags & ULP_FDB_FLAG_SHARED_WC_SESSION)) + stype = BNXT_ULP_SESSION_TYPE_SHARED_WC; + + return stype; +} + +#ifdef TC_BNXT_TRUFLOW_DEBUG + +/** + * Dump the entry details + * + * @ulp_ctxt: Ptr to ulp_context + * + * returns none + */ +static void ulp_flow_db_res_dump(struct bnxt_ulp_context *ulp_ctxt, + struct ulp_fdb_resource_info *r, + u32 *nxt_res) +{ + u8 res_func = ulp_flow_db_resource_func_get(r); + + netdev_dbg(ulp_ctxt->bp->dev, "Resource func = %x, nxt_resource_idx = %x\n", + res_func, (ULP_FLOW_DB_RES_NXT_MASK & r->nxt_resource_idx)); + if (res_func == BNXT_ULP_RESOURCE_FUNC_EM_TABLE || + res_func == BNXT_ULP_RESOURCE_FUNC_CMM_TABLE || + res_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) + netdev_dbg(ulp_ctxt->bp->dev, "Handle = %llu\n", r->resource_em_handle); + else + netdev_dbg(ulp_ctxt->bp->dev, "Handle = 0x%08x\n", r->resource_hndl); + + *nxt_res = 0; + ULP_FLOW_DB_RES_NXT_SET(*nxt_res, + r->nxt_resource_idx); +} + +/** + * Dump the flow entry details + * + * @flow_db: Ptr to flow db + * @fid: flow id + * + * returns none + */ +void 
+ulp_flow_db_debug_fid_dump(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt_ulp_flow_db *flow_db, u32 fid) +{ + struct bnxt_ulp_flow_tbl *flow_tbl; + struct ulp_fdb_resource_info *r; + u32 def_flag = 0, reg_flag = 0; + u32 nxt_res = 0; + + flow_tbl = &flow_db->flow_tbl; + if (ulp_flow_db_active_flows_bit_is_set(flow_db, + BNXT_ULP_FDB_TYPE_REGULAR, fid)) + reg_flag = 1; + if (ulp_flow_db_active_flows_bit_is_set(flow_db, + BNXT_ULP_FDB_TYPE_DEFAULT, fid)) + def_flag = 1; + + if (reg_flag && def_flag) { + netdev_dbg(ulp_ctxt->bp->dev, "RID = %u\n", fid); + } else if (reg_flag) { + netdev_dbg(ulp_ctxt->bp->dev, "Regular fid = %u and func id = %u\n", + fid, flow_db->func_id_tbl[fid]); + } else if (def_flag) { + netdev_dbg(ulp_ctxt->bp->dev, "Default fid = %u\n", fid); + } else { + return; + } + /* iterate the resource */ + nxt_res = fid; + do { + r = &flow_tbl->flow_resources[nxt_res]; + ulp_flow_db_res_dump(ulp_ctxt, r, &nxt_res); + } while (nxt_res); +} + +/** + * Dump the flow database entry details + * + * @ulp_ctxt: Ptr to ulp_context + * @flow_id: if zero then all fids are dumped. 
+ * + * returns none + */ +int ulp_flow_db_debug_dump(struct bnxt_ulp_context *ulp_ctxt, u32 flow_id) +{ + struct bnxt_ulp_flow_tbl *flow_tbl; + struct bnxt_ulp_flow_db *flow_db; + u32 fid; + + if (!ulp_ctxt || !ulp_ctxt->cfg_data) + return -EINVAL; + + flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt); + if (!flow_db) { + netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n"); + return -EINVAL; + } + + flow_tbl = &flow_db->flow_tbl; + if (flow_id) { + ulp_flow_db_debug_fid_dump(ulp_ctxt, flow_db, flow_id); + return 0; + } + + netdev_dbg(ulp_ctxt->bp->dev, "Dump flows = %u:%u\n", + flow_tbl->num_flows, + flow_tbl->num_resources); + netdev_dbg(ulp_ctxt->bp->dev, "Head_index = %u, Tail_index = %u\n", + flow_tbl->head_index, flow_tbl->tail_index); + for (fid = 1; fid < flow_tbl->num_flows; fid++) + ulp_flow_db_debug_fid_dump(ulp_ctxt, flow_db, fid); + netdev_dbg(ulp_ctxt->bp->dev, "Done.\n"); + return 0; +} + +#else /* TC_BNXT_TRUFLOW_DEBUG */ + +void ulp_flow_db_debug_fid_dump(struct bnxt_ulp_context *ulp_ctxt, + struct bnxt_ulp_flow_db *flow_db, u32 fid) +{ +} + +int ulp_flow_db_debug_dump(struct bnxt_ulp_context *ulp_ctxt, u32 flow_id) +{ + ulp_flow_db_debug_fid_dump(ulp_ctxt, NULL, 0); + return 0; +} + +#endif /* TC_BNXT_TRUFLOW_DEBUG */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.h new file mode 100644 index 000000000000..0ba9e3717c53 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_flow_db.h @@ -0,0 +1,454 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_FLOW_DB_H_ +#define _ULP_FLOW_DB_H_ + +#include "bnxt_tf_ulp.h" +#include "ulp_template_db_enum.h" +#include "ulp_mapper.h" + +#define BNXT_FLOW_DB_DEFAULT_NUM_FLOWS 512 +#define BNXT_FLOW_DB_DEFAULT_NUM_RESOURCES 8 + +/* Defines for the fdb flag */ +#define ULP_FDB_FLAG_SHARED_SESSION 0x1 +#define ULP_FDB_FLAG_SHARED_WC_SESSION 0x2 + +/** + * Structure for the flow database resource information + * The below structure is based on the below paritions + * nxt_resource_idx = dir[31],resource_func_upper[30:28],nxt_resource_idx[27:0] + * If resource_func is EM_TBL then use resource_em_handle. + * Else the other part of the union is used and + * resource_func is resource_func_upper[30:28] << 5 | resource_func_lower + */ +struct ulp_fdb_resource_info { + /* Points to next resource in the chained list. */ + u32 nxt_resource_idx; + /* TBD: used for tfc stat resource for now */ + u32 reserve_flag; + union { + u64 resource_em_handle; + struct { + u8 resource_func_lower; + u8 resource_type; + u8 resource_sub_type; + u8 fdb_flags; + u32 resource_hndl; + u8 *key_data; + }; + }; +}; + +/* Structure for the flow database resource information. */ +struct bnxt_ulp_flow_tbl { + /* Flow tbl is the resource object list for each flow id. */ + struct ulp_fdb_resource_info *flow_resources; + + /* Flow table stack to track free list of resources. */ + u32 *flow_tbl_stack; + u32 head_index; + u32 tail_index; + + /* Table to track the active flows. 
*/ + u64 *active_reg_flows; + u64 *active_dflt_flows; + u32 num_flows; + u32 num_resources; +}; + +/* Structure to maintain parent-child flow relationships */ +struct ulp_fdb_parent_info { + u32 valid; + u32 parent_fid; + u32 counter_acc; + u64 pkt_count; + u64 byte_count; + u64 *child_fid_bitset; + u32 f2_cnt; + u8 tun_idx; +}; + +/* Structure to maintain parent-child flow relationships */ +struct ulp_fdb_parent_child_db { + struct ulp_fdb_parent_info *parent_flow_tbl; + u32 child_bitset_size; + u32 entries_count; + u8 *parent_flow_tbl_mem; +}; + +/* Structure for the flow database resource information. */ +struct bnxt_ulp_flow_db { + struct bnxt_ulp_flow_tbl flow_tbl; + u16 *func_id_tbl; + u32 func_id_tbl_size; + struct ulp_fdb_parent_child_db parent_child_db; +}; + +/* flow db resource params to add resources */ +struct ulp_flow_db_res_params { + enum tf_dir direction; + enum bnxt_ulp_resource_func resource_func; + u8 resource_type; + u8 resource_sub_type; + u8 fdb_flags; + u8 critical_resource; + u8 *key_data; + u64 resource_hndl; + u32 reserve_flag; +}; + +/* + * Initialize the flow database. Memory is allocated in this + * call and assigned to the flow database. + * + * @ulp_ctxt: Ptr to ulp context + * + * Returns 0 on success or negative number on failure. + */ +int ulp_flow_db_init(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Deinitialize the flow database. Memory is deallocated in + * this call and all flows should have been purged before this + * call. + * + * @ulp_ctxt: Ptr to ulp context + * + * Returns 0 on success. + */ +int ulp_flow_db_deinit(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Allocate the flow database entry + * + * @ulp_ctxt: Ptr to ulp_context + * @tbl_idx: Specify it is regular or default flow + * @func_id: The function id of the device.Valid only for regular flows. + * @fid: The index to the flow entry + * + * returns 0 on success and negative on failure. 
+ */ +int +ulp_flow_db_fid_alloc(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type flow_type, + u16 func_id, + u32 *fid); + +/* + * Allocate the flow database entry. + * The params->critical_resource has to be set to 0 to allocate a new resource. + * + * @ulp_ctxt: Ptr to ulp_context + * @tbl_idx: Specify it is regular or default flow + * @fid: The index to the flow entry + * @params: The contents to be copied into resource + * + * returns 0 on success and negative on failure. + */ +int +ulp_flow_db_resource_add(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + struct ulp_flow_db_res_params *params); + +/* + * Free the flow database entry. + * The params->critical_resource has to be set to 1 to free the first resource. + * + * @ulp_ctxt: Ptr to ulp_context + * @tbl_idx: Specify it is regular or default flow + * @fid: The index to the flow entry + * @params: The contents to be copied into params. + * Only the critical_resource needs to be set by the caller. + * + * Returns 0 on success and negative on failure. + */ +int +ulp_flow_db_resource_del(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + struct ulp_flow_db_res_params *params); + +/* + * Free the flow database entry + * + * @ulp_ctxt: Ptr to ulp_context + * @tbl_idx: Specify it is regular or default flow + * @fid: The index to the flow entry + * + * returns 0 on success and negative on failure. + */ +int +ulp_flow_db_fid_free(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type tbl_idx, + u32 fid); + +/* + * Get the flow database entry details + * + * @ulp_ctxt: Ptr to ulp_context + * @tbl_idx: Specify it is regular or default flow + * @fid: The index to the flow entry + * @nxt_idx: the index to the next entry + * @params: The contents to be copied into params. + * + * returns 0 on success and negative on failure. 
+ */ +int +ulp_flow_db_resource_get(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + u32 *nxt_idx, + struct ulp_flow_db_res_params *params); + +/* + * Flush all flows in the flow database. + * + * @ulp_ctxt: Ptr to ulp context + * @flow_type: - specify default or regular + * + * returns 0 on success or negative number on failure + */ +int +ulp_flow_db_flush_flows(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_fdb_type flow_type); + +/* + * Flush all flows in the flow database that belong to a device function. + * + * @ulp_ctxt: Ptr to ulp context + * @tbl_idx: The index to table + * + * returns 0 on success or negative number on failure + */ +int +ulp_flow_db_function_flow_flush(struct bnxt_ulp_context *ulp_ctx, + u16 func_id); + +/* + * Flush all flows in the flow database that are associated with the session. + * + * @ulp_ctxt: Ptr to ulp context + * + * returns 0 on success or negative number on failure + */ +int +ulp_flow_db_session_flow_flush(struct bnxt_ulp_context *ulp_ctx); + +/* + * Check that flow id matches the function id or not + * + * @ulp_ctxt: Ptr to ulp context + * @flow_id: flow id of the flow. + * @func_id: The func_id to be set, for reset pass zero. + * + * returns true on success or false on failure + */ +int +ulp_flow_db_validate_flow_func(struct bnxt_ulp_context *ulp_ctx, + u32 flow_id, + u32 func_id); + +/* + * Api to get the cfa action pointer from a flow. 
+ * + * @ulp_ctxt: Ptr to ulp context + * @flow_id: flow id + * @cfa_action: The resource handle stored in the flow database + * + * returns 0 on success + */ +int +ulp_default_flow_db_cfa_action_get(struct bnxt_ulp_context *ulp_ctx, + u32 flow_id, + u32 *cfa_action); + +/* + * Set or reset the parent flow in the parent-child database + * + * @ulp_ctxt: Ptr to ulp_context + * @pc_idx: The index to parent child db + * @parent_fid: The flow id of the parent flow entry + * @set_flag: Use 1 for setting child, 0 to reset + * + * returns zero on success and negative on failure. + */ +int +ulp_flow_db_pc_db_parent_flow_set(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, + u32 parent_fid, + u32 set_flag); + +/* + * Set or reset the child flow in the parent-child database + * + * ulp_ctxt: Ptr to ulp_context + * pc_idx: The index to parent child db + * child_fid: The flow id of the child flow entry + * set_flag: Use 1 for setting child, 0 to reset + * + * returns zero on success and negative on failure. + */ +int +ulp_flow_db_pc_db_child_flow_set(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, + u32 child_fid, + u32 set_flag); + +/* + * Get the parent index from the parent-child database + * + * @ulp_ctxt; Ptr to ulp_context + * @parent_fid; The flow id of the parent flow entry + * @parent_idx: The parent index of parent flow entry + * + * returns zero on success and negative on failure. + */ +int +ulp_flow_db_parent_flow_idx_get(struct bnxt_ulp_context *ulp_ctxt, + u32 parent_fid, + u32 *parent_idx); + +/* + * Get the next child flow in the parent-child database + * + * @ulp_ctxt: Ptr to ulp_context + * @parent_fid: The flow id of the parent flow entry + * @child_fid: The flow id of the child flow entry + * + * returns zero on success and negative on failure. + * Pass child_fid as zero for first entry. 
+ */ +int +ulp_flow_db_parent_child_flow_next_entry_get(struct bnxt_ulp_flow_db *flow_db, + u32 parent_idx, + u32 *child_fid); + +/* + * Orphan the child flow entry + * This is called only for child flows that have + * BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW resource + * + * @ulp_ctxt: Ptr to ulp_context + * @flow_type: Specify it is regular or default flow + * @fid: The index to the flow entry + * + * Returns 0 on success and negative on failure. + */ +int +ulp_flow_db_child_flow_reset(struct bnxt_ulp_context *ulp_ctxt, + enum bnxt_ulp_fdb_type flow_type, + u32 fid); + +/* + * Create parent flow in the parent flow tbl + * + * @parms: Ptr to mapper params + * + * Returns 0 on success and negative on failure. + */ +int +ulp_flow_db_parent_flow_create(struct bnxt_ulp_mapper_parms *parms); + +/* + * Create child flow in the parent flow tbl + * + * @parms: Ptr to mapper params + * + * Returns 0 on success and negative on failure. + */ +int +ulp_flow_db_child_flow_create(struct bnxt_ulp_mapper_parms *parms); + +/* + * Update the parent counters + * + * @ulp_ctxt: Ptr to ulp_context + * @pc_idx: The parent flow entry idx + * @packet_count: - packet count + * @byte_count: - byte count + * + * returns 0 on success + */ +int +ulp_flow_db_parent_flow_count_update(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, + u64 packet_count, + u64 byte_count); +/* + * Get the parent accumulation counters + * + * @ulp_ctxt: Ptr to ulp_context + * @pc_idx: The parent flow entry idx + * @packet_count: - packet count + * @byte_count: - byte count + * + * returns 0 on success + */ + +int +ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt, + u32 pc_idx, + u64 *packet_count, + u64 *byte_count, + u8 count_reset); + +/* + * reset the parent accumulation counters + * + * @ulp_ctxt: Ptr to ulp_context + * + * returns none + */ +void +ulp_flow_db_parent_flow_count_reset(struct bnxt_ulp_context *ulp_ctxt); + +/* + * Set the shared bit for the flow db entry + * + * @res: Ptr to fdb entry 
+ * @s_type: session flag + * + * returns none + */ +void ulp_flow_db_shared_session_set(struct ulp_flow_db_res_params *res, + enum bnxt_ulp_session_type s_type); + +/* + * get the shared bit for the flow db entry + * + * @res: Ptr to fdb entry + * + * returns session type + */ +enum bnxt_ulp_session_type +ulp_flow_db_shared_session_get(struct ulp_flow_db_res_params *res); + +/* + * Dump the flow entry details + * + * @flow_db: Ptr to flow db + * @fid: flow id + * + * returns none + */ +void +ulp_flow_db_debug_fid_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_flow_db *flow_db, u32 fid); + +/* + * Dump the flow database entry details + * + * @ulp_ctxt: Ptr to ulp_context + * @flow_id: if zero then all fids are dumped. + * + * returns none + */ +int ulp_flow_db_debug_dump(struct bnxt_ulp_context *ulp_ctxt, + u32 flow_id); + +#endif /* _ULP_FLOW_DB_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.c new file mode 100644 index 000000000000..a2626b62f75f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "tf_core.h" +#include "ulp_mapper.h" +#include "ulp_flow_db.h" +#include "ulp_template_debug_proto.h" +#include "ulp_tf_debug.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Retrieve the generic table initialization parameters for the tbl_idx */ +static const struct bnxt_ulp_generic_tbl_params* +ulp_mapper_gen_tbl_params_get(struct bnxt_ulp_context *ulp_ctx, + u32 tbl_idx) +{ + const struct bnxt_ulp_generic_tbl_params *gen_tbl; + struct bnxt_ulp_device_params *dparms; + u32 dev_id; + + if (tbl_idx >= BNXT_ULP_GEN_TBL_MAX_SZ) + return NULL; + + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id)) + return NULL; + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get device parms\n"); + return NULL; + } + + gen_tbl = &dparms->gen_tbl_params[tbl_idx]; + return gen_tbl; +} + +/** + * Initialize the generic table list + * + * @mapper_data: Pointer to the mapper data and the generic table is + * part of it + * + * returns 0 on success + */ +int +ulp_mapper_generic_tbl_list_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + struct rhashtable_params bnxt_tf_tc_ht_params = { 0 }; + const struct bnxt_ulp_generic_tbl_params *tbl; + struct ulp_mapper_gen_tbl_list *entry; + u32 idx, size, key_sz; + int rc = 0; + + /* Allocate the generic tables. 
*/ + for (idx = 0; idx < BNXT_ULP_GEN_TBL_MAX_SZ; idx++) { + tbl = ulp_mapper_gen_tbl_params_get(ulp_ctx, idx); + if (!tbl) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get gen table parms %d\n", + idx); + return -EINVAL; + } + entry = &mapper_data->gen_tbl_list[idx]; + + /* For simple list allocate memory for key storage*/ + if (tbl->gen_tbl_type == BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST && + tbl->key_num_bytes) { + key_sz = tbl->key_num_bytes + + tbl->partial_key_num_bytes; + entry->container.byte_key_ex_size = tbl->key_num_bytes; + entry->container.byte_key_par_size = + tbl->partial_key_num_bytes; + } else { + key_sz = 0; + } + + /* Allocate memory for result data and key data */ + if (tbl->result_num_entries != 0) { + /* assign the name */ + entry->gen_tbl_name = tbl->name; + entry->tbl_type = tbl->gen_tbl_type; + /* add 4 bytes for reference count */ + entry->mem_data_size = (tbl->result_num_entries + 1) * + (tbl->result_num_bytes + sizeof(u32) + key_sz); + + /* allocate the big chunk of memory */ + entry->mem_data = vzalloc(entry->mem_data_size); + if (!entry->mem_data) + return -ENOMEM; + + /* Populate the generic table container */ + entry->container.num_elem = tbl->result_num_entries; + entry->container.byte_data_size = tbl->result_num_bytes; + entry->container.ref_count = + (u32 *)entry->mem_data; + size = sizeof(u32) * (tbl->result_num_entries + 1); + entry->container.byte_data = &entry->mem_data[size]; + entry->container.byte_order = tbl->result_byte_order; + } else { + netdev_dbg(ulp_ctx->bp->dev, "%s: Unused Gen tbl entry is %d\n", + tbl->name, idx); + continue; + } + + /* assign the memory for key data */ + if (tbl->gen_tbl_type == BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST && + key_sz) { + size += tbl->result_num_bytes * + (tbl->result_num_entries + 1); + entry->container.byte_key = + &entry->mem_data[size]; + } + + /* Initialize Hash list for hash based generic table */ + if (tbl->gen_tbl_type == BNXT_ULP_GEN_TBL_TYPE_HASH_LIST && + tbl->hash_tbl_entries) { + 
bnxt_tf_tc_ht_params.head_offset = + offsetof(struct ulp_gen_hash_entry_params, node); + bnxt_tf_tc_ht_params.key_offset = + offsetof(struct ulp_gen_hash_entry_params, key_data); + bnxt_tf_tc_ht_params.key_len = tbl->key_num_bytes; + bnxt_tf_tc_ht_params.automatic_shrinking = true; + bnxt_tf_tc_ht_params.nelem_hint = /* Set to about 75% */ + (tbl->result_num_entries * 75) / 100; + bnxt_tf_tc_ht_params.max_size = tbl->result_num_entries; + entry->hash_tbl_params = bnxt_tf_tc_ht_params; + entry->hash_tbl = vzalloc(sizeof(*entry->hash_tbl)); + rc = rhashtable_init(entry->hash_tbl, &entry->hash_tbl_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "HASH_TABLE initialization failed\n"); + return rc; + } + } + } + /* success */ + return 0; +} + +/** + * Free the generic table list + * + * @mapper_data: Pointer to the mapper data and the generic table is + * part of it + * + * returns 0 on success + */ +int +ulp_mapper_generic_tbl_list_deinit(struct bnxt_ulp_mapper_data *mapper_data) +{ + struct ulp_mapper_gen_tbl_list *tbl_list; + u32 idx; + + /* iterate the generic table. */ + for (idx = 0; idx < BNXT_ULP_GEN_TBL_MAX_SZ; idx++) { + tbl_list = &mapper_data->gen_tbl_list[idx]; + vfree(tbl_list->mem_data); + tbl_list->mem_data = NULL; + tbl_list->container.byte_data = NULL; + tbl_list->container.byte_key = NULL; + tbl_list->container.ref_count = NULL; + if (tbl_list->hash_tbl) { + rhashtable_destroy(tbl_list->hash_tbl); + vfree(tbl_list->hash_tbl); + } + } + /* success */ + return 0; +} + +/** + * Get the generic table list entry + * + * @tbl_list: - Ptr to generic table + * @key: - Key index to the table + * @entry: - output will include the entry if found + * + * returns 0 on success. 
+ */ +int +ulp_mapper_gen_tbl_entry_get(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_list *tbl_list, + u32 key, + struct ulp_mapper_gen_tbl_entry *entry) +{ + /* populate the output and return the values */ + if (key > tbl_list->container.num_elem) { + netdev_dbg(ulp_ctx->bp->dev, "%s: invalid key %x:%x\n", + tbl_list->gen_tbl_name, key, + tbl_list->container.num_elem); + return -EINVAL; + } + entry->ref_count = &tbl_list->container.ref_count[key]; + entry->byte_data_size = tbl_list->container.byte_data_size; + entry->byte_data = &tbl_list->container.byte_data[key * + entry->byte_data_size]; + entry->byte_order = tbl_list->container.byte_order; + if (tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST) { + entry->byte_key_size = tbl_list->container.byte_key_ex_size + + tbl_list->container.byte_key_par_size; + entry->byte_key = &tbl_list->container.byte_key[key * + entry->byte_key_size]; + } else { + entry->byte_key = NULL; + entry->byte_key_size = 0; + } + return 0; +} + +/** + * utility function to calculate the table idx + * + * @res_sub_type: - Resource sub type + * @dir: - Direction + * + * returns None + */ +int +ulp_mapper_gen_tbl_idx_calculate(struct bnxt_ulp_context *ulp_ctx, + u32 res_sub_type, u32 dir) +{ + int tbl_idx; + + /* Validate for direction */ + if (dir >= TF_DIR_MAX) { + netdev_dbg(ulp_ctx->bp->dev, "invalid argument %x\n", dir); + return -EINVAL; + } + tbl_idx = (res_sub_type << 1) | (dir & 0x1); + if (tbl_idx >= BNXT_ULP_GEN_TBL_MAX_SZ) { + netdev_dbg(ulp_ctx->bp->dev, "invalid table index %x\n", tbl_idx); + return -EINVAL; + } + return tbl_idx; +} + +/** + * Set the data in the generic table entry, Data is in Big endian format + * + * @entry: - generic table entry + * @key: - pointer to the key to be used for setting the value. + * @key_size: - The length of the key in bytess to be set + * @data: - pointer to the data to be used for setting the value. + * @data_size: - length of the data pointer in bytes. 
+ * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_entry_data_set(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_list *tbl_list, + struct ulp_mapper_gen_tbl_entry *entry, + u8 *key, u32 key_size, + u8 *data, u32 data_size) +{ + /* validate the null arguments */ + if (!entry || !key || !data) { + netdev_dbg(ulp_ctx->bp->dev, "invalid argument\n"); + return -EINVAL; + } + + /* check the size of the buffer for validation */ + if (data_size > entry->byte_data_size) { + netdev_dbg(ulp_ctx->bp->dev, "invalid offset or length %x:%x\n", + data_size, entry->byte_data_size); + return -EINVAL; + } + memcpy(entry->byte_data, data, data_size); + if (tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST) { + if (key_size > entry->byte_key_size) { + netdev_dbg(ulp_ctx->bp->dev, "invalid offset or length %x:%x\n", + key_size, entry->byte_key_size); + return -EINVAL; + } + memcpy(entry->byte_key, key, key_size); + } + tbl_list->container.seq_cnt++; + return 0; +} + +/** + * Get the data in the generic table entry, Data is in Big endian format + * + * @entry: - generic table entry + * @offset: - The offset in bits where the data has to get + * @len: - The length of the data in bits to be get + * @data: - pointer to the data to be used for setting the value. 
+ * @data_size: - The size of data in bytes + * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_entry_data_get(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_entry *entry, + u32 offset, u32 len, u8 *data, + u32 data_size) +{ + /* validate the null arguments */ + if (!entry || !data) { + netdev_dbg(ulp_ctx->bp->dev, "invalid argument\n"); + return -EINVAL; + } + + /* check the size of the buffer for validation */ + if ((offset + len) > ULP_BYTE_2_BITS(entry->byte_data_size) || + len > ULP_BYTE_2_BITS(data_size)) { + netdev_dbg(ulp_ctx->bp->dev, "invalid offset or length %x:%x:%x\n", + offset, len, entry->byte_data_size); + return -EINVAL; + } + if (entry->byte_order == BNXT_ULP_BYTE_ORDER_LE) + ulp_bs_pull_lsb(entry->byte_data, data, data_size, offset, len); + else + ulp_bs_pull_msb(entry->byte_data, data, offset, len); + + return 0; +} + +/** + * Free the generic table list resource + * + * @ulp_ctx: - Pointer to the ulp context + * @res: - Pointer to flow db resource entry + * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx, + u32 fid, + struct ulp_flow_db_res_params *res) +{ + struct ulp_mapper_gen_tbl_entry entry, *actual_entry; + struct ulp_gen_hash_entry_params *hash_entry = NULL; + struct ulp_mapper_gen_tbl_list *gen_tbl_list; + struct bnxt_ulp_mapper_data *mapper_data; + int tbl_idx; + u32 rid = 0; + u32 key_idx; + int rc; + + /* Extract the resource sub type and direction */ + tbl_idx = ulp_mapper_gen_tbl_idx_calculate(ulp_ctx, res->resource_sub_type, + res->direction); + if (tbl_idx < 0) { + netdev_dbg(ulp_ctx->bp->dev, "invalid argument %x:%x\n", + res->resource_sub_type, res->direction); + return -EINVAL; + } + + mapper_data = bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!mapper_data) { + netdev_dbg(ulp_ctx->bp->dev, "invalid ulp context %x\n", tbl_idx); + return -EINVAL; + } + /* get the generic table */ + gen_tbl_list = &mapper_data->gen_tbl_list[tbl_idx]; + + /* Get the 
generic table entry */ + if (gen_tbl_list->hash_tbl) { + /* use the hash index to get the value */ + hash_entry = rhashtable_lookup_fast(gen_tbl_list->hash_tbl, + res->key_data, + gen_tbl_list->hash_tbl_params); + if (!hash_entry) { + netdev_dbg(ulp_ctx->bp->dev, "invalid hash entry %p\n", hash_entry); + return -EINVAL; + } + + if (!hash_entry->entry.hash_ref_count) { + netdev_dbg(ulp_ctx->bp->dev, "generic table corrupt %x: %llu\n", + tbl_idx, res->resource_hndl); + return -EINVAL; + } + hash_entry->entry.hash_ref_count--; + if (hash_entry->entry.hash_ref_count) + return 0; + + actual_entry = &hash_entry->entry; + } else { + key_idx = (u32)res->resource_hndl; + if (ulp_mapper_gen_tbl_entry_get(ulp_ctx, gen_tbl_list, + key_idx, &entry)) { + netdev_dbg(ulp_ctx->bp->dev, + "Gen tbl entry get failed %x: %llu\n", + tbl_idx, res->resource_hndl); + return -EINVAL; + } + /* Decrement the reference count */ + if (!ULP_GEN_TBL_REF_CNT(&entry)) { + netdev_dbg(ulp_ctx->bp->dev, + "generic table entry already free %x: %llu\n", + tbl_idx, res->resource_hndl); + return 0; + } + ULP_GEN_TBL_REF_CNT_DEC(&entry); + + /* retain the details since there are other users */ + if (ULP_GEN_TBL_REF_CNT(&entry)) + return 0; + + actual_entry = &entry; + } + + /* Delete the generic table entry. First extract the fid */ + if (ulp_mapper_gen_tbl_entry_data_get(ulp_ctx, actual_entry, ULP_GEN_TBL_FID_OFFSET, + ULP_GEN_TBL_FID_SIZE_BITS, + (u8 *)&rid, + sizeof(rid))) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get rid %x: %llu\n", + tbl_idx, res->resource_hndl); + return -EINVAL; + } + + rid = be32_to_cpu(rid); + /* no need to del if rid is 0 since there is no associated resource + * if rid from the entry is equal to the incoming fid, then we have a + * recursive delete, so don't follow the rid. 
+ */ + if (rid && rid != fid) { + /* Destroy the flow associated with the shared flow id */ + if (ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_RID, + rid, NULL)) + netdev_dbg(ulp_ctx->bp->dev, + "Error in deleting shared flow id %x\n", + fid); + } + + /* Delete the entry from the hash table */ + if (gen_tbl_list->hash_tbl) { + rc = rhashtable_remove_fast(gen_tbl_list->hash_tbl, &hash_entry->node, + gen_tbl_list->hash_tbl_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Unable to delete hash entry %p\n", hash_entry); + return rc; + } + + vfree(hash_entry->entry.byte_data); + hash_entry->entry.byte_data = NULL; + kfree_rcu(hash_entry, rcu); + return 0; + } + + /* decrement the count */ + if (gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST && + gen_tbl_list->container.seq_cnt > 0) + gen_tbl_list->container.seq_cnt--; + + /* clear the byte data of the generic table entry */ + memset(actual_entry->byte_data, 0, actual_entry->byte_data_size); + + return 0; +} + +/** + * Perform add entry in the simple list + * + * @tbl_list: - pointer to the generic table list + * @key: - Key added as index + * @data: - data added as result + * @key_index: - index to the entry + * @gen_tbl_ent: - write the output to the entry + * + * returns 0 on success. 
+ */ +int32_t +ulp_gen_tbl_simple_list_add_entry(struct ulp_mapper_gen_tbl_list *tbl_list, + u8 *key, + u8 *data, + u32 *key_index, + struct ulp_mapper_gen_tbl_entry *ent) +{ + struct ulp_mapper_gen_tbl_cont *cont; + u32 key_size, idx; + u8 *entry_key; + + /* sequentially search for the matching key */ + cont = &tbl_list->container; + for (idx = 0; idx < cont->num_elem; idx++) { + ent->ref_count = &cont->ref_count[idx]; + if (ULP_GEN_TBL_REF_CNT(ent) == 0) { + /* add the entry */ + key_size = cont->byte_key_ex_size + + cont->byte_key_par_size; + entry_key = &cont->byte_key[idx * key_size]; + ent->byte_data_size = cont->byte_data_size; + ent->byte_data = &cont->byte_data[idx * + cont->byte_data_size]; + memcpy(entry_key, key, key_size); + memcpy(ent->byte_data, data, ent->byte_data_size); + ent->byte_order = cont->byte_order; + *key_index = idx; + cont->seq_cnt++; + return 0; + } + } + /* No more memory */ + return -ENOMEM; +} + +/* perform the subset and superset. len should be 64bit multiple*/ +static enum ulp_gen_list_search_flag +ulp_gen_tbl_overlap_check(u8 *key1, u8 *key2, u32 len) +{ + u32 sz = 0, superset = 0, subset = 0; + u64 src, dst; + + while (sz < len) { + /* walk the keys one 64-bit word at a time; read at the + * current offset so successive words are compared + */ + memcpy(&dst, key2 + sz, sizeof(dst)); + memcpy(&src, key1 + sz, sizeof(src)); + sz += sizeof(src); + if (dst == src) + continue; + else if (dst == (dst | src)) + superset = 1; + else if (src == (dst | src)) + subset = 1; + else + return ULP_GEN_LIST_SEARCH_MISSED; + } + if (superset && !subset) + return ULP_GEN_LIST_SEARCH_FOUND_SUPERSET; + if (!superset && subset) + return ULP_GEN_LIST_SEARCH_FOUND_SUBSET; + return ULP_GEN_LIST_SEARCH_FOUND; +} + +int32_t +ulp_gen_tbl_simple_list_search(struct ulp_mapper_gen_tbl_list *tbl_list, + u8 *match_key, + u32 *key_idx) +{ + struct ulp_mapper_gen_tbl_cont *cont = &tbl_list->container; + enum ulp_gen_list_search_flag rc = ULP_GEN_LIST_SEARCH_FULL; + u32 idx = 0, key_idx_set = 0, sz = 0, key_size = 0; + u8 *k1 = NULL, *k2, *entry_key; + u32 valid_ent = 0; + u32
*ref_count; + + key_size = cont->byte_key_ex_size + cont->byte_key_par_size; + if (cont->byte_key_par_size) + k1 = match_key + cont->byte_key_ex_size; + + /* sequentially search for the matching key */ + while (idx < cont->num_elem) { + ref_count = &cont->ref_count[idx]; + entry_key = &cont->byte_key[idx * key_size]; + /* check ref count not zero and exact key matches */ + if (*ref_count) { + /* compare the exact match */ + if (!memcmp(match_key, entry_key, + cont->byte_key_ex_size)) { + /* Match the partial key*/ + if (cont->byte_key_par_size) { + k2 = entry_key + cont->byte_key_ex_size; + sz = cont->byte_key_par_size; + rc = ulp_gen_tbl_overlap_check(k1, k2, + sz); + if (rc != ULP_GEN_LIST_SEARCH_MISSED) { + *key_idx = idx; + return rc; + } + } else { + /* found the entry return */ + rc = ULP_GEN_LIST_SEARCH_FOUND; + *key_idx = idx; + return rc; + } + } + ++valid_ent; + } else { + /* empty slot */ + if (!key_idx_set) { + *key_idx = idx; + key_idx_set = 1; + rc = ULP_GEN_LIST_SEARCH_MISSED; + } + if (valid_ent >= cont->seq_cnt) + return rc; + } + idx++; + } + return rc; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.h new file mode 100644 index 000000000000..adedbb92217d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_gen_tbl.h @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_GEN_TBL_H_ +#define _ULP_GEN_TBL_H_ + +#include + +/* Macros for reference count manipulation */ +#define ULP_GEN_TBL_REF_CNT_INC(entry) {*(entry)->ref_count += 1; } +#define ULP_GEN_TBL_REF_CNT_DEC(entry) {*(entry)->ref_count -= 1; } +#define ULP_GEN_TBL_REF_CNT(entry) (*(entry)->ref_count) + +#define ULP_GEN_TBL_FID_OFFSET 0 +#define ULP_GEN_TBL_FID_SIZE_BITS 32 + +enum ulp_gen_list_search_flag { + ULP_GEN_LIST_SEARCH_MISSED = 1, + ULP_GEN_LIST_SEARCH_FOUND = 2, + ULP_GEN_LIST_SEARCH_FOUND_SUBSET = 3, + ULP_GEN_LIST_SEARCH_FOUND_SUPERSET = 4, + ULP_GEN_LIST_SEARCH_FULL = 5 +}; + +enum ulp_gen_hash_search_flag { + ULP_GEN_HASH_SEARCH_MISSED = 1, + ULP_GEN_HASH_SEARCH_FOUND = 2, + ULP_GEN_HASH_SEARCH_FULL = 3 +}; + +/* Structure to pass the generic table values across APIs */ +struct ulp_mapper_gen_tbl_entry { + u32 *ref_count; + u32 byte_data_size; + u8 *byte_data; + enum bnxt_ulp_byte_order byte_order; + u32 hash_ref_count; + u32 byte_key_size; + u8 *byte_key; +}; + +/* structure to pass hash entry */ +struct ulp_gen_hash_entry_params { +#define ULP_MAX_HASH_KEY_LENGTH 57 + struct rhash_head node; + struct ulp_mapper_gen_tbl_entry entry; + struct rcu_head rcu; + u32 key_length; + enum ulp_gen_hash_search_flag search_flag; + u32 hash_index; + u32 key_idx; + u8 key_data[0]; /* must be the last one */ +}; + +/* Structure to store the generic tbl container + * The ref count and byte data contain list of "num_elem" elements. + * The size of each entry in byte_data is of size byte_data_size. 
+ */ +struct ulp_mapper_gen_tbl_cont { + u32 num_elem; + u32 byte_data_size; + enum bnxt_ulp_byte_order byte_order; + /* Reference count to track number of users*/ + u32 *ref_count; + /* First 4 bytes is either tcam_idx or fid and rest are identities */ + u8 *byte_data; + u8 *byte_key; + u32 byte_key_ex_size; /* exact match size */ + u32 byte_key_par_size; /* partial match */ + u32 seq_cnt; +}; + +/* Structure to store the generic tbl container */ +struct ulp_mapper_gen_tbl_list { + const char *gen_tbl_name; + enum bnxt_ulp_gen_tbl_type tbl_type; + struct ulp_mapper_gen_tbl_cont container; + u32 mem_data_size; + u8 *mem_data; + struct rhashtable *hash_tbl; + struct rhashtable_params hash_tbl_params; +}; + +/* Forward declaration */ +struct bnxt_ulp_mapper_data; +struct ulp_flow_db_res_params; + +/** + * Initialize the generic table list + * + * @ulp_ctx: Pointer to the ulp context + * @mapper_data: Pointer to the mapper data and the generic table is part of it + * + * returns 0 on success + */ +int +ulp_mapper_generic_tbl_list_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data); + +/** + * Free the generic table list + * + * @mapper_data: Pointer to the mapper data and the generic table is part of it + * + * returns 0 on success + */ +int +ulp_mapper_generic_tbl_list_deinit(struct bnxt_ulp_mapper_data *mapper_data); + +/** + * Get the generic table list entry + * + * @tbl_list: Ptr to generic table + * @key: Key index to the table + * @entry: output will include the entry if found + * + * returns 0 on success. 
+ */ +int +ulp_mapper_gen_tbl_entry_get(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_list *tbl_list, + u32 key, + struct ulp_mapper_gen_tbl_entry *entry); + +/** + * utility function to calculate the table idx + * + * @res_sub_type: Resource sub type + * @dir: direction + * + * returns the table index on success, -EINVAL on failure + */ +int +ulp_mapper_gen_tbl_idx_calculate(struct bnxt_ulp_context *ulp_ctx, + u32 res_sub_type, u32 dir); + +/** + * Set the data in the generic table entry, Data is in Big endian format + * + * @entry: generic table entry + * @key: pointer to the key to be used for setting the value. + * @key_size: The length of the key in bytes to be set + * @data: pointer to the data to be used for setting the value. + * @data_size: length of the data pointer in bytes. + * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_entry_data_set(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_list *tbl_list, + struct ulp_mapper_gen_tbl_entry *entry, + u8 *key, u32 key_size, + u8 *data, u32 data_size); + +/** + * Get the data in the generic table entry + * + * @entry: generic table entry + * @offset: The offset in bits where the data has to get + * @len: The length of the data in bits to be read + * @data: pointer to the buffer where the read data is returned.
+ * @data_size: The size of data in bytes + * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_entry_data_get(struct bnxt_ulp_context *ulp_ctx, + struct ulp_mapper_gen_tbl_entry *entry, + u32 offset, u32 len, u8 *data, + u32 data_size); + +/** + * Free the generic table list resource + * + * @ulp_ctx: Pointer to the ulp context + * @fid: The fid the generic table is associated with + * @res: Pointer to flow db resource entry + * + * returns 0 on success + */ +int +ulp_mapper_gen_tbl_res_free(struct bnxt_ulp_context *ulp_ctx, + u32 fid, + struct ulp_flow_db_res_params *res); + +/** + * Perform add entry in the simple list + * + * @tbl_list: pointer to the generic table list + * @key: Key added as index + * @data: data added as result + * @key_index: index to the entry + * @gen_tbl_ent: write the output to the entry + * + * returns 0 on success. + */ +int +ulp_gen_tbl_simple_list_add_entry(struct ulp_mapper_gen_tbl_list *tbl_list, + u8 *key, + u8 *data, + u32 *key_index, + struct ulp_mapper_gen_tbl_entry *ent); + +/** + * Perform a search for a matching key in the simple list + * + * @tbl_list: pointer to the generic table list + * @match_key: key to look for + * @key_idx: output index of the matching (or first free) entry + * + * returns the search status (enum ulp_gen_list_search_flag). + */ +int +ulp_gen_tbl_simple_list_search(struct ulp_mapper_gen_tbl_list *tbl_list, + u8 *match_key, + u32 *key_idx); + +#endif /* _ULP_GEN_TBL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.c new file mode 100644 index 000000000000..9ccd7ef2dac7 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.c @@ -0,0 +1,1465 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023-2023 Broadcom + * All rights reserved.
+ */ + +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "bnxt_nic_flow.h" +#include "bnxt_ulp_flow.h" +#include "ulp_tc_parser.h" +#include "ulp_matcher.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_fc_mgr.h" +#include "ulp_port_db.h" +#include "ulp_template_debug_proto.h" +#include "ulp_generic_flow_offload.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +#define BNXT_ULP_GEN_UDP_PORT_VXLAN 4789 +#define BNXT_ULP_GEN_UDP_PORT_VXLAN_MASK 0XFFFF + +/* Utility function to validate field size*/ +static int bnxt_ulp_gen_prsr_fld_size_validate(struct ulp_tc_parser_params + *params, u32 *idx, u32 size) +{ + if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) + return -EINVAL; + + *idx = params->field_idx; + params->field_idx += size; + return BNXT_TF_RC_SUCCESS; +} + +/* Utility function to update the field_bitmap */ +static void bnxt_ulp_gen_parser_field_bitmap_update(struct ulp_tc_parser_params + *params, + u32 idx, + enum bnxt_ulp_prsr_action + prsr_act) +{ + struct ulp_tc_hdr_field *field; + + field = ¶ms->hdr_field[idx]; + if (ulp_bitmap_notzero(field->mask, field->size)) { + ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx); + if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE)) + ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx); + /* Not exact match */ + if (!ulp_bitmap_is_ones(field->mask, field->size)) + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_WC_MATCH, + 1); + } else { + ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx); + } +} + +/* Utility function to copy field spec and masks items */ +static void bnxt_ulp_gen_prsr_fld_mask(struct ulp_tc_parser_params *params, + u32 *idx, + u32 size, + const void *spec_buff, + const void *mask_buff, + enum bnxt_ulp_prsr_action prsr_act) +{ + struct ulp_tc_hdr_field *field = ¶ms->hdr_field[*idx]; + + /* update the field size */ + field->size = size; + + /* copy the mask specifications only if mask is not null */ + if 
(!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) { + memcpy(field->mask, mask_buff, size); + bnxt_ulp_gen_parser_field_bitmap_update(params, *idx, prsr_act); + } + + /* copy the protocol specifications only if mask is not null */ + if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size)) + memcpy(field->spec, spec_buff, size); + + /* Increment the index */ + *idx = *idx + 1; +} + +/* Set the direction based on the source interface */ +static inline void bnxt_ulp_gen_set_dir_attributes(struct bnxt *bp, struct ulp_tc_parser_params + *params, enum bnxt_ulp_gen_direction dir) +{ + /* Set the flow attributes. */ + if (dir == BNXT_ULP_GEN_RX) + params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS; + else + params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS; +} + +static void +bnxt_ulp_gen_init_cf_header_bitmap(struct bnxt_ulp_mapper_parms *params) +{ + uint64_t hdr_bits = 0; + + /* Remove the internal tunnel bits */ + hdr_bits = params->hdr_bitmap->bits; + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_F2); + + /* Add untag bits */ + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OO_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OO_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_OI_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_OI_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_IO_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_IO_UNTAGGED); + if (!ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_II_VLAN)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_II_UNTAGGED); + + /* Add non-tunnel bit */ + if (!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL)) + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_NON_TUNNEL); + + /* Add l2 only bit */ + if ((!ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV4) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_IPV6)) || + (ULP_BITMAP_ISSET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL) && + !ULP_BITMAP_ISSET(hdr_bits, 
BNXT_ULP_HDR_BIT_I_IPV4) && + !ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_IPV6))) { + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_L2_ONLY); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_ONLY); + } + + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PROFILE_BITMAP, hdr_bits); + + /* Update the l4 protocol bits */ + if ((ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP))) { + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_TCP); + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_O_UDP); + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_O_L4_FLOW); + } + + if ((ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP) || + ULP_BITMAP_ISSET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP))) { + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_TCP); + ULP_BITMAP_RESET(hdr_bits, BNXT_ULP_HDR_BIT_I_UDP); + ULP_BITMAP_SET(hdr_bits, BNXT_ULP_HDR_BIT_I_L4_FLOW); + } + + /*update the comp field header bits */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_BITMAP, hdr_bits); +} + +static void +bnxt_ulp_gen_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms, + struct ulp_tc_parser_params *params, + enum bnxt_ulp_fdb_type flow_type) +{ + u32 ulp_flags = 0; + + memset(mparms, 0, sizeof(*mparms)); + + mparms->flow_type = flow_type; + mparms->ulp_ctx = params->ulp_ctx; + mparms->app_priority = params->priority; + mparms->class_tid = params->class_id; + mparms->act_tid = params->act_tmpl; + mparms->func_id = params->func_id; + mparms->hdr_bitmap = ¶ms->hdr_bitmap; + mparms->enc_hdr_bitmap = ¶ms->enc_hdr_bitmap; + mparms->hdr_field = params->hdr_field; + mparms->enc_field = params->enc_field; + mparms->comp_fld = params->comp_fld; + mparms->act_bitmap = ¶ms->act_bitmap; + mparms->act_prop = ¶ms->act_prop; + mparms->flow_id = params->fid; + mparms->fld_bitmap = ¶ms->fld_bitmap; + mparms->flow_pattern_id = params->flow_pattern_id; + mparms->act_pattern_id = params->act_pattern_id; + mparms->wc_field_bitmap = params->wc_field_bitmap; + mparms->app_id = 
params->app_id; + mparms->tun_idx = params->tun_idx; + mparms->cf_bitmap = params->cf_bitmap; + mparms->exclude_field_bitmap = params->exclude_field_bitmap; + + /* update the signature fields into the computed field list */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID, + params->class_info_idx); + + /* update the header bitmap */ + bnxt_ulp_gen_init_cf_header_bitmap(mparms); + + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID, + params->flow_sig_id); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FUNCTION_ID, + params->func_id); + + if (bnxt_ulp_cntxt_ptr2_ulp_flags_get(params->ulp_ctx, &ulp_flags)) + return; + + /* Update the socket direct flag */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_SVIF_IGNORE)) { + uint32_t ifindex; + uint16_t vport; + + /* Get the port db ifindex */ + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, + params->port_id, + &ifindex)) { + netdev_dbg(params->ulp_ctx->bp->dev, "Invalid port id %u\n", + params->port_id); + return; + } + /* Update the phy port of the other interface */ + if (ulp_port_db_vport_get(params->ulp_ctx, ifindex, &vport)) { + netdev_dbg(params->ulp_ctx->bp->dev, "Invalid port if index %u\n", + ifindex); + return; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT, + (vport == 1) ? 
2 : 1); + } +} + +/* Function to handle the update of proto header based on field values */ +static void bnxt_ulp_gen_l2_proto_type_update(struct ulp_tc_parser_params + *param, u16 type, u32 in_flag) +{ + if (type == cpu_to_be16(ETH_P_IP)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } else if (type == cpu_to_be16(ETH_P_IPV6)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } +} + +/* Function to handle the update of proto header based on field values */ +static void bnxt_ulp_gen_l3_proto_type_update(struct ulp_tc_parser_params + *param, u8 proto, u32 in_flag) +{ + if (proto == IPPROTO_UDP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_TCP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_GRE) { + ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE); + } else if (proto == IPPROTO_ICMP) { + if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN)) + ULP_BITMAP_SET(param->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_ICMP); + else + ULP_BITMAP_SET(param->hdr_bitmap.bits, + 
BNXT_ULP_HDR_BIT_O_ICMP); + } + if (proto) { + if (in_flag) { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + proto); + } else { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + proto); + } + } +} + +static int bnxt_ulp_gen_l2_l2_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_eth_hdr *eth_spec, + struct bnxt_ulp_gen_eth_hdr *eth_mask) +{ + u32 idx = 0, dmac_idx = 0; + u32 inner_flag = 0; + u32 size; + u16 eth_type = 0; + + if (eth_spec) { + /* TODO: Perform validations BC, MC etc. */ + eth_type = *eth_spec->type; + } + if (eth_spec) { + /* TODO: Perform validations BC, MC etc. */ + eth_type &= *eth_mask->type; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_ETH_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + /* Copy the flow for eth into hdr_field */ + dmac_idx = idx; + size = ETH_ALEN; + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (eth_spec) ? eth_spec->dst : NULL, + (eth_mask) ? eth_mask->dst : NULL, + ULP_PRSR_ACT_DEFAULT); + + size = ETH_ALEN; + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (eth_spec) ? eth_spec->src : NULL, + (eth_mask) ? eth_mask->src : NULL, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(*eth_spec->type); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (eth_spec) ? eth_spec->type : NULL, + (eth_mask) ? 
eth_mask->type : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* Update the protocol hdr bitmap */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ETH) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_TCP)) { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH); + inner_flag = 1; + } else { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID, + dmac_idx); + } + + /* Update the field protocol hdr bitmap */ + bnxt_ulp_gen_l2_proto_type_update(params, eth_type, inner_flag); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l2_filter_id_handler(struct bnxt *bp, + struct ulp_tc_parser_params + *params, + uint64_t *l2_filter_id) +{ + u32 l2_ctxt_id = 0, prof_func = 0; + int rc = BNXT_TF_RC_ERROR; + u64 l2_filter_id_mask = ~0; + u32 idx = 0; + u32 size; + + if (!l2_filter_id) { + netdev_dbg(bp->dev, "ERR: invalid l2_filter_id\n"); + return rc; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_L2_FILTER_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return rc; + } + + /* Copy the l2_filter_id into hdr_field, there is no mask */ + size = sizeof(*l2_filter_id); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, l2_filter_id, + &l2_filter_id_mask, ULP_PRSR_ACT_DEFAULT); + + /* Update the protocol hdr bitmap */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_L2_FILTER)) { + netdev_dbg(bp->dev, "ERR: not supporting inner and outer L2 filters\n"); + return rc; + } + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_L2_FILTER); + /* Get the l2 context and prof_func from the driver and push + * it into the comp fields + */ + if 
(bnxt_nic_flows_filter_info_get(bp, *l2_filter_id, + &l2_ctxt_id, &prof_func)) { + netdev_dbg(bp->dev, "Error getting l2 filter info\n"); + return rc; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PROF_FUNC_ID, prof_func); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L2_CNTXT_ID, l2_ctxt_id); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l2_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_l2_hdr_parms *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: Nothing to do for L2\n"); + return BNXT_TF_RC_ERROR; + } + + if (parms->type == BNXT_ULP_GEN_L2_L2_HDR) + return bnxt_ulp_gen_l2_l2_handler(bp, + params, + parms->eth_spec, + parms->eth_mask); + + if (parms->type == BNXT_ULP_GEN_L2_L2_FILTER_ID) + return bnxt_ulp_gen_l2_filter_id_handler(bp, + params, + parms->l2_filter_id); + + return BNXT_TF_RC_PARSE_ERR; +} + +static int bnxt_ulp_gen_l3_v6_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_ipv6_hdr *ipv6_spec, + struct bnxt_ulp_gen_ipv6_hdr *ipv6_mask) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + u32 ver_spec = 0, ver_mask = 0; + u32 lab_spec = 0, lab_mask = 0; + u32 tc_spec = 0, tc_mask = 0; + u32 idx = 0, dip_idx = 0; + u32 size, vtc_flow; + u32 inner_flag = 0; + u8 proto_mask = 0; + u8 proto = 0; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV6_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + if (ipv6_spec) { + vtc_flow = (ipv6_spec->vtc_flow) ? 
*ipv6_spec->vtc_flow : 0; + ver_spec = (BNXT_ULP_GET_IPV6_VER(vtc_flow)); + tc_spec = (BNXT_ULP_GET_IPV6_TC(vtc_flow)); + lab_spec = (BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); + proto = (ipv6_spec->proto6) ? *ipv6_spec->proto6 : 0; + } + + if (ipv6_mask) { + vtc_flow = (ipv6_mask->vtc_flow) ? *(ipv6_mask->vtc_flow) : 0; + ver_mask = (BNXT_ULP_GET_IPV6_VER(vtc_flow)); + tc_mask = (BNXT_ULP_GET_IPV6_TC(vtc_flow)); + lab_mask = (BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); + + proto_mask = (ipv6_mask->proto6) ? *ipv6_mask->proto6 : 0; + proto &= proto_mask; + } + + /* version */ + size = sizeof(*ipv6_spec->vtc_flow); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask, + ULP_PRSR_ACT_DEFAULT); + + /* traffic class */ + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask, + ULP_PRSR_ACT_DEFAULT); + + /* flow label: Ignore for matching templates */ + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask, + ULP_PRSR_ACT_MASK_IGNORE); + + /* payload length */ + size = sizeof(*ipv6_spec->payload_len); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv6_spec) ? ipv6_spec->payload_len : NULL, + (ipv6_mask) ? ipv6_mask->payload_len : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* next_proto_id: Ignore proto for matching templates */ + size = sizeof(*ipv6_spec->proto6); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv6_spec) ? ipv6_spec->proto6 : NULL, + (ipv6_mask) ? ipv6_mask->proto6 : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* hop limit (ttl) */ + size = sizeof(*ipv6_spec->hop_limits); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv6_spec) ? ipv6_spec->hop_limits : NULL, + (ipv6_mask) ? ipv6_mask->hop_limits : NULL, + ULP_PRSR_ACT_DEFAULT); + + size = 16; + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv6_spec) ? ipv6_spec->sip6 : NULL, + (ipv6_mask) ? ipv6_mask->sip6 : NULL, + ULP_PRSR_ACT_DEFAULT); + + dip_idx = idx; + size = 16; + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv6_spec) ? 
ipv6_spec->dip6 : NULL, + (ipv6_mask) ? ipv6_mask->dip6 : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv6 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + /* Update the tunnel offload dest ip offset */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, + dip_idx); + } + + /* Update the field protocol hdr bitmap */ + if (proto_mask) + bnxt_ulp_gen_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + + netdev_dbg(bp->dev, "%s: l3-hdr-cnt: %d l3-proto/mask 0x%x/0x%x\n", + __func__, cnt, proto, proto_mask); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l3_v4_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_ipv4_hdr *ipv4_spec, + struct bnxt_ulp_gen_ipv4_hdr *ipv4_mask) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + u32 inner_flag = 0; + u8 proto_mask = 0; + u16 val16 = 0; + u8 proto = 0; + u8 val8 = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV4_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + if (ipv4_spec) + proto = ipv4_spec->proto ? *ipv4_spec->proto : 0; + + if (ipv4_mask) { + proto_mask = ipv4_mask->proto ? 
*ipv4_mask->proto : 0; + proto &= proto_mask; + } + + /* version_ihl */ + size = sizeof(val8); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val8, &val8, + ULP_PRSR_ACT_DEFAULT); + + /* tos: Ignore for matching templates with tunnel flows */ + size = sizeof(val8); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + &val8, &val8, params->tnl_addr_type ? + ULP_PRSR_ACT_MATCH_IGNORE : + ULP_PRSR_ACT_DEFAULT); + + /* total_length */ + size = sizeof(val16); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* packet_id */ + size = sizeof(val16); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* fragment_offset */ + size = sizeof(val16); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* ttl */ + size = sizeof(val8); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val8, &val8, + ULP_PRSR_ACT_DEFAULT); + + /* next_proto_id: Ignore proto for matching templates */ + size = sizeof(val8); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv4_spec) ? ipv4_spec->proto : NULL, + (ipv4_mask) ? ipv4_mask->proto : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* hdr_checksum */ + size = sizeof(val16); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(u32); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv4_spec) ? ipv4_spec->sip : NULL, + (ipv4_mask) ? ipv4_mask->sip : NULL, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(u32); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, + (ipv4_spec) ? ipv4_spec->dip : NULL, + (ipv4_mask) ? 
ipv4_mask->dip : NULL, + ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv4 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + } + + /* Update the field protocol hdr bitmap */ + if (proto_mask) + bnxt_ulp_gen_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + + netdev_dbg(bp->dev, "%s: l3-hdr-cnt: %d l3-proto/mask 0x%x/0x%x\n", + __func__, cnt, proto, proto_mask); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l3_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_l3_hdr_parms *parms) +{ + int rc = BNXT_TF_RC_ERROR; + + if (!parms) { + netdev_dbg(bp->dev, "ERR: Nothing to do for L3\n"); + return BNXT_TF_RC_ERROR; + } + + if (parms->type == BNXT_ULP_GEN_L3_IPV4) + return bnxt_ulp_gen_l3_v4_handler(bp, + params, + parms->v4_spec, + parms->v4_mask); + + if (parms->type == BNXT_ULP_GEN_L3_IPV6) + return bnxt_ulp_gen_l3_v6_handler(bp, + params, + parms->v6_spec, + parms->v6_mask); + + return rc; +} + +static void bnxt_ulp_gen_l4_proto_type_update(struct ulp_tc_parser_params + *params, u16 src_port, + u16 src_mask, u16 dst_port, + u16 dst_mask, + enum bnxt_ulp_hdr_bit hdr_bit) +{ + switch (hdr_bit) { + case BNXT_ULP_HDR_BIT_I_UDP: + case BNXT_ULP_HDR_BIT_I_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT, + (u64)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT, + 
(u64)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK, + (u64)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK, + (u64)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ? + IPPROTO_UDP : IPPROTO_TCP); + break; + case BNXT_ULP_HDR_BIT_O_UDP: + case BNXT_ULP_HDR_BIT_O_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT, + (u64)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT, + (u64)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK, + (u64)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK, + (u64)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ? 
+ IPPROTO_UDP : IPPROTO_TCP); + break; + default: + break; + } + + if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port == + cpu_to_be16(BNXT_ULP_GEN_UDP_PORT_VXLAN)) { + ULP_BITMAP_SET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_T_VXLAN); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + } +} + +static void bnxt_ulp_gen_bth_proto_type_update(struct ulp_tc_parser_params + *params, u16 op_code, + u16 op_code_mask, u16 dst_qpn, + u16 dst_qpn_mask, + enum bnxt_ulp_hdr_bit hdr_bit) +{ + switch (hdr_bit) { + case BNXT_ULP_HDR_BIT_I_BTH: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_BITMAP_RESET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_UDP); + ULP_BITMAP_RESET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_UDP); + break; + case BNXT_ULP_HDR_BIT_O_BTH: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_BITMAP_RESET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_UDP); + ULP_BITMAP_RESET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_UDP); + break; + default: + break; + } +} + +static int bnxt_ulp_gen_l4_udp_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_udp_hdr *spec, + struct bnxt_ulp_gen_udp_hdr *mask) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP; + u16 dport_mask = 0, sport_mask = 0; + u16 dport = 0, sport = 0; + u16 dgram_cksum = 0; + u16 dgram_len = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (spec) { + sport = (spec->sport) ? *spec->sport : 0; + dport = (spec->dport) ? *spec->dport : 0; + } + if (mask) { + sport_mask = (mask->sport) ? *mask->sport : 0; + dport_mask = (mask->dport) ? 
*mask->dport : 0; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_UDP_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(*spec->sport); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->sport, + mask->sport, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(*spec->dport); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->dport, + spec->dport, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(dgram_len); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &dgram_len, &dgram_len, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(dgram_cksum); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, &dgram_cksum, + &dgram_cksum, ULP_PRSR_ACT_DEFAULT); + + /* Set the udp header bitmap and computed l4 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_UDP; + + bnxt_ulp_gen_l4_proto_type_update(params, sport, sport_mask, dport, + dport_mask, out_l4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l4_tcp_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_tcp_hdr *spec, + struct bnxt_ulp_gen_tcp_hdr *mask) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP; + u16 dport_mask = 0, sport_mask = 0; + u16 dport = 0, sport = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (spec) { + sport = (spec->sport) ? *spec->sport : 0; + dport = (spec->dport) ? *spec->dport : 0; + } + if (mask) { + sport_mask = (mask->sport) ? *mask->sport : 0; + dport_mask = (mask->dport) ? 
*mask->dport : 0; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_TCP_NUM - + 7)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(*spec->sport); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->sport, + mask->sport, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(*spec->dport); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->dport, + mask->dport, ULP_PRSR_ACT_DEFAULT); + + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_TCP; + + bnxt_ulp_gen_l4_proto_type_update(params, sport, sport_mask, dport, + dport_mask, out_l4); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l4_roce_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_bth_hdr *spec, + struct bnxt_ulp_gen_bth_hdr *mask) +{ + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_BTH; + u16 dst_qpn_mask = 0, op_code_mask = 0; + u16 dst_qpn = 0, op_code = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (spec) { + op_code = (spec->op_code) ? *spec->op_code : 0; + dst_qpn = (spec->dst_qpn) ? *spec->dst_qpn : 0; + } + if (mask) { + op_code_mask = (mask->op_code) ? *mask->op_code : 0; + dst_qpn_mask = (mask->dst_qpn) ? 
*mask->dst_qpn : 0; + } + + if (bnxt_ulp_gen_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_BTH_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + if (spec->op_code) + netdev_dbg(bp->dev, + "L4 header idx %d opcde 0x%x\n", idx, *spec->op_code); + size = sizeof(*spec->op_code); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->op_code, + mask->op_code, ULP_PRSR_ACT_DEFAULT); + + if (spec->dst_qpn) + netdev_dbg(bp->dev, + "L4 header idx %d qpn 0x%x\n", idx, *spec->dst_qpn); + size = sizeof(*spec->dst_qpn); + bnxt_ulp_gen_prsr_fld_mask(params, &idx, size, spec->dst_qpn, + mask->dst_qpn, ULP_PRSR_ACT_DEFAULT); + + if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_BTH; + + bnxt_ulp_gen_bth_proto_type_update(params, op_code, op_code_mask, + dst_qpn, dst_qpn_mask, out_l4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_l4_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_l4_hdr_parms *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: Nothing to do for L4\n"); + return BNXT_TF_RC_ERROR; + } + + if (parms->type == BNXT_ULP_GEN_L4_UDP) + return bnxt_ulp_gen_l4_udp_handler(bp, + params, + parms->udp_spec, + parms->udp_mask); + + if (parms->type == BNXT_ULP_GEN_L4_TCP) + return bnxt_ulp_gen_l4_tcp_handler(bp, + params, + parms->tcp_spec, + parms->tcp_mask); + + if (parms->type == BNXT_ULP_GEN_L4_BTH) + return bnxt_ulp_gen_l4_roce_handler(bp, + params, + parms->bth_spec, + parms->bth_mask); + + return BNXT_TF_RC_ERROR; +} + +static int bnxt_ulp_gen_hdr_parser(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_flow_parms *parms) +{ + int32_t rc = 0; + + if (!parms) { + netdev_dbg(bp->dev, "ERR: Flow add parms is NULL\n"); + return -EINVAL; + } + + params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM; + + if (parms->l2) + rc = 
bnxt_ulp_gen_l2_handler(bp, params, parms->l2); + if (rc) { + netdev_dbg(bp->dev, "ERR: L2 Handler error = %d\n", rc); + return rc; + } + if (parms->l3) + rc = bnxt_ulp_gen_l3_handler(bp, params, parms->l3); + if (rc) { + netdev_dbg(bp->dev, "ERR: L3 Handler error = %d\n", rc); + return rc; + } + if (parms->l4) + rc = bnxt_ulp_gen_l4_handler(bp, params, parms->l4); + if (rc) { + netdev_dbg(bp->dev, "ERR: L4 Handler error = %d\n", rc); + return rc; + } + + /* update the implied SVIF */ + rc = ulp_tc_parser_implicit_match_port_process(params); + return rc; +} + +static int bnxt_ulp_gen_act_kid_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_action_parms *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: NULL parms for KID action\n"); + return BNXT_TF_RC_ERROR; + } + + netdev_dbg(bp->dev, "ERR: Not implemented\n"); + return BNXT_TF_RC_ERROR; +} + +static int bnxt_ulp_gen_act_drop_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_action_parms + *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: NULL parms for DROP action\n"); + return BNXT_TF_RC_ERROR; + } + + /* Update the hdr_bitmap with drop */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_act_queue_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_action_parms + *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: NULL parms for QUEUE action\n"); + return BNXT_TF_RC_ERROR; + } + + netdev_dbg(bp->dev, "ERR: Not implemented\n"); + return BNXT_TF_RC_ERROR; +} + +static int bnxt_ulp_gen_act_redirect_handler(struct bnxt *bp, struct ulp_tc_parser_params + *params, struct bnxt_ulp_gen_action_parms + *parms) +{ + enum bnxt_ulp_intf_type intf_type; + u32 ifindex; + u16 dst_fid; + + if (!parms) { + netdev_dbg(bp->dev, "ERR: NULL parms for REDIRECT action\n"); + return BNXT_TF_RC_ERROR; + } + + dst_fid = parms->dst_fid; + + /* Get 
the port db ifindex */ + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, dst_fid, + &ifindex)) { + netdev_dbg(bp->dev, "Invalid destination fid %d\n", dst_fid); + return BNXT_TF_RC_ERROR; + } + + /* Get the intf type */ + intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); + if (!intf_type) { + netdev_dbg(bp->dev, "Invalid port type\n"); + return BNXT_TF_RC_ERROR; + } + + /* Set the action port */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID, dst_fid); + + return ulp_tc_parser_act_port_set(params, ifindex); +} + +static int bnxt_ulp_gen_act_numa_direct_handler(struct bnxt *bp, struct ulp_tc_parser_params + *params, struct bnxt_ulp_gen_action_parms + *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, + "ERR: NULL parms for NUMA-DIRECT action\n"); + return BNXT_TF_RC_ERROR; + } + + netdev_dbg(bp->dev, "ERR: Not implemented\n"); + return BNXT_TF_RC_ERROR; +} + +static int bnxt_ulp_gen_act_count_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_action_parms + *parms) +{ + if (!parms) { + netdev_dbg(bp->dev, "ERR: NULL parms for COUNT action\n"); + return BNXT_TF_RC_ERROR; + } + + /* Update the hdr_bitmap with count */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_act_modify_smac_handler(struct bnxt *bp, struct ulp_tc_parser_params + *params, struct bnxt_ulp_gen_action_parms + *parms) +{ + struct ulp_tc_act_prop *act = ¶ms->act_prop; + + if (!parms) { + netdev_dbg(bp->dev, + "ERR: NULL parms for Modify SMAC action\n"); + return BNXT_TF_RC_ERROR; + } + + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC], + parms->smac, BNXT_ULP_ACT_PROP_SZ_SET_MAC_SRC); + + /* Update the hdr_bitmap with set mac src */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_MAC_SRC); + return BNXT_TF_RC_SUCCESS; +} + +static int 
bnxt_ulp_gen_act_modify_dmac_handler(struct bnxt *bp, struct ulp_tc_parser_params + *params, struct bnxt_ulp_gen_action_parms + *parms) +{ + struct ulp_tc_act_prop *act = ¶ms->act_prop; + + if (!parms) { + netdev_dbg(bp->dev, + "ERR: NULL parms for Modify DMAC action\n"); + return BNXT_TF_RC_ERROR; + } + + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST], + parms->dmac, BNXT_ULP_ACT_PROP_SZ_SET_MAC_DST); + + /* Update the hdr_bitmap with set mac dst */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_MAC_DST); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_gen_act_parser(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct bnxt_ulp_gen_flow_parms *parms) +{ + int rc = 0; + u64 actions; + + if (!parms) { + netdev_dbg(bp->dev, "ERR: Flow actions parms is NULL\n"); + return -EINVAL; + } + + if (parms->actions) + actions = parms->actions->enables; + else + return -EIO; + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_KID) + rc = bnxt_ulp_gen_act_kid_handler(bp, params, parms->actions); + if (rc) { + netdev_dbg(bp->dev, "ERR: KID Action Handler error = %d\n", rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_DROP) + rc = bnxt_ulp_gen_act_drop_handler(bp, params, parms->actions); + if (rc) { + netdev_dbg(bp->dev, "ERR: DROP Action Handler error = %d\n", + rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_QUEUE) + rc = bnxt_ulp_gen_act_queue_handler(bp, params, parms->actions); + if (rc) { + netdev_dbg(bp->dev, "ERR: QUEUE Action Handler error = %d\n", + rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT) + rc = bnxt_ulp_gen_act_redirect_handler(bp, params, + parms->actions); + if (rc) { + netdev_dbg(bp->dev, "ERR: REDIRECT Action Handler error = %d\n", + rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_NUMA_DIRECT) + rc = bnxt_ulp_gen_act_numa_direct_handler(bp, params, + parms->actions); + if (rc) { + netdev_dbg(bp->dev, + "ERR: NUMA_DIRECT Action 
Handler error = %d\n", rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_COUNT) + rc = bnxt_ulp_gen_act_count_handler(bp, params, parms->actions); + if (rc) { + netdev_dbg(bp->dev, "ERR: COUNT Action Handler error = %d\n", + rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_SET_SMAC) + rc = bnxt_ulp_gen_act_modify_smac_handler(bp, params, + parms->actions); + if (rc) { + netdev_dbg(bp->dev, + "ERR: Modify SMAC Action Handler error = %d\n", rc); + return rc; + } + + if (actions & BNXT_ULP_GEN_ACTION_ENABLES_SET_DMAC) + rc = bnxt_ulp_gen_act_modify_dmac_handler(bp, params, + parms->actions); + if (rc) { + netdev_dbg(bp->dev, + "ERR: Modify DMAC Action Handler error = %d\n", rc); + return rc; + } + + return rc; +} + +int bnxt_ulp_gen_flow_create(struct bnxt *bp, + u16 src_fid, + struct bnxt_ulp_gen_flow_parms *flow_parms) +{ + struct bnxt_ulp_mapper_parms mapper_mparms = { 0 }; + struct ulp_tc_parser_params *parser_params = NULL; + struct bnxt_ulp_context *ulp_ctx; + int rc, tf_rc = BNXT_TF_RC_ERROR; + unsigned long lastused; + u64 packets, bytes; + u16 func_id; + u32 fid; + + /* Initialize the parser parser_params */ + parser_params = vzalloc(sizeof(*parser_params)); + if (!parser_params) + goto flow_error; + + /* Get the ULP Context */ + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + goto flow_error; + } + parser_params->ulp_ctx = ulp_ctx; + + /* Get the ULP APP id */ + if (bnxt_ulp_cntxt_app_id_get + (parser_params->ulp_ctx, &parser_params->app_id)) { + netdev_dbg(bp->dev, "Failed to get the app id\n"); + goto flow_error; + } + + /* Set the flow attributes */ + bnxt_ulp_gen_set_dir_attributes(bp, parser_params, flow_parms->dir); + + /* Copy the device port id and direction for further processing */ + ULP_COMP_FLD_IDX_WR(parser_params, BNXT_ULP_CF_IDX_INCOMING_IF, + src_fid); + ULP_COMP_FLD_IDX_WR(parser_params, BNXT_ULP_CF_IDX_DEV_PORT_ID, + src_fid); + 
ULP_COMP_FLD_IDX_WR(parser_params, BNXT_ULP_CF_IDX_SVIF_FLAG, + BNXT_ULP_INVALID_SVIF_VAL); + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "Conversion of port to func id failed src_fid(%d)\n", + src_fid); + goto flow_error; + } + + /* Protect flow creation */ + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + + /* Allocate a Flow ID to attach all resources for the flow. + * Once allocated, all errors have to walk the list of resources and + * free each of them. + */ + rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + func_id, &fid); + if (rc) { + netdev_dbg(bp->dev, "Unable to allocate flow table entry\n"); + goto release_lock; + } + + /* Parse the flow headers */ + rc = bnxt_ulp_gen_hdr_parser(bp, parser_params, flow_parms); + if (rc) { + netdev_dbg(bp->dev, "ERR: Failed to parse headers\n"); + goto free_fid; + } + + /* Parse the flow action */ + rc = bnxt_ulp_gen_act_parser(bp, parser_params, flow_parms); + if (rc) { + netdev_dbg(bp->dev, "ERR: Failed to parse actions\n"); + goto free_fid; + } + + parser_params->fid = fid; + parser_params->func_id = func_id; + parser_params->port_id = src_fid; + parser_params->priority = flow_parms->priority; + + netdev_dbg(bp->dev, "Flow prio: %u func_id: %u APP ID %u\n", + parser_params->priority, func_id, parser_params->app_id); + + /* Perform the flow post process */ + tf_rc = bnxt_ulp_tc_parser_post_process(parser_params); + if (tf_rc == BNXT_TF_RC_ERROR) + goto free_fid; + else if (tf_rc == BNXT_TF_RC_FID) + goto return_fid; + + /* Dump the flow pattern */ + ulp_parser_hdr_info_dump(parser_params); + /* Dump the flow action */ + ulp_parser_act_info_dump(parser_params); + + tf_rc = + ulp_matcher_pattern_match(parser_params, &parser_params->class_id); + if (tf_rc != BNXT_TF_RC_SUCCESS) + goto free_fid; + + tf_rc = + ulp_matcher_action_match(parser_params, &parser_params->act_tmpl); + if (tf_rc != BNXT_TF_RC_SUCCESS) + goto free_fid; + + 
bnxt_ulp_gen_init_mapper_params(&mapper_mparms, parser_params, + BNXT_ULP_FDB_TYPE_REGULAR); + + /* Call the ulp mapper to create the flow in the hardware. */ + tf_rc = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (tf_rc) + goto free_fid; + + return_fid: + /* Setup return vals for caller */ + if (flow_parms->flow_id) + *flow_parms->flow_id = fid; + + vfree(parser_params); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + /* Setup return HW counter id for caller, if requested */ + if (flow_parms->counter_hndl) + ulp_tf_fc_mgr_query_count_get(ulp_ctx, + fid, &packets, + &bytes, &lastused, + flow_parms->counter_hndl); + return BNXT_TF_RC_SUCCESS; + + free_fid: + vfree(parser_params->tnl_key); + vfree(parser_params->neigh_key); + ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid); + release_lock: + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + flow_error: + vfree(parser_params); + if (tf_rc == -ENOSPC) + return tf_rc; + else + return (tf_rc == + BNXT_TF_RC_PARSE_ERR_NOTSUPP) ? 
-EOPNOTSUPP : -EIO; +} + +int bnxt_ulp_gen_flow_destroy(struct bnxt *bp, u16 src_fid, u32 flow_id) +{ + struct bnxt_ulp_context *ulp_ctx; + u16 func_id; + int rc; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + return -ENOENT; + } + + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "Conversion of port to func id failed\n"); + return -EINVAL; + } + + rc = ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id); + if (rc) + return rc; + + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + rc = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + flow_id, NULL); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return rc; +} + +void bnxt_ulp_gen_flow_query_count(struct bnxt *bp, + u32 flow_id, + u64 *packets, + u64 *bytes, unsigned long *lastused) +{ + ulp_tf_fc_mgr_query_count_get(bp->ulp_ctx, flow_id, packets, bytes, + lastused, NULL); +} + +#endif /*if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.h new file mode 100644 index 000000000000..6044681e63c6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_generic_flow_offload.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_GENERIC_FLOW_OFFLOAD_H_ +#define _ULP_GENERIC_FLOW_OFFLOAD_H_ + +/* All arguments are expected to be in big-endian */ + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_eth_hdr { + u8 *dst; /* Destination MAC */ + u8 *src; /* Source MAC. */ + u16 *type; /* EtherType or TPID. 
*/ +}; + +enum bnxt_ulp_gen_l2_class_type { + BNXT_ULP_GEN_L2_NONE, + BNXT_ULP_GEN_L2_L2_FILTER_ID, + BNXT_ULP_GEN_L2_L2_HDR, + BNXT_ULP_GEN_L2_LAST +}; + +struct bnxt_ulp_gen_l2_hdr_parms { + enum bnxt_ulp_gen_l2_class_type type; + union { + uint64_t *l2_filter_id; + struct { + struct bnxt_ulp_gen_eth_hdr *eth_spec; + struct bnxt_ulp_gen_eth_hdr *eth_mask; + }; + }; +}; + +enum bnxt_ulp_gen_l3_type { + BNXT_ULP_GEN_L3_NONE, + BNXT_ULP_GEN_L3_IPV4, + BNXT_ULP_GEN_L3_IPV6, + BNXT_ULP_GEN_L3_LAST +}; + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_ipv4_hdr { + u32 *sip; /* IPV4 Source Address. */ + u32 *dip; /* IPV4 Destination Address. */ + u8 *proto; /* IP Protocol */ +}; + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_ipv6_hdr { + u32 *vtc_flow; /* IP version, traffic class & flow label. */ + u16 *payload_len; /* IP payload size, including ext. headers */ + u8 *proto6; /* Next Header */ + u8 *hop_limits; /* Hop limits. */ + u8 *sip6; /* IPV6 Source Address. */ + u8 *dip6; /* IPV6 Destination Address. */ +}; + +struct bnxt_ulp_gen_l3_hdr_parms { + enum bnxt_ulp_gen_l3_type type; + union { + struct { + struct bnxt_ulp_gen_ipv6_hdr *v6_spec; + struct bnxt_ulp_gen_ipv6_hdr *v6_mask; + }; + struct { + struct bnxt_ulp_gen_ipv4_hdr *v4_spec; + struct bnxt_ulp_gen_ipv4_hdr *v4_mask; + }; + }; +}; + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_udp_hdr { + u16 *sport; /* Source Port. */ + u16 *dport; /* Destination Port */ +}; + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_tcp_hdr { + u16 *sport; /* Source Port. 
*/ + u16 *dport; /* Destination Port */ +}; + +/* Fields that are NULL will not be included in the key */ +struct bnxt_ulp_gen_bth_hdr { + u16 *op_code; /* RoCE: L4 dstport == BTH.OpCode */ + u32 *dst_qpn; /* RoCE: L4 ack_num == BTH.dstQP */ +}; + +enum bnxt_ulp_gen_l4_hdr_type { + BNXT_ULP_GEN_L4_NONE, + BNXT_ULP_GEN_L4_UDP, + BNXT_ULP_GEN_L4_TCP, + BNXT_ULP_GEN_L4_BTH, + BNXT_ULP_GEN_L4_LAST +}; + +struct bnxt_ulp_gen_l4_hdr_parms { + enum bnxt_ulp_gen_l4_hdr_type type; + struct { + struct bnxt_ulp_gen_udp_hdr *udp_spec; + struct bnxt_ulp_gen_udp_hdr *udp_mask; + }; + struct { + struct bnxt_ulp_gen_tcp_hdr *tcp_spec; + struct bnxt_ulp_gen_tcp_hdr *tcp_mask; + }; + struct { + struct bnxt_ulp_gen_bth_hdr *bth_spec; + struct bnxt_ulp_gen_bth_hdr *bth_mask; + }; +}; + +struct bnxt_ulp_gen_action_parms { +#define BNXT_ULP_GEN_ACTION_ENABLES_KID 0x1UL +#define BNXT_ULP_GEN_ACTION_ENABLES_DROP 0x2UL +#define BNXT_ULP_GEN_ACTION_ENABLES_QUEUE 0x4UL +#define BNXT_ULP_GEN_ACTION_ENABLES_REDIRECT 0x8UL +#define BNXT_ULP_GEN_ACTION_ENABLES_NUMA_DIRECT 0x10UL +#define BNXT_ULP_GEN_ACTION_ENABLES_COUNT 0x20UL +#define BNXT_ULP_GEN_ACTION_ENABLES_SET_SMAC 0x40UL +#define BNXT_ULP_GEN_ACTION_ENABLES_SET_DMAC 0x80UL + uint64_t enables; + uint64_t kid; + u8 smac[ETH_ALEN]; + u8 dmac[ETH_ALEN]; + u32 queue; + u16 dst_fid; + bool drop; +}; + +enum bnxt_ulp_gen_direction { + BNXT_ULP_GEN_RX, + BNXT_ULP_GEN_TX +}; + +struct bnxt_ulp_gen_flow_parms { + struct bnxt_ulp_gen_l2_hdr_parms *l2; + struct bnxt_ulp_gen_l3_hdr_parms *l3; + struct bnxt_ulp_gen_l4_hdr_parms *l4; + struct bnxt_ulp_gen_action_parms *actions; + enum bnxt_ulp_gen_direction dir; + u8 app_id; + u16 priority; + + /* Return to caller */ + u32 *flow_id; + u64 *counter_hndl; +}; + +/* ULP flow create interface */ +int bnxt_ulp_gen_flow_create(struct bnxt *bp, + u16 src_fid, + struct bnxt_ulp_gen_flow_parms *flow_parms); + +/* ULP flow delete interface */ +int bnxt_ulp_gen_flow_destroy(struct bnxt *bp, u16 src_fid, u32 
flow_id); + +/* ULP flow statistics interface */ +void bnxt_ulp_gen_flow_query_count(struct bnxt *bp, + u32 flow_id, + u64 *packets, + u64 *bytes, unsigned long *lastused); + +#endif /* #ifndef _ULP_GENERIC_FLOW_OFFLOAD_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_linux.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_linux.h new file mode 100644 index 000000000000..6c22aa71776f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_linux.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_LINUX_H_ +#define _ULP_LINUX_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#endif /* _ULP_LINUX_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.c new file mode 100644 index 000000000000..16b270094c82 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.c @@ -0,0 +1,4544 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_common.h" +#include "ulp_utils.h" +#include "bnxt_tf_ulp.h" +#include "tf_ext_flow_handle.h" +#include "ulp_mark_mgr.h" +#include "ulp_mapper.h" +#include "ulp_flow_db.h" +#include "tf_util.h" +#include "ulp_template_db_tbl.h" +#include "ulp_port_db.h" +#include "ulp_template_debug_proto.h" +#include "ulp_tf_debug.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_tc_shim.h" +#include "bnxt_tf_ulp_p5.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +static u8 mapper_fld_zeros[16] = { 0 }; + +static u8 mapper_fld_ones[16] = { + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF +}; + +static u8 mapper_fld_one[16] = { + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 +}; + +static int +ulp_mapper_cond_opc_list_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_cond_list_info *info, + int *res); + +static const struct ulp_mapper_core_ops * +bnxt_ulp_mapper_ops_get(struct bnxt *bp) +{ + const struct ulp_mapper_core_ops *func_ops; + enum bnxt_ulp_device_id dev_id; + int rc; + + rc = bnxt_ulp_devid_get(bp, &dev_id); + if (rc) + return NULL; + + switch (dev_id) { + case BNXT_ULP_DEVICE_ID_THOR2: + func_ops = &ulp_mapper_tfc_core_ops; + break; + case BNXT_ULP_DEVICE_ID_THOR: + case BNXT_ULP_DEVICE_ID_WH_PLUS: + func_ops = &ulp_mapper_tf_core_ops; + break; + default: + func_ops = NULL; + break; + } + return func_ops; +} + +static const struct ulp_mapper_core_ops * +ulp_mapper_data_oper_get(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_mapper_data *m_data; + + m_data = (struct bnxt_ulp_mapper_data *)ulp_ctx->cfg_data->mapper_data; + return m_data->mapper_oper; +} + +static const char * +ulp_mapper_tmpl_name_str(enum 
bnxt_ulp_template_type tmpl_type) +{ + switch (tmpl_type) { + case BNXT_ULP_TEMPLATE_TYPE_CLASS: + return "class"; + case BNXT_ULP_TEMPLATE_TYPE_ACTION: + return "action"; + default: + return "invalid template type"; + } +} + +static struct bnxt_ulp_glb_resource_info * +ulp_mapper_glb_resource_info_list_get(u32 *num_entries) +{ + if (!num_entries) + return NULL; + *num_entries = BNXT_ULP_GLB_RESOURCE_TBL_MAX_SZ; + return ulp_glb_resource_tbl; +} + +/* Read the global resource from the mapper global resource list + * + * The regval is always returned in big-endian. + * + * returns 0 on success + */ +int +ulp_mapper_glb_resource_read(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + u16 idx, + u64 *regval, + bool *shared) +{ + if (!mapper_data || !regval || !shared || + dir >= TF_DIR_MAX || idx >= BNXT_ULP_GLB_RF_IDX_LAST) + return -EINVAL; + + *regval = mapper_data->glb_res_tbl[dir][idx].resource_hndl; + *shared = mapper_data->glb_res_tbl[dir][idx].shared; + return 0; +} + +/* Write a global resource to the mapper global resource list + * + * The regval value must be in big-endian. + * + * return 0 on success. + */ +int +ulp_mapper_glb_resource_write(struct bnxt_ulp_mapper_data *data, + struct bnxt_ulp_glb_resource_info *res, + u64 regval, bool shared) +{ + struct bnxt_ulp_mapper_glb_resource_entry *ent; + + /* validate the arguments */ + if (!data || res->direction >= TF_DIR_MAX || + res->glb_regfile_index >= BNXT_ULP_GLB_RF_IDX_LAST) + return -EINVAL; + + /* write to the mapper data */ + ent = &data->glb_res_tbl[res->direction][res->glb_regfile_index]; + ent->resource_func = res->resource_func; + ent->resource_type = res->resource_type; + ent->resource_hndl = regval; + ent->shared = shared; + return 0; +} + +/* Internal function to allocate identity resource and store it in mapper data. 
+ * + * returns 0 on success + */ +int +ulp_mapper_resource_ident_allocate(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data, + struct bnxt_ulp_glb_resource_info *glb_res, + bool shared) +{ + const struct ulp_mapper_core_ops *op = mapper_data->mapper_oper; + u32 session_type = BNXT_ULP_SESSION_TYPE_DEFAULT; + struct ulp_flow_db_res_params res = { 0 }; + struct bnxt *bp = ulp_ctx->bp; + u64 regval, id = 0; + int rc = 0; + + session_type = shared ? BNXT_ULP_SESSION_TYPE_SHARED : + BNXT_ULP_SESSION_TYPE_DEFAULT; + + /* This function only called for Thor so track type ignored */ + rc = op->ulp_mapper_core_ident_alloc_process(ulp_ctx, + session_type, + glb_res->resource_type, + glb_res->direction, + CFA_TRACK_TYPE_SID, + &id); + if (rc) + return rc; + + /* entries are stored as big-endian format */ + regval = cpu_to_be64(id); + /* + * write to the mapper global resource + * Shared resources are never allocated through this method, so the + * shared flag is always false. + */ + rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, shared); + if (rc) { + netdev_dbg(bp->dev, "Failed to write to global resource id\n"); + /* Free the identifier when update failed */ + res.direction = glb_res->direction; + res.resource_type = glb_res->resource_type; + res.resource_hndl = id; + op->ulp_mapper_core_ident_free(ulp_ctx, &res); + return rc; + } + netdev_dbg(bp->dev, "Allocated %s Glb Res Ident [%s][%d][%d] = 0x%04llx\n", + shared ? "Shared" : "Regular", tf_dir_2_str(glb_res->direction), + glb_res->glb_regfile_index, glb_res->resource_type, id); + return rc; +} + +/* Internal function to allocate index tbl resource and store it in mapper data. 
+ * + * returns 0 on success + */ +int +ulp_mapper_resource_index_tbl_alloc(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data, + struct bnxt_ulp_glb_resource_info *glb_res, + bool shared) +{ + const struct ulp_mapper_core_ops *op = mapper_data->mapper_oper; + u32 session_type = BNXT_ULP_SESSION_TYPE_DEFAULT; + struct ulp_flow_db_res_params res = { 0 }; + struct bnxt *bp = ulp_ctx->bp; + u64 regval, index = 0; + int rc = 0; + + session_type = shared ? BNXT_ULP_SESSION_TYPE_SHARED : + BNXT_ULP_SESSION_TYPE_DEFAULT; + + op->ulp_mapper_core_index_tbl_alloc_process(ulp_ctx, session_type, + glb_res->resource_type, + glb_res->direction, + &index); + + /* entries are stored as big-endian format */ + regval = cpu_to_be64((u64)index); + /* + * write to the mapper global resource + * Shared resources are never allocated through this method, so the + * shared flag is always false. + */ + rc = ulp_mapper_glb_resource_write(mapper_data, glb_res, regval, shared); + if (rc) { + netdev_dbg(bp->dev, "Failed to write to global resource id\n"); + /* Free the index when update failed */ + res.direction = glb_res->direction; + res.resource_type = glb_res->resource_type; + res.resource_hndl = index; + rc = op->ulp_mapper_core_cmm_entry_free(ulp_ctx, &res, NULL); + return rc; + } + netdev_dbg(bp->dev, "Allocated Glb ReRs Index [%s][%d][%d] = %llu\n", + tf_dir_2_str(glb_res->direction), + glb_res->glb_regfile_index, glb_res->resource_type, index); + return rc; +} + +static int +ulp_mapper_glb_field_tbl_get(struct bnxt_ulp_mapper_parms *parms, + u32 operand, + u8 *val) +{ + u32 t_idx; + + if (operand >= BNXT_ULP_GLB_FIELD_TBL_SIZE) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid hdr field index %x:%x\n", + parms->class_tid, operand); + *val = 0; + return -EINVAL; /* error */ + } + + t_idx = ULP_COMP_FLD_IDX_RD(parms, BNXT_ULP_CF_IDX_HDR_SIG_ID); + *val = ulp_class_match_list[t_idx].field_list[operand]; + return 0; +} + +/** + * Get the size of the action property 
for a given index. + * + * @idx: The index for the action property + * + * returns the size of the action property. + */ +static u32 +ulp_mapper_act_prop_size_get(u32 idx) +{ + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) + return 0; + return ulp_act_prop_map_table[idx]; +} + +static struct bnxt_ulp_mapper_cond_list_info * +ulp_mapper_tmpl_reject_list_get(struct bnxt_ulp_mapper_parms *mparms, + u32 tid) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + return &dev_tbls->tmpl_list[tid].reject_info; +} + +static struct bnxt_ulp_mapper_cond_list_info * +ulp_mapper_cond_oper_list_get(struct bnxt_ulp_mapper_parms *mparms, + u32 idx) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (idx >= dev_tbls->cond_oper_list_size) + return NULL; + return &dev_tbls->cond_oper_list[idx]; +} + +static struct bnxt_ulp_mapper_cond_info * +ulp_mapper_tmpl_cond_list_get(struct bnxt_ulp_mapper_parms *mparms, + u32 idx) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (idx >= dev_tbls->cond_list_size) + return NULL; + return &dev_tbls->cond_list[idx]; +} + +/** + * Get a list of classifier tables that implement the flow + * Gets a device dependent list of tables that implement the class template id + * + * @mparms: The mappers parms with data related to the flow. 
+ * + * @tid: The template id that matches the flow + * + * @num_tbls: The number of classifier tables in the returned array + * + * returns An array of classifier tables to implement the flow, or NULL on + * error + */ +static struct bnxt_ulp_mapper_tbl_info * +ulp_mapper_tbl_list_get(struct bnxt_ulp_mapper_parms *mparms, + u32 tid, + u32 *num_tbls) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + u32 idx; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + + idx = dev_tbls->tmpl_list[tid].start_tbl_idx; + *num_tbls = dev_tbls->tmpl_list[tid].num_tbls; + + return &dev_tbls->tbl_list[idx]; +} + +/** + * Get the list of key fields that implement the flow. + * + * @mparms: The mapper parms with information about the flow + * + * @tbl: A single table instance to get the key fields from + * + * @num_flds: The number of key fields in the returned array + * + * Returns array of Key fields, or NULL on error. + */ +struct bnxt_ulp_mapper_key_info * +ulp_mapper_key_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + u32 idx; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (!dev_tbls->key_info_list) { + *num_flds = 0; + return NULL; + } + + idx = tbl->key_start_idx; + *num_flds = tbl->key_num_fields; + + return &dev_tbls->key_info_list[idx]; +} + +/* + * Get the list of partial key fields that implement the flow. 
+ * + * @mparms: The mapper parms with information about the flow + * + * @tbl: A single table instance to get the key fields from + * + * Return number of partial fields.return 0 if no partial fields + */ +u32 +ulp_mapper_partial_key_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (!dev_tbls->key_info_list) + return 0; + return tbl->partial_key_num_fields; +} + +/* Get the list of data fields that implement the flow. + * + * @mparms: The mapper parms with information about the flow + * + * @tbl: A single table instance to get the data fields from + * + * @num_flds: The number of data fields in the returned array. + * + * @num_encap_flds: The number of encap fields in the returned array. + * + * Returns array of data fields, or NULL on error. + */ +static struct bnxt_ulp_mapper_field_info * +ulp_mapper_result_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds, + u32 *num_encap_flds) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + u32 idx; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (!dev_tbls->result_field_list) { + *num_flds = 0; + *num_encap_flds = 0; + return NULL; + } + + idx = tbl->result_start_idx; + *num_flds = tbl->result_num_fields; + *num_encap_flds = tbl->encap_num_fields; + + return &dev_tbls->result_field_list[idx]; +} + +/** + * Get the list of ident fields that implement the flow + * + * @tbl: A single table instance to get the ident fields from + * + * @num_flds: The number of ident fields in the returned array + * + * returns array of ident fields, or NULL on error + */ +static struct bnxt_ulp_mapper_ident_info * +ulp_mapper_ident_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds) +{ + const struct bnxt_ulp_template_device_tbls 
*dev_tbls; + u32 idx; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (!dev_tbls->ident_list) { + *num_flds = 0; + return NULL; + } + + idx = tbl->ident_start_idx; + *num_flds = tbl->ident_nums; + + return &dev_tbls->ident_list[idx]; +} + +static struct bnxt_ulp_mapper_field_info * +ulp_mapper_tmpl_key_ext_list_get(struct bnxt_ulp_mapper_parms *mparms, + u32 idx) +{ + const struct bnxt_ulp_template_device_tbls *dev_tbls; + + dev_tbls = &mparms->device_params->dev_tbls[mparms->tmpl_type]; + if (idx >= dev_tbls->key_ext_list_size) + return NULL; + return &dev_tbls->key_ext_list[idx]; +} + +static inline int +ulp_mapper_mark_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + return ulp_mark_db_mark_del(ulp, + res->resource_type, + res->resource_hndl); +} + +/* Process the flow database opcode alloc action. + * returns 0 on success + */ +static int +ulp_mapper_fdb_opc_alloc_rid(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + u32 rid = 0; + int rc = 0; + u64 val64; + + /* allocate a new fid */ + rc = ulp_flow_db_fid_alloc(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_RID, + 0, &rid); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Unable to allocate flow table entry\n"); + return -EINVAL; + } + /* Store the allocated fid in regfile*/ + val64 = rid; + rc = ulp_regfile_write(parms->regfile, tbl->fdb_operand, + cpu_to_be64(val64)); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Write regfile[%d] failed\n", tbl->fdb_operand); + ulp_flow_db_fid_free(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_RID, rid); + return -EINVAL; + } + /* save the rid into the parms in case a flow fails before pushing the + * rid into the fid + */ + parms->rid = rid; + return 0; +} + +/* Process the flow database opcode action. + * returns 0 on success. 
+ */ +int +ulp_mapper_fdb_opc_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_flow_db_res_params *fid_parms) +{ + u32 push_fid; + u64 val64; + enum bnxt_ulp_fdb_type flow_type; + int rc = 0; + + switch (tbl->fdb_opcode) { + case BNXT_ULP_FDB_OPC_PUSH_FID: + push_fid = parms->flow_id; + flow_type = parms->flow_type; + break; + case BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE: + /* get the fid from the regfile */ + rc = ulp_regfile_read(parms->regfile, tbl->fdb_operand, + &val64); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "regfile[%d] read oob\n", + tbl->fdb_operand); + return -EINVAL; + } + /* Use the extracted fid to update the flow resource */ + push_fid = (u32)be64_to_cpu(val64); + flow_type = BNXT_ULP_FDB_TYPE_RID; + break; + case BNXT_ULP_FDB_OPC_PUSH_FID_SW_ONLY: + push_fid = parms->flow_id; + flow_type = parms->flow_type; + fid_parms->reserve_flag = 0x1; + break; + default: + return rc; /* Nothing to be done */ + } + + /* Add the resource to the flow database */ + rc = ulp_flow_db_resource_add(parms->ulp_ctx, flow_type, + push_fid, fid_parms); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add res to flow %x rc = %d\n", + push_fid, rc); + return rc; +} + +/* Process the flow database opcode action. + * returns 0 on success. 
+ */ +int +ulp_mapper_priority_opc_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *priority) +{ + u64 regval = 0; + int rc = 0; + + switch (tbl->pri_opcode) { + case BNXT_ULP_PRI_OPC_NOT_USED: + *priority = bnxt_ulp_default_app_priority_get(parms->ulp_ctx); + break; + case BNXT_ULP_PRI_OPC_CONST: + *priority = tbl->pri_operand; + break; + case BNXT_ULP_PRI_OPC_APP_PRI: + *priority = parms->app_priority; + break; + case BNXT_ULP_PRI_OPC_APP_PRI_OR_CONST: + if (parms->app_priority) + *priority = parms->app_priority; + else + *priority = tbl->pri_operand; + break; + case BNXT_ULP_PRI_OPC_REGFILE: + if (ulp_regfile_read(parms->regfile, tbl->pri_operand, + ®val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "regfile[%u] read oob\n", + tbl->pri_operand); + rc = -EINVAL; + } + *priority = (uint32_t)be64_to_cpu(regval); + break; + case BNXT_ULP_PRI_OPC_COMP_FIELD: + if (tbl->pri_operand < BNXT_ULP_CF_IDX_LAST) { + regval = ULP_COMP_FLD_IDX_RD(parms, tbl->pri_operand); + *priority = regval; + } else { + netdev_dbg(parms->ulp_ctx->bp->dev, "comp field out of bounds %u\n", + tbl->pri_operand); + rc = -EINVAL; + } + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Priority opcode not supported %d\n", + tbl->pri_opcode); + rc = -EINVAL; + break; + } + netdev_dbg(parms->ulp_ctx->bp->dev, "Tcam priority = 0x%x\n", *priority); + return rc; +} + +/* Process the identifier list in the given table. + * Extract the ident from the table entry and + * write it to the reg file. + * returns 0 on success. 
+ */ +int +ulp_mapper_tbl_ident_scan_ext(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u8 *byte_data, + u32 byte_data_size, + enum bnxt_ulp_byte_order byte_order) +{ + struct bnxt_ulp_mapper_ident_info *idents; + u32 i, num_idents = 0; + u64 val64; + + /* validate the null arguments */ + if (!byte_data) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid argument\n"); + return -EINVAL; + } + + /* Get the ident list and process each one */ + idents = ulp_mapper_ident_fields_get(parms, tbl, &num_idents); + + for (i = 0; i < num_idents; i++) { + /* check the size of the buffer for validation */ + if ((idents[i].ident_bit_pos + idents[i].ident_bit_size) > + ULP_BYTE_2_BITS(byte_data_size) || + idents[i].ident_bit_size > ULP_BYTE_2_BITS(sizeof(val64))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid offset or length %x:%x:%x\n", + idents[i].ident_bit_pos, + idents[i].ident_bit_size, + byte_data_size); + return -EINVAL; + } + val64 = 0; + if (byte_order == BNXT_ULP_BYTE_ORDER_LE) + ulp_bs_pull_lsb(byte_data, (u8 *)&val64, + sizeof(val64), + idents[i].ident_bit_pos, + idents[i].ident_bit_size); + else + ulp_bs_pull_msb(byte_data, (u8 *)&val64, + idents[i].ident_bit_pos, + idents[i].ident_bit_size); + + /* Write it to the regfile, val64 is already in big-endian*/ + if (ulp_regfile_write(parms->regfile, + idents[i].regfile_idx, val64)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Regfile[%d] write failed.\n", + idents[i].regfile_idx); + return -EINVAL; + } + } + return 0; +} + +/* Process the identifier instruction and either store it in the flow database + * or return it in the val (if not NULL) on success. If val is NULL, the + * identifier is to be stored in the flow database. 
+ */ +static int +ulp_mapper_ident_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct bnxt_ulp_mapper_ident_info *ident, + u16 *val) +{ + const struct ulp_mapper_core_ops *op = parms->mapper_data->mapper_oper; + struct ulp_flow_db_res_params fid_parms = { 0 }; + u64 id = 0; + int idx; + int rc; + + fid_parms.direction = tbl->direction; + fid_parms.resource_func = ident->resource_func; + fid_parms.resource_type = ident->ident_type; + fid_parms.critical_resource = tbl->critical_resource; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = op->ulp_mapper_core_ident_alloc_process(parms->ulp_ctx, + tbl->session_type, + ident->ident_type, + tbl->direction, + tbl->track_type, + &id); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "identifier process failed\n"); + return rc; + } + + fid_parms.resource_hndl = id; + idx = ident->regfile_idx; + if (ulp_regfile_write(parms->regfile, idx, cpu_to_be64(id))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Regfile[%d] write failed.\n", idx); + rc = -EINVAL; + /* Need to free the identifier, so goto error */ + goto error; + } + + /* Link the resource to the flow in the flow db */ + if (!val) { + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + } else { + *val = id; + } + ulp_mapper_ident_field_dump(parms->ulp_ctx, "Ident", ident, tbl, id); + return 0; + +error: + /* Need to free the identifier */ + op->ulp_mapper_core_ident_free(parms->ulp_ctx, &fid_parms); + return rc; +} + +static int +ulp_mapper_field_port_db_process(struct bnxt_ulp_mapper_parms *parms, + u32 port_id, + u16 val16, + u8 **val) +{ + enum bnxt_ulp_port_table port_data = val16; + + switch (port_data) { + case BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_MAC: + if (ulp_port_db_parent_mac_addr_get(parms->ulp_ctx, port_id, + val)) { + 
netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_DRV_FUNC_MAC: + if (ulp_port_db_drv_mac_addr_get(parms->ulp_ctx, port_id, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_DRV_FUNC_PARENT_VNIC: + if (ulp_port_db_parent_vnic_get(parms->ulp_ctx, port_id, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_PORT_IS_PF: + if (ulp_port_db_port_is_pf_get(parms->ulp_ctx, port_id, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_VF_FUNC_METADATA: + if (ulp_port_db_port_meta_data_get(parms->ulp_ctx, port_id, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_TABLE_SCOPE: + if (ulp_port_db_port_table_scope_get(parms->ulp_ctx, + port_id, val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_VF_FUNC_FID: + if (ulp_port_db_port_vf_fid_get(parms->ulp_ctx, port_id, val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + case BNXT_ULP_PORT_TABLE_DRV_FUNC_ROCE_VNIC: + if (ulp_port_db_drv_roce_vnic_get(parms->ulp_ctx, port_id, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port id %u\n", port_id); + return -EINVAL; + } + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid port_data %d\n", port_data); + return -EINVAL; + } + return 0; +} + +static int +ulp_mapper_field_src_process(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_field_src field_src, + u8 *field_opr, + enum tf_dir dir, + u8 is_key, + u32 bitlen, + u8 **val, + u32 *val_len, + u64 *value) +{ + struct 
bnxt_ulp_mapper_cond_list_info info = { 0 }; + u32 bytelen = ULP_BITS_2_BYTE(bitlen); + u32 port_id, val_size, field_size; + struct bnxt_ulp_mapper_data *m; + u16 idx, size_idx, offset; + int cond_res; + u64 lregval; + bool shared; + u8 *buffer; + u8 i = 0; + u8 bit; + + *val_len = bitlen; + *value = 0; + /* Perform the action */ + switch (field_src) { + case BNXT_ULP_FIELD_SRC_ZERO: + *val = mapper_fld_zeros; + break; + case BNXT_ULP_FIELD_SRC_CONST: + *val = field_opr; + break; + case BNXT_ULP_FIELD_SRC_ONES: + *val = mapper_fld_ones; + *value = 1; + break; + case BNXT_ULP_FIELD_SRC_CF: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "CF operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + if (idx >= BNXT_ULP_CF_IDX_LAST || bytelen > sizeof(u64)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "comp field [%d] read oob %d\n", + idx, bytelen); + return -EINVAL; + } + buffer = (u8 *)&parms->comp_fld[idx]; + *val = &buffer[sizeof(u64) - bytelen]; + *value = ULP_COMP_FLD_IDX_RD(parms, idx); + break; + case BNXT_ULP_FIELD_SRC_RF: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "RF operand read failed\n"); + return -EINVAL; + } + + idx = be16_to_cpu(idx); + /* Uninitialized regfile entries return 0 */ + if (ulp_regfile_read(parms->regfile, idx, &lregval) || + sizeof(u64) < bytelen) { + netdev_dbg(parms->ulp_ctx->bp->dev, "regfile[%d] read oob %u\n", + idx, bytelen); + return -EINVAL; + } + buffer = (u8 *)&parms->regfile->entry[idx].data; + *val = &buffer[sizeof(u64) - bytelen]; + *value = be64_to_cpu(lregval); + break; + case BNXT_ULP_FIELD_SRC_ACT_PROP: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Action operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "act_prop[%d] oob\n", idx); + return 
-EINVAL; + } + buffer = &parms->act_prop->act_details[idx]; + field_size = ulp_mapper_act_prop_size_get(idx); + if (bytelen > field_size) { + netdev_dbg(parms->ulp_ctx->bp->dev, "act_prop[%d] field size small %u\n", + idx, field_size); + return -EINVAL; + } + *val = &buffer[field_size - bytelen]; + if (sizeof(*value) >= field_size) { + *value = buffer[0]; + for (i = 1; i < field_size; i++) + *value = (*value << 8) | buffer[i]; + } + break; + case BNXT_ULP_FIELD_SRC_ACT_PROP_SZ: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Action sz operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + + if (idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "act_prop_sz[%d] oob\n", idx); + return -EINVAL; + } + *val = &parms->act_prop->act_details[idx]; + + /* get the size index next */ + if (ulp_operand_read(&field_opr[sizeof(u16)], + (u8 *)&size_idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Action sz operand read failed\n"); + return -EINVAL; + } + size_idx = be16_to_cpu(size_idx); + if (size_idx >= BNXT_ULP_ACT_PROP_IDX_LAST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "act_prop[%d] oob\n", size_idx); + return -EINVAL; + } + memcpy(&val_size, &parms->act_prop->act_details[size_idx], + sizeof(u32)); + val_size = be32_to_cpu(val_size); + *val_len = ULP_BYTE_2_BITS(val_size); + break; + case BNXT_ULP_FIELD_SRC_GLB_RF: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Global regfile read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + if (ulp_mapper_glb_resource_read(parms->mapper_data, + dir, idx, &lregval, &shared) || + sizeof(u64) < bytelen) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Global regfile[%d] read failed %u\n", + idx, bytelen); + return -EINVAL; + } + m = parms->mapper_data; + buffer = (u8 *)&m->glb_res_tbl[dir][idx].resource_hndl; + *val = &buffer[sizeof(u64) - bytelen]; + *value = 
be64_to_cpu(lregval); + break; + case BNXT_ULP_FIELD_SRC_HF: + case BNXT_ULP_FIELD_SRC_SUB_HF: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Header field read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + /* get the index from the global field list */ + if (ulp_mapper_glb_field_tbl_get(parms, idx, &bit)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid ulp_glb_field_tbl idx %d\n", + idx); + return -EINVAL; + } + if (is_key) + buffer = parms->hdr_field[bit].spec; + else + buffer = parms->hdr_field[bit].mask; + + field_size = parms->hdr_field[bit].size; + if (!field_size) { + /* To support field processing of undefined fields */ + *val = mapper_fld_zeros; + break; + } else if (bytelen > field_size) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Hdr field[%d] size small %u\n", + bit, field_size); + return -EINVAL; + } + if (field_src == BNXT_ULP_FIELD_SRC_HF) { + *val = &buffer[field_size - bytelen]; + } else { + /* get the offset next */ + if (ulp_operand_read(&field_opr[sizeof(u16)], + (u8 *)&offset, + sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Hdr fld size read failed\n"); + return -EINVAL; + } + offset = be16_to_cpu(offset); + offset = ULP_BITS_2_BYTE_NR(offset); + if ((offset + bytelen) > field_size) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Hdr field[%d] oob\n", bit); + return -EINVAL; + } + *val = &buffer[offset]; + } + break; + case BNXT_ULP_FIELD_SRC_HDR_BIT: + if (ulp_operand_read(field_opr, + (u8 *)&lregval, sizeof(u64))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Header bit read failed\n"); + return -EINVAL; + } + lregval = be64_to_cpu(lregval); + if (ULP_BITMAP_ISSET(parms->hdr_bitmap->bits, lregval)) { + *val = mapper_fld_one; + *value = 1; + } else { + *val = mapper_fld_zeros; + } + break; + case BNXT_ULP_FIELD_SRC_ACT_BIT: + if (ulp_operand_read(field_opr, + (u8 *)&lregval, sizeof(u64))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Action bit read failed\n"); + return -EINVAL; + } 
+ lregval = be64_to_cpu(lregval); + if (ULP_BITMAP_ISSET(parms->act_bitmap->bits, lregval)) { + *val = mapper_fld_one; + *value = 1; + } else { + *val = mapper_fld_zeros; + } + break; + case BNXT_ULP_FIELD_SRC_FIELD_BIT: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Field bit read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + /* get the index from the global field list */ + if (ulp_mapper_glb_field_tbl_get(parms, idx, &bit)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid ulp_glb_field_tbl idx %d\n", + idx); + return -EINVAL; + } + if (ULP_INDEX_BITMAP_GET(parms->fld_bitmap->bits, bit)) { + *val = mapper_fld_one; + *value = 1; + } else { + *val = mapper_fld_zeros; + } + break; + case BNXT_ULP_FIELD_SRC_PORT_TABLE: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "CF operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + if (idx >= BNXT_ULP_CF_IDX_LAST || bytelen > sizeof(u64)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "comp field [%d] read oob %d\n", + idx, bytelen); + return -EINVAL; + } + + /* The port id is present in the comp field list */ + port_id = ULP_COMP_FLD_IDX_RD(parms, idx); + /* get the port table enum */ + if (ulp_operand_read(field_opr + sizeof(u16), + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Port table enum read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + if (ulp_mapper_field_port_db_process(parms, port_id, idx, + val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "field port table failed\n"); + return -EINVAL; + } + break; + case BNXT_ULP_FIELD_SRC_ENC_HDR_BIT: + if (ulp_operand_read(field_opr, + (u8 *)&lregval, sizeof(u64))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Header bit read failed\n"); + return -EINVAL; + } + lregval = be64_to_cpu(lregval); + if (ULP_BITMAP_ISSET(parms->enc_hdr_bitmap->bits, lregval)) { + *val = mapper_fld_one; + *value = 1; + } else { + *val 
= mapper_fld_zeros; + } + break; + case BNXT_ULP_FIELD_SRC_ENC_FIELD: + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Header field read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + /* get the index from the global field list */ + if (idx >= BNXT_ULP_ENC_FIELD_LAST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid encap field tbl idx %d\n", + idx); + return -EINVAL; + } + buffer = parms->enc_field[idx].spec; + field_size = parms->enc_field[idx].size; + if (bytelen > field_size) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Encap field[%d] size small %u\n", + idx, field_size); + return -EINVAL; + } + *val = &buffer[field_size - bytelen]; + break; + case BNXT_ULP_FIELD_SRC_SKIP: + /* do nothing */ + *val = mapper_fld_zeros; + *val_len = 0; + break; + case BNXT_ULP_FIELD_SRC_REJECT: + return -EINVAL; + case BNXT_ULP_FIELD_SRC_LIST_AND: + case BNXT_ULP_FIELD_SRC_LIST_OR: + /* read the cond table index and count */ + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Cond idx operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + + if (ulp_operand_read(field_opr + sizeof(u16), + (u8 *)&size_idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Cond count operand read failed\n"); + return -EINVAL; + } + size_idx = be16_to_cpu(size_idx); + + /* populate the extracted vales to create a temp cond list */ + if (field_src == BNXT_ULP_FIELD_SRC_LIST_AND) + info.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_AND; + else + info.cond_list_opcode = BNXT_ULP_COND_LIST_OPC_OR; + info.cond_start_idx = idx; + info.cond_nums = size_idx; + if (ulp_mapper_cond_opc_list_process(parms, &info, &cond_res)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Cond evaluation failed\n"); + return -EINVAL; + } + if (cond_res) { + *val = mapper_fld_one; + *value = 1; + } else { + *val = mapper_fld_zeros; + *value = 0; + } + break; + default: + 
netdev_dbg(parms->ulp_ctx->bp->dev, "invalid field opcode 0x%x\n", field_src); + return -EINVAL; + } + return 0; +} + +static int ulp_mapper_field_buffer_eval(u8 *buffer, u32 bitlen, + u64 *output) +{ + u32 bytelen; + u16 val_16; + u32 val_32; + u64 val_64; + + bytelen = ULP_BITS_2_BYTE(bitlen); + if (bytelen == sizeof(u8)) { + *output = *((u8 *)buffer); + } else if (bytelen == sizeof(u16)) { + val_16 = *((u16 *)buffer); + *output = be16_to_cpu(val_16); + } else if (bytelen == sizeof(u32)) { + val_32 = *((u32 *)buffer); + *output = be32_to_cpu(val_32); + } else if (bytelen == sizeof(val_64)) { + val_64 = *((u64 *)buffer); + *output = be64_to_cpu(val_64); + } else { + *output = 0; + return -EINVAL; + } + return 0; +} + +static int ulp_mapper_field_blob_write(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_field_src fld_src, + struct ulp_blob *blob, + u8 *val, + u32 val_len, + u8 **out_val) +{ + if (fld_src == BNXT_ULP_FIELD_SRC_ZERO) { + if (ulp_blob_pad_push(blob, val_len)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "too large for blob\n"); + return -EINVAL; + } + } else if (fld_src == BNXT_ULP_FIELD_SRC_ACT_PROP_SZ) { + if (ulp_blob_push_encap(blob, val, val_len)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "encap blob push failed\n"); + return -EINVAL; + } + } else if (fld_src == BNXT_ULP_FIELD_SRC_SKIP) { + /* do nothing */ + } else { + if (ulp_blob_push(blob, val, val_len)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "push of val1 failed\n"); + return -EINVAL; + } + } + *out_val = val; + return 0; +} + +static int +ulp_mapper_field_opc_next(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + u8 *field_opr, + struct ulp_blob *blob, + u8 is_key, + const char *name) +{ + struct bnxt_ulp_mapper_field_info *field_info; + u16 idx; + + /* read the cond table index and count */ + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "field idx operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + + 
field_info = ulp_mapper_tmpl_key_ext_list_get(parms, idx); + if (!field_info) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid field idx %d\n", idx); + return -EINVAL; + } + + return ulp_mapper_field_opc_process(parms, dir, field_info, + blob, is_key, name); +} + +static void +ulp_mapper_key_recipe_tbl_deinit(struct bnxt_ulp_mapper_data *mdata) +{ + struct bnxt_ulp_key_recipe_entry **recipes; + enum bnxt_ulp_direction dir; + uint32_t idx, ftype; + + /* If recipe table is not initialized then exit */ + if (!mdata->key_recipe_info.num_recipes) + return; + + for (dir = 0; dir < BNXT_ULP_DIRECTION_LAST; dir++) { + for (ftype = 0; ftype < ULP_RECIPE_TYPE_MAX; ftype++) { + recipes = mdata->key_recipe_info.recipes[dir][ftype]; + for (idx = 0; idx < mdata->key_recipe_info.num_recipes; + idx++) { + if (recipes[idx]) + vfree(recipes[idx]); + } + vfree(mdata->key_recipe_info.recipes[dir][ftype]); + mdata->key_recipe_info.recipes[dir][ftype] = NULL; + vfree(mdata->key_recipe_info.recipe_ba[dir][ftype]); + mdata->key_recipe_info.recipe_ba[dir][ftype] = NULL; + } + } + mdata->key_recipe_info.num_recipes = 0; +} + +static int +ulp_mapper_key_recipe_tbl_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mdata) +{ + struct bnxt_ulp_key_recipe_entry **recipes; + enum bnxt_ulp_direction dir; + u32 dev_id = 0, size_val; + u32 num_recipes, ftype, pool_size; + int rc = 0; + struct bitalloc *recipe_ba; + + rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get device id from ulp.\n"); + return rc; + } + num_recipes = bnxt_ulp_num_key_recipes_get(ulp_ctx); + if (!num_recipes) + return rc; + + /* Need to write these values so that a failure will result in freeing + * the memory in the deinit + */ + mdata->key_recipe_info.num_recipes = num_recipes; + mdata->key_recipe_info.max_fields = BNXT_ULP_KEY_RECIPE_MAX_FLDS; + + size_val = sizeof(struct bnxt_ulp_key_recipe_entry *); + pool_size = BITALLOC_SIZEOF(num_recipes); + + 
/* The caller will deinit if failures occur, so just return fail instead + * of attempting to free allocated memory + **/ + for (dir = 0; dir < BNXT_ULP_DIRECTION_LAST; dir++) { + for (ftype = 0; ftype < ULP_RECIPE_TYPE_MAX; ftype++) { + recipes = vzalloc(size_val * num_recipes); + if (!recipes) + return -ENOMEM; + mdata->key_recipe_info.recipes[dir][ftype] = recipes; + + recipe_ba = vzalloc(pool_size); + if (!recipe_ba) + return -ENOMEM; + mdata->key_recipe_info.recipe_ba[dir][ftype] = + recipe_ba; + rc = bnxt_ba_init(recipe_ba, num_recipes, true); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Unable to alloc recipe ba\n"); + return -ENOMEM; + } + } + } + return rc; +} + +static struct bnxt_ulp_mapper_data * +ulp_mapper_key_recipe_args_validate(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_direction dir, + enum bnxt_ulp_resource_sub_type stype, + u32 recipe_id) +{ + struct bnxt_ulp_mapper_data *mdata; + + mdata = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!mdata) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get mapper data.\n"); + return NULL; + } + if (dir >= BNXT_ULP_DIRECTION_LAST) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid dir (%d) in key recipe\n", dir); + return NULL; + } + if (mdata->key_recipe_info.num_recipes == 0) { + netdev_dbg(ulp_ctx->bp->dev, "Recipes are not supported\n"); + return NULL; + } + if (stype != BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM && + stype != BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid type (%d) for key recipe.\n", stype); + return NULL; + } + if (recipe_id >= mdata->key_recipe_info.num_recipes || + !mdata->key_recipe_info.num_recipes) { + netdev_dbg(ulp_ctx->bp->dev, "Key recipe id out of range(%u >= %u)\n", + recipe_id, mdata->key_recipe_info.num_recipes); + return NULL; + } + return mdata; +} + +static struct bnxt_ulp_key_recipe_entry * +ulp_mapper_key_recipe_alloc(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_direction dir, + 
enum bnxt_ulp_resource_sub_type stype, + u32 recipe_id, bool alloc_only, + u8 *max_fields) +{ + struct bnxt_ulp_key_recipe_entry **recipes; + struct bnxt_ulp_mapper_data *mdata = NULL; + uint32_t size_s = sizeof(struct bnxt_ulp_key_recipe_entry); + + mdata = ulp_mapper_key_recipe_args_validate(ulp_ctx, dir, + stype, recipe_id); + if (!mdata) + return NULL; + + recipes = mdata->key_recipe_info.recipes[dir][stype]; + if (alloc_only && !recipes[recipe_id]) { + recipes[recipe_id] = vzalloc(size_s); + if (!recipes[recipe_id]) + return NULL; + netdev_dbg(ulp_ctx->bp->dev, "Alloc key recipe [%s]:[%s] = 0x%X\n", + (dir == BNXT_ULP_DIRECTION_INGRESS) ? "rx" : "tx", + ulp_mapper_key_recipe_type_to_str(stype), recipe_id); + } else if (alloc_only) { + netdev_dbg(ulp_ctx->bp->dev, "Recipe ID (%d) already allocated\n", recipe_id); + } + *max_fields = mdata->key_recipe_info.max_fields; + return recipes[recipe_id]; +} + +/* The free just marks the entry as not in use and resets the number of entries + * to zero. + */ +static int +ulp_mapper_key_recipe_free(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_direction dir, + enum bnxt_ulp_resource_sub_type stype, + u32 index) +{ + struct bnxt_ulp_key_recipe_entry **recipes; + struct bnxt_ulp_mapper_data *mdata = NULL; + struct bitalloc *recipe_ba = NULL; + int rc; + + mdata = ulp_mapper_key_recipe_args_validate(ulp_ctx, dir, + stype, index); + if (!mdata) + return -EINVAL; + + recipe_ba = mdata->key_recipe_info.recipe_ba[dir][stype]; + rc = bnxt_ba_free(recipe_ba, index); + if (rc < 0) + netdev_dbg(ulp_ctx->bp->dev, "Unable to free recipe id[%s][%u] = (%d)\n", + (dir == BNXT_ULP_DIRECTION_INGRESS) ? "rx" : "tx", + stype, index); + + recipes = mdata->key_recipe_info.recipes[dir][stype]; + if (!recipes[index]) { + netdev_dbg(ulp_ctx->bp->dev, "recipe id[%s][%u] = (%d) already freed\n", + (dir == BNXT_ULP_DIRECTION_INGRESS) ? 
"rx" : "tx", + stype, index); + return 0; + } + vfree(recipes[index]); + recipes[index] = NULL; + netdev_dbg(ulp_ctx->bp->dev, "Free key recipe [%s]:[%s] = 0x%X\n", + (dir == BNXT_ULP_DIRECTION_INGRESS) ? "rx" : "tx", + ulp_mapper_key_recipe_type_to_str(stype), index); + return 0; +} + +static void +ulp_mapper_key_recipe_copy_to_src1(struct bnxt_ulp_mapper_field_info *dst, + enum bnxt_ulp_field_src field_src, + u8 *field_opr, + struct bnxt_ulp_mapper_field_info *src, + bool *written) +{ + if (field_src != BNXT_ULP_FIELD_SRC_SKIP) { + dst->field_opc = BNXT_ULP_FIELD_OPC_SRC1; + dst->field_src1 = field_src; + memcpy(dst->field_opr1, field_opr, 16); + memcpy(dst->description, src->description, 64); + dst->field_bit_size = src->field_bit_size; + *written = true; + } +} + +struct bnxt_ulp_mapper_key_info * +ulp_mapper_key_recipe_fields_get(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds) +{ + struct bnxt_ulp_key_recipe_entry **recipes; + enum bnxt_ulp_resource_sub_type stype; + struct bnxt_ulp_mapper_data *mdata = NULL; + u64 regval = 0; + u32 recipe_id = 0; + + /* Don't like this, but need to convert from a tbl resource func to the + * subtype for key_recipes. 
+ */ + switch (tbl->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + stype = BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM; + break; + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + stype = BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid res func(%d) for recipe fields\n", + tbl->resource_func); + return NULL; + }; + + /* Get the recipe index from the registry file */ + if (ulp_regfile_read(parms->regfile, + tbl->key_recipe_operand, + ®val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tbl idx from regfile[%d].\n", + tbl->tbl_operand); + return NULL; + } + recipe_id = (u32)be64_to_cpu(regval); + mdata = ulp_mapper_key_recipe_args_validate(parms->ulp_ctx, + tbl->direction, + stype, recipe_id); + if (!mdata) + return NULL; + + recipes = mdata->key_recipe_info.recipes[tbl->direction][stype]; + if (!recipes[recipe_id]) + return NULL; + + *num_flds = recipes[recipe_id]->cnt; + return &recipes[recipe_id]->flds[0]; +} + +static int +ulp_mapper_key_recipe_field_opc_next(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_direction dir, + u8 *field_opr, + u8 is_key, + const char *name, + bool *written, + struct bnxt_ulp_mapper_field_info *ofld) +{ + struct bnxt_ulp_mapper_field_info *field_info; + u16 idx; + + /* read the cond table index and count */ + if (ulp_operand_read(field_opr, + (u8 *)&idx, sizeof(u16))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "field idx operand read failed\n"); + return -EINVAL; + } + idx = be16_to_cpu(idx); + + field_info = ulp_mapper_tmpl_key_ext_list_get(parms, idx); + if (!field_info) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid field idx %d\n", idx); + return -EINVAL; + } + + return ulp_mapper_key_recipe_field_opc_process(parms, dir, field_info, + is_key, name, + written, ofld); +} + +int +ulp_mapper_key_recipe_field_opc_process(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_direction dir, + struct bnxt_ulp_mapper_field_info *fld, + u8 is_key, + const 
char *name, + bool *written, + struct bnxt_ulp_mapper_field_info *ofld) +{ + u8 process_src1 = 0; + u32 val1_len = 0; + u64 value1 = 0; + int rc = 0; + u8 *val1; + + /* prepare the field source and values */ + switch (fld->field_opc) { + case BNXT_ULP_FIELD_OPC_SRC1: + /* No logic, just take SRC1 and return */ + ulp_mapper_key_recipe_copy_to_src1(ofld, fld->field_src1, + fld->field_opr1, fld, + written); + return rc; + case BNXT_ULP_FIELD_OPC_SKIP: + *written = false; + return rc; + case BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3: + case BNXT_ULP_FIELD_OPC_TERNARY_LIST: + process_src1 = 1; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid fld opcode %u\n", fld->field_opc); + rc = -EINVAL; + return rc; + } + + /* process the src1 opcode */ + if (process_src1) { + if (ulp_mapper_field_src_process(parms, fld->field_src1, + fld->field_opr1, (enum tf_dir)dir, is_key, + fld->field_bit_size, &val1, + &val1_len, &value1)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src1 process failed\n"); + return -EINVAL; + } + } + + if (fld->field_opc == BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3) { + if (value1) + ulp_mapper_key_recipe_copy_to_src1(ofld, + fld->field_src2, + fld->field_opr2, + fld, written); + else + ulp_mapper_key_recipe_copy_to_src1(ofld, + fld->field_src3, + fld->field_opr3, + fld, written); + } else if (fld->field_opc == BNXT_ULP_FIELD_OPC_TERNARY_LIST) { + if (value1) { + /* check if src2 is next */ + if (fld->field_src2 == BNXT_ULP_FIELD_SRC_NEXT) { + /* get the next field info */ + if (ulp_mapper_key_recipe_field_opc_next(parms, + dir, + fld->field_opr2, + is_key, + name, + written, + ofld)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "recipe fld next process fail\n"); + return -EINVAL; + } else { + return rc; + } + } else { + ulp_mapper_key_recipe_copy_to_src1(ofld, + fld->field_src2, + fld->field_opr2, + fld, written); + } + } else { + /* check if src3 is next */ + if (fld->field_src3 == BNXT_ULP_FIELD_SRC_NEXT) { + /* get the next field info 
*/ + if (ulp_mapper_key_recipe_field_opc_next(parms, + dir, + fld->field_opr3, + is_key, + name, + written, + ofld)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "recipt fld next process fail\n"); + return -EINVAL; + } else { + return rc; + } + } else { + ulp_mapper_key_recipe_copy_to_src1(ofld, + fld->field_src3, + fld->field_opr3, + fld, written); + } + } + } + if (*written && is_key) + netdev_dbg(parms->ulp_ctx->bp->dev, "%-20s bits = %-3d\n", fld->description, + fld->field_bit_size); + + return rc; +} + +static int +ulp_mapper_key_recipe_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + bool alloc = false, write = false, regfile = false; + struct bnxt_ulp_mapper_key_info *kflds, *rflds; + struct bnxt_ulp_mapper_field_info *kfld, *rfld; + struct bnxt_ulp_mapper_data *mdata = NULL; + struct bnxt_ulp_key_recipe_entry *recipe; + struct ulp_flow_db_res_params fid_parms; + int32_t rc = 0, free_rc, tmp_recipe_id; + enum bnxt_ulp_resource_sub_type stype; + uint8_t max_rflds = 0, rnum_flds = 0; + enum bnxt_ulp_direction dir; + struct bitalloc *recipe_ba = NULL; + uint32_t recipe_id = 0; + uint32_t i, num_kflds; + bool written = false; + uint64_t regval = 0; + + dir = tbl->direction; + stype = tbl->resource_sub_type; + + switch (tbl->tbl_opcode) { + case BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_WR_REGFILE: + alloc = true; + write = true; + regfile = true; + break; + case BNXT_ULP_KEY_RECIPE_TBL_OPC_ALLOC_REGFILE: + alloc = true; + regfile = true; + break; + case BNXT_ULP_KEY_RECIPE_TBL_OPC_WR_REGFILE: + alloc = false; + regfile = true; + write = true; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid recipe table opcode %d\n", + tbl->tbl_opcode); + return -EINVAL; + }; + + /* Get the recipe_id from the regfile */ + if (!alloc && regfile) { + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, + ®val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Fail to get tbl idx from regfile[%d].\n", + tbl->tbl_operand); + return 
-EINVAL; + } + recipe_id = (u32)be64_to_cpu(regval); + } + + if (alloc) { + /* Allocate a recipe id based on the direction and type + * only supported types are EM and WC for now. + */ + mdata = ulp_mapper_key_recipe_args_validate(parms->ulp_ctx, dir, + stype, 0); + if (!mdata) + return -EINVAL; + + recipe_ba = mdata->key_recipe_info.recipe_ba[dir][stype]; + tmp_recipe_id = bnxt_ba_alloc(recipe_ba); + if (tmp_recipe_id < 0) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to allocate a recipe id\n"); + return -EINVAL; + } else if ((uint32_t)tmp_recipe_id >= + mdata->key_recipe_info.num_recipes) { + /* Shouldn't get here, but could be an issue with the + * allocator, so free the recipe_id + */ + netdev_dbg(parms->ulp_ctx->bp->dev, + "Allocated recipe id(%d) >= max(%d)\n", + tmp_recipe_id, + mdata->key_recipe_info.num_recipes); + (void)bnxt_ba_free(recipe_ba, tmp_recipe_id); + return -EINVAL; + } + /* any error after this must goto error in order to free + * the recipe_id + */ + recipe_id = tmp_recipe_id; + } + + if (alloc && regfile) { + regval = be64_to_cpu(recipe_id); + rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, + regval); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to write regfile[%d] rc=%d\n", + tbl->tbl_operand, rc); + if (recipe_ba) + (void)bnxt_ba_free(recipe_ba, recipe_id); + return -EINVAL; + } + } + + /* allocate or Get the recipe entry based on alloc */ + recipe = ulp_mapper_key_recipe_alloc(parms->ulp_ctx, dir, stype, + recipe_id, alloc, &max_rflds); + if (!recipe || !max_rflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get the recipe slot\n"); + if (recipe_ba) + (void)bnxt_ba_free(recipe_ba, recipe_id); + return -EINVAL; + } + + /* We have a recipe_id by now, write the data */ + if (write) { + /* Get the key fields to process */ + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get the key fields\n"); + rc = -EINVAL; + goto 
error; + } + + rflds = &recipe->flds[0]; + /* iterate over the key fields and write the recipe */ + for (i = 0; i < num_kflds; i++) { + if (rnum_flds >= max_rflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Max recipe fields exceeded (%d)\n", + rnum_flds); + goto error; + } + written = false; + kfld = &kflds[i].field_info_spec; + rfld = &rflds[rnum_flds].field_info_spec; + + rc = ulp_mapper_key_recipe_field_opc_process(parms, + dir, + kfld, 1, + "KEY", + &written, + rfld); + if (rc) + goto error; + + if (stype == + BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM) { + kfld = &kflds[i].field_info_mask; + rfld = &rflds[rnum_flds].field_info_mask; + rc = ulp_mapper_key_recipe_field_opc_process(parms, + dir, + kfld, + 0, + "MASK", + &written, + rfld); + if (rc) + goto error; + } + if (written) + rnum_flds++; + } + recipe->cnt = rnum_flds; + } + + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = recipe_id; + fid_parms.critical_resource = tbl->critical_resource; + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link resource to flow rc = %d\n", + rc); + goto error; + } + + return rc; +error: + /* Free the actual recipe */ + free_rc = ulp_mapper_key_recipe_free(parms->ulp_ctx, tbl->direction, + tbl->resource_sub_type, recipe_id); + if (free_rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to free recipe on error: %d\n", + free_rc); + return rc; +} + +int +ulp_mapper_field_opc_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + struct bnxt_ulp_mapper_field_info *fld, + struct ulp_blob *blob, + u8 is_key, + const char *name) +{ + u32 val_len = 0, val1_len = 0, val2_len = 0, val3_len = 0; + u64 val_int = 0, val1_int = 0, val2_int = 0, val3_int = 0; + u8 process_src1 = 0, 
process_src2 = 0, process_src3 = 0; + u8 eval_src1 = 0, eval_src2 = 0, eval_src3 = 0; + u64 value1 = 0, value2 = 0, value3 = 0; + u8 *val = NULL, *val1, *val2, *val3; + u16 write_idx = blob->write_idx; + int rc = 0; + + /* prepare the field source and values */ + switch (fld->field_opc) { + case BNXT_ULP_FIELD_OPC_SRC1: + process_src1 = 1; + break; + case BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3: + case BNXT_ULP_FIELD_OPC_TERNARY_LIST: + process_src1 = 1; + break; + case BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2_OR_SRC3: + case BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3: + process_src3 = 1; + eval_src3 = 1; + process_src1 = 1; + process_src2 = 1; + eval_src1 = 1; + eval_src2 = 1; + break; + case BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2: + case BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2: + case BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2_POST: + case BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2_POST: + case BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2: + case BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2: + process_src1 = 1; + process_src2 = 1; + eval_src1 = 1; + eval_src2 = 1; + break; + default: + break; + } + + /* process the src1 opcode */ + if (process_src1) { + if (ulp_mapper_field_src_process(parms, fld->field_src1, + fld->field_opr1, dir, is_key, + fld->field_bit_size, &val1, + &val1_len, &value1)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src1 process failed\n"); + goto error; + } + if (eval_src1) { + if (ulp_mapper_field_buffer_eval(val1, val1_len, + &val1_int)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src1 eval failed\n"); + goto error; + } + } + } + + /* for "if then clause" set the correct process */ + if (fld->field_opc == BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3) { + if (value1) + process_src2 = 1; + else + process_src3 = 1; + } else if (fld->field_opc == BNXT_ULP_FIELD_OPC_TERNARY_LIST) { + if (value1) { + /* check if src2 is next */ + if (fld->field_src2 == BNXT_ULP_FIELD_SRC_NEXT) { + /* get the next field info */ + if (ulp_mapper_field_opc_next(parms, dir, + fld->field_opr2, + blob, is_key, + name)) { + 
netdev_dbg(parms->ulp_ctx->bp->dev, + "fld next process fail\n"); + goto error; + } else { + return rc; + } + } else { + process_src2 = 1; + } + } else { + /* check if src2 is next */ + if (fld->field_src3 == BNXT_ULP_FIELD_SRC_NEXT) { + /* get the next field info */ + if (ulp_mapper_field_opc_next(parms, dir, + fld->field_opr3, + blob, is_key, + name)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "fld next process fail\n"); + goto error; + } else { + return rc; + } + } else { + process_src3 = 1; + } + } + } + + /* process src2 opcode */ + if (process_src2) { + if (ulp_mapper_field_src_process(parms, fld->field_src2, + fld->field_opr2, dir, is_key, + fld->field_bit_size, &val2, + &val2_len, &value2)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src2 process failed\n"); + goto error; + } + if (eval_src2) { + if (ulp_mapper_field_buffer_eval(val2, val2_len, + &val2_int)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src2 eval failed\n"); + goto error; + } + } + } + + /* process src3 opcode */ + if (process_src3) { + if (ulp_mapper_field_src_process(parms, fld->field_src3, + fld->field_opr3, dir, is_key, + fld->field_bit_size, &val3, + &val3_len, &value3)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src3 process failed\n"); + goto error; + } + if (eval_src3) { + if (ulp_mapper_field_buffer_eval(val3, val3_len, + &val3_int)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "fld src3 eval failed\n"); + goto error; + } + } + } + + val_len = fld->field_bit_size; + /* process the field opcodes */ + switch (fld->field_opc) { + case BNXT_ULP_FIELD_OPC_SRC1: + rc = ulp_mapper_field_blob_write(parms, fld->field_src1, + blob, val1, val1_len, &val); + val_len = val1_len; + break; + case BNXT_ULP_FIELD_OPC_SRC1_THEN_SRC2_ELSE_SRC3: + case BNXT_ULP_FIELD_OPC_TERNARY_LIST: + if (value1) { + rc = ulp_mapper_field_blob_write(parms, fld->field_src2, blob, + val2, val2_len, &val); + val_len = val2_len; + } else { + rc = ulp_mapper_field_blob_write(parms, fld->field_src3, blob, + val3, val3_len, 
&val); + val_len = val3_len; + } + break; + case BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2: + case BNXT_ULP_FIELD_OPC_SRC1_PLUS_SRC2_POST: + val_int = val1_int + val2_int; + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2: + case BNXT_ULP_FIELD_OPC_SRC1_MINUS_SRC2_POST: + val_int = val1_int - val2_int; + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2: + val_int = val1_int | val2_int; + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SRC1_OR_SRC2_OR_SRC3: + val_int = val1_int | val2_int | val3_int; + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2: + val_int = val1_int & val2_int; + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SRC1_AND_SRC2_OR_SRC3: + val_int = val1_int & (val2_int | val3_int); + val_int = cpu_to_be64(val_int); + val = ulp_blob_push_64(blob, &val_int, fld->field_bit_size); + if (!val) + rc = -EINVAL; + break; + case BNXT_ULP_FIELD_OPC_SKIP: + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid fld opcode %u\n", fld->field_opc); + rc = -EINVAL; + break; + } + + if (!rc) { + if (fld->field_src1 != BNXT_ULP_FIELD_SRC_ZERO && val_len) + ulp_mapper_field_dump(parms->ulp_ctx, + name, fld, blob, write_idx, val, + val_len); + return rc; + } +error: + netdev_dbg(parms->ulp_ctx->bp->dev, "Error in %s:%s process %u:%u\n", name, + fld->description, (val) ? write_idx : 0, val_len); + return -EINVAL; +} + +/** + * Result table process and fill the result blob. 
+ * @data: - the result blob data + */ +int +ulp_mapper_tbl_result_build(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *data, + const char *name) +{ + struct bnxt_ulp_mapper_field_info *dflds; + u32 i = 0, num_flds = 0, encap_flds = 0; + const struct ulp_mapper_core_ops *oper; + struct ulp_blob encap_blob; + int rc = 0; + + /* Get the result field list */ + dflds = ulp_mapper_result_fields_get(parms, tbl, &num_flds, + &encap_flds); + + /* validate the result field list counts */ + if (!dflds || (!num_flds && !encap_flds)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get data fields %x:%x\n", + num_flds, encap_flds); + return -EINVAL; + } + + /* process the result fields */ + for (i = 0; i < num_flds; i++) { + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &dflds[i], data, 0, name); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "result field processing failed\n"); + return rc; + } + } + + /* process encap fields if any */ + if (encap_flds) { + u32 pad = 0; + /* Initialize the encap blob */ + if (ulp_blob_init(&encap_blob, + ULP_BYTE_2_BITS(tbl->record_size), + parms->device_params->encap_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "blob inits failed.\n"); + return -EINVAL; + } + for (; i < encap_flds; i++) { + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &dflds[i], + &encap_blob, 0, name); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "encap field processing failed\n"); + return rc; + } + } + /* add the dynamic pad push */ + if (parms->device_params->dynamic_sram_en) { + u16 rec_s = ULP_BYTE_2_BITS(tbl->record_size); + u16 blob_len; + + oper = parms->mapper_data->mapper_oper; + blob_len = ulp_blob_data_len_get(&encap_blob); + + /* Get the padding size */ + oper->ulp_mapper_core_dyn_tbl_type_get(parms, tbl, + blob_len, + &rec_s); + pad = rec_s - blob_len; + } else { + pad = ULP_BYTE_2_BITS(tbl->record_size) - + ulp_blob_data_len_get(&encap_blob); + } + if 
(ulp_blob_pad_push(&encap_blob, pad)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "encap buffer padding failed\n"); + return -EINVAL; + } + + /* perform the 64 bit byte swap */ + ulp_blob_perform_64B_byte_swap(&encap_blob); + /* Append encap blob to the result blob */ + rc = ulp_blob_buffer_copy(data, &encap_blob); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "encap buffer copy failed\n"); + return rc; + } + } + netdev_dbg(parms->ulp_ctx->bp->dev, "Result dump\n"); + ulp_mapper_blob_dump(parms->ulp_ctx, data); + return rc; +} + +int +ulp_mapper_mark_gfid_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u64 flow_id) +{ + enum bnxt_ulp_mark_db_opc mark_op = tbl->mark_db_opcode; + struct ulp_flow_db_res_params fid_parms; + u32 mark, gfid, mark_flag; + int rc = 0; + + if (mark_op == BNXT_ULP_MARK_DB_OPC_NOP || + !(mark_op == BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION && + ULP_BITMAP_ISSET(parms->act_bitmap->bits, + BNXT_ULP_ACT_BIT_MARK))) + return rc; /* no need to perform gfid process */ + + /* Get the mark id details from action property */ + memcpy(&mark, &parms->act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK], + sizeof(mark)); + mark = be32_to_cpu(mark); + + TF_GET_GFID_FROM_FLOW_ID(flow_id, gfid); + mark_flag = BNXT_ULP_MARK_GLOBAL_HW_FID; + + rc = ulp_mark_db_mark_add(parms->ulp_ctx, mark_flag, + gfid, mark); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + return rc; + } + fid_parms.direction = tbl->direction; + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_HW_FID; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_type = mark_flag; + fid_parms.resource_hndl = gfid; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", rc); + return rc; +} + +int +ulp_mapper_mark_act_ptr_process(struct 
bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + enum bnxt_ulp_mark_db_opc mark_op = tbl->mark_db_opcode; + struct ulp_flow_db_res_params fid_parms; + u32 act_idx, mark, mark_flag; + u64 val64; + int rc = 0; + + if (mark_op == BNXT_ULP_MARK_DB_OPC_NOP || + !(mark_op == BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION && + ULP_BITMAP_ISSET(parms->act_bitmap->bits, + BNXT_ULP_ACT_BIT_MARK))) + return rc; /* no need to perform mark action process */ + + /* Get the mark id details from action property */ + memcpy(&mark, &parms->act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK], + sizeof(mark)); + mark = be32_to_cpu(mark); + + if (ulp_regfile_read(parms->regfile, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + &val64)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "read action ptr main failed\n"); + return -EINVAL; + } + act_idx = be64_to_cpu(val64); + mark_flag = BNXT_ULP_MARK_LOCAL_HW_FID; + rc = ulp_mark_db_mark_add(parms->ulp_ctx, mark_flag, + act_idx, mark); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + return rc; + } + fid_parms.direction = tbl->direction; + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_HW_FID; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_type = mark_flag; + fid_parms.resource_hndl = act_idx; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", rc); + return rc; +} + +int +ulp_mapper_mark_vfr_idx_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_flow_db_res_params fid_parms; + u32 act_idx, mark, mark_flag; + u64 val64; + enum bnxt_ulp_mark_db_opc mark_op = tbl->mark_db_opcode; + int rc = 0; + + if (mark_op == BNXT_ULP_MARK_DB_OPC_NOP || + mark_op == BNXT_ULP_MARK_DB_OPC_PUSH_IF_MARK_ACTION) + return rc; /* no need to perform mark action process */ + + /* Get the mark 
id details from the computed field of dev port id */ + mark = ULP_COMP_FLD_IDX_RD(parms, BNXT_ULP_CF_IDX_DEV_PORT_ID); + + /* Get the main action pointer */ + if (ulp_regfile_read(parms->regfile, + BNXT_ULP_RF_IDX_MAIN_ACTION_PTR, + &val64)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "read action ptr main failed\n"); + return -EINVAL; + } + act_idx = be64_to_cpu(val64); + + /* Set the mark flag to local fid and vfr flag */ + mark_flag = BNXT_ULP_MARK_LOCAL_HW_FID | BNXT_ULP_MARK_VFR_ID; + + rc = ulp_mark_db_mark_add(parms->ulp_ctx, mark_flag, + act_idx, mark); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + return rc; + } + fid_parms.direction = tbl->direction; + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_HW_FID; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_type = mark_flag; + fid_parms.resource_hndl = act_idx; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", rc); + return rc; +} + +/* Tcam table scan the identifier list and allocate each identifier */ +int +ulp_mapper_tcam_tbl_ident_alloc(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct bnxt_ulp_mapper_ident_info *idents; + u32 num_idents; + u32 i; + + idents = ulp_mapper_ident_fields_get(parms, tbl, &num_idents); + for (i = 0; i < num_idents; i++) { + if (ulp_mapper_ident_process(parms, tbl, + &idents[i], NULL)) + return -EINVAL; + } + return 0; +} + +/** + * internal function to post process key/mask blobs for dynamic pad WC tcam tbl + * + * @parms: The mappers parms with data related to the flow. 
+ * + * @key: The original key to be transformed + * + * @mask: The original mask to be transformed + * + * @tkey: The transformed key + * + * @tmask: The transformed mask + * + * returns zero on success, non-zero on failure + */ +u32 +ulp_mapper_wc_tcam_tbl_dyn_post_process(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_device_params *dparms, + struct ulp_blob *key, + struct ulp_blob *mask, + struct ulp_blob *tkey, + struct ulp_blob *tmask) +{ + u16 tlen, blen, clen, slice_width, num_slices, max_slices, offset; + u32 cword, i, rc; + int pad; + u8 *val; + + slice_width = dparms->wc_slice_width; + clen = dparms->wc_ctl_size_bits; + max_slices = dparms->wc_max_slices; + blen = ulp_blob_data_len_get(key); + + /* Get the length of the key based on number of slices and width */ + num_slices = 1; + tlen = slice_width; + while (tlen < blen && + num_slices <= max_slices) { + num_slices = num_slices << 1; + tlen = tlen << 1; + } + + if (num_slices > max_slices) { + netdev_dbg(ulp_ctx->bp->dev, "Key size (%d) too large for WC\n", blen); + return -EINVAL; + } + + /* The key/mask may not be on a natural slice boundary, pad it */ + pad = tlen - blen; + if (ulp_blob_pad_push(key, pad) || + ulp_blob_pad_push(mask, pad)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to pad key/mask\n"); + return -EINVAL; + } + + /* The new length accounts for the ctrl word length and num slices */ + tlen = tlen + clen * num_slices; + if (ulp_blob_init(tkey, tlen, key->byte_order) || + ulp_blob_init(tmask, tlen, mask->byte_order)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to post process wc tcam entry\n"); + return -EINVAL; + } + + /* Build the transformed key/mask */ + cword = dparms->wc_mode_list[num_slices - 1]; + cword = cpu_to_be32(cword); + offset = 0; + for (i = 0; i < num_slices; i++) { + val = ulp_blob_push_32(tkey, &cword, clen); + if (!val) { + netdev_dbg(ulp_ctx->bp->dev, "Key ctrl word push failed\n"); + return -EINVAL; + } + val = ulp_blob_push_32(tmask, &cword, clen); + if (!val) { + 
netdev_dbg(ulp_ctx->bp->dev, "Mask ctrl word push failed\n"); + return -EINVAL; + } + rc = ulp_blob_append(tkey, key, offset, slice_width); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Key blob append failed\n"); + return rc; + } + rc = ulp_blob_append(tmask, mask, offset, slice_width); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Mask blob append failed\n"); + return rc; + } + offset += slice_width; + } + + /* The key/mask are byte reversed on every 4 byte chunk */ + ulp_blob_perform_byte_reverse(tkey, 4); + ulp_blob_perform_byte_reverse(tmask, 4); + + return 0; +} + +/* Post process the key/mask blobs for wildcard tcam tbl */ +void ulp_mapper_wc_tcam_tbl_post_process(struct bnxt_ulp_context *ulp_ctx, struct ulp_blob *blob) +{ + ulp_blob_perform_64B_word_swap(blob); + ulp_blob_perform_64B_byte_swap(blob); + netdev_dbg(ulp_ctx->bp->dev, "Dump after wc tcam post process\n"); + ulp_mapper_blob_dump(ulp_ctx, blob); +} + +static int +ulp_mapper_gen_tbl_ref_cnt_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_mapper_gen_tbl_entry *entry) +{ + int rc = 0; + u64 val64; + + /* Allow the template to manage the reference count */ + switch (tbl->ref_cnt_opcode) { + case BNXT_ULP_REF_CNT_OPC_INC: + ULP_GEN_TBL_REF_CNT_INC(entry); + break; + case BNXT_ULP_REF_CNT_OPC_DEC: + /* writes never decrement the ref count */ + if (tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_WRITE) + return -EINVAL; + + ULP_GEN_TBL_REF_CNT_DEC(entry); + break; + case BNXT_ULP_REF_CNT_OPC_NOP: + /* Nothing to be done, generally used when + * template gets the ref_cnt to make a decision + */ + break; + case BNXT_ULP_REF_CNT_OPC_DEFAULT: + /* This is the default case and is backward + * compatible with older templates + */ + if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) + ULP_GEN_TBL_REF_CNT_INC(entry); + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid REF_CNT_OPC %d\n", + tbl->ref_cnt_opcode); + return -EINVAL; + } + + if (tbl->tbl_opcode == 
BNXT_ULP_GENERIC_TBL_OPC_READ) { + /* Add ref_cnt to the regfile for template to use. */ + val64 = (u32)ULP_GEN_TBL_REF_CNT(entry); + val64 = cpu_to_be64(val64); + rc = ulp_regfile_write(parms->regfile, + BNXT_ULP_RF_IDX_REF_CNT, + val64); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to write regfile[ref_cnt]\n"); + return rc; + } + } + + return rc; +} + +static int +ulp_mapper_gen_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_mapper_gen_tbl_entry gen_tbl_ent = { 0 }, *g; + struct ulp_gen_hash_entry_params *hash_entry = NULL; + struct ulp_mapper_gen_tbl_list *gen_tbl_list; + struct ulp_flow_db_res_params fid_parms; + struct bnxt_ulp_mapper_key_info *kflds; + enum ulp_gen_list_search_flag list_srch = ULP_GEN_LIST_SEARCH_MISSED; + u32 i, num_kflds = 0, key_index = 0, num_par_kflds = 0, pad = 0; + u32 gen_tbl_miss = 1, fdb_write = 0; + struct ulp_blob key, data; + u16 keylen, datalen = 0; + u8 *byte_data; + u8 *cache_key; + int tbl_idx; + int rc = 0; + u64 ref_cnt; + + /* Get the key fields list and build the key. 
*/ + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + return -EINVAL; + } + + /* Get the partial key list number*/ + num_par_kflds = ulp_mapper_partial_key_fields_get(parms, tbl); + + if (num_par_kflds) + pad = ULP_BYTE_2_BITS(sizeof(u8)) - + ULP_BITS_IS_BYTE_NOT_ALIGNED(tbl->key_bit_size); + + if (ulp_blob_init(&key, tbl->key_bit_size + pad + + tbl->partial_key_bit_size, + parms->device_params->key_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to alloc blob\n"); + return -EINVAL; + } + for (i = 0; i < num_kflds + num_par_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + &key, 1, "Gen Tbl Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to create key for Gen tbl rc=%d\n", rc); + return -EINVAL; + } + /* pad for the alignment between exact key and partial key */ + if (num_par_kflds && i == num_kflds - 1) { + if (ulp_blob_pad_push(&key, pad)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "key padding failed\n"); + return -EINVAL; + } + } + } + + /* Calculate the table index for the generic table */ + tbl_idx = ulp_mapper_gen_tbl_idx_calculate(parms->ulp_ctx, tbl->resource_sub_type, + tbl->direction); + if (tbl_idx < 0) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid table index %x:%x\n", + tbl->resource_sub_type, tbl->direction); + return -EINVAL; + } + + /* The_key is a byte array convert it to a search index */ + cache_key = ulp_blob_data_get(&key, &keylen); + ulp_mapper_gen_tbl_dump(parms->ulp_ctx, + tbl->resource_sub_type, tbl->direction, &key); + /* get the generic table */ + gen_tbl_list = &parms->mapper_data->gen_tbl_list[tbl_idx]; + + /* perform basic validation of generic table */ + if ((gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_HASH_LIST && + !gen_tbl_list->hash_tbl) || !gen_tbl_list->mem_data) { + netdev_dbg(parms->ulp_ctx->bp->dev, 
"Uninitialized gen table index %x:%x\n", + tbl->resource_sub_type, tbl->direction); + return -EINVAL; + } + + /* Check if generic hash table */ + if (gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_HASH_LIST) { + if (tbl->gen_tbl_lkup_type != + BNXT_ULP_GENERIC_TBL_LKUP_TYPE_HASH) { + netdev_dbg(parms->ulp_ctx->bp->dev, "%s: Invalid template lkup type\n", + gen_tbl_list->gen_tbl_name); + return -EINVAL; + } + hash_entry = rhashtable_lookup_fast(gen_tbl_list->hash_tbl, cache_key, + gen_tbl_list->hash_tbl_params); + if (hash_entry) { + hash_entry->search_flag = ULP_GEN_HASH_SEARCH_FOUND; + /* store the hash index in the fdb */ + key_index = hash_entry->hash_index; + } + } else if (gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_KEY_LIST) { + /* convert key to index directly */ + if (ULP_BITS_2_BYTE(keylen) > (int)sizeof(key_index)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "%s: keysize is bigger then 4 bytes\n", + gen_tbl_list->gen_tbl_name); + return -EINVAL; + } + memcpy(&key_index, cache_key, ULP_BITS_2_BYTE(keylen)); + /* Get the generic table entry */ + if (ulp_mapper_gen_tbl_entry_get(parms->ulp_ctx, gen_tbl_list, key_index, + &gen_tbl_ent)) + return -EINVAL; + } else if (gen_tbl_list->tbl_type == + BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST) { + list_srch = ulp_gen_tbl_simple_list_search(gen_tbl_list, + cache_key, + &key_index); + /* Get the generic table entry */ + if (ulp_mapper_gen_tbl_entry_get(parms->ulp_ctx, gen_tbl_list, + key_index, + &gen_tbl_ent)) + return -EINVAL; + } + + switch (tbl->tbl_opcode) { + case BNXT_ULP_GENERIC_TBL_OPC_READ: + if (gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_HASH_LIST && + gen_tbl_list->hash_tbl) { + if (hash_entry && hash_entry->search_flag != ULP_GEN_HASH_SEARCH_FOUND) + break; /* nothing to be done , no entry */ + } else if (gen_tbl_list->tbl_type == + BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST) { + if (list_srch == ULP_GEN_LIST_SEARCH_MISSED || + list_srch == ULP_GEN_LIST_SEARCH_FULL) + break; + } + if (gen_tbl_list->hash_tbl) + if 
(!hash_entry) + break; /* nothing to be done , no entry */ + + /* check the reference count */ + if ((gen_tbl_list->hash_tbl && hash_entry->entry.hash_ref_count) || + (gen_tbl_ent.ref_count && ULP_GEN_TBL_REF_CNT(&gen_tbl_ent))) { + if (gen_tbl_list->hash_tbl) + g = &hash_entry->entry; + else + g = &gen_tbl_ent; + /* Scan ident list and create the result blob*/ + rc = ulp_mapper_tbl_ident_scan_ext(parms, tbl, + g->byte_data, + g->byte_data_size, + g->byte_order); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to scan ident list\n"); + return -EINVAL; + } + + if (hash_entry && tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) + hash_entry->entry.hash_ref_count++; + + /* it is a hit */ + gen_tbl_miss = 0; + fdb_write = 1; + } + break; + case BNXT_ULP_GENERIC_TBL_OPC_WRITE: + if (gen_tbl_list->tbl_type == + BNXT_ULP_GEN_TBL_TYPE_SIMPLE_LIST) { + if (list_srch == ULP_GEN_LIST_SEARCH_FULL) { + netdev_dbg(parms->ulp_ctx->bp->dev, "failed to add gen entry\n"); + return -ENOMEM; + } + } + /* Initialize the blob data */ + if (ulp_blob_init(&data, tbl->result_bit_size, + gen_tbl_list->container.byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial result blob\n"); + return -EINVAL; + } + + /* Get the result fields list */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, + "Gen tbl Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + byte_data = ulp_blob_data_get(&data, &datalen); + + if (gen_tbl_list->tbl_type == BNXT_ULP_GEN_TBL_TYPE_HASH_LIST && + gen_tbl_list->hash_tbl) { + hash_entry = kzalloc(sizeof(*hash_entry) + + ULP_BITS_2_BYTE(keylen), GFP_KERNEL); + if (!hash_entry) + return -ENOMEM; + + memcpy(hash_entry->key_data, cache_key, ULP_BITS_2_BYTE(keylen)); + hash_entry->key_length = ULP_BITS_2_BYTE(datalen); + + hash_entry->entry.byte_data_size = datalen; + hash_entry->entry.byte_data = + vzalloc(ULP_BITS_2_BYTE(datalen)); + hash_entry->entry.byte_order = 
gen_tbl_list->container.byte_order; + memcpy(hash_entry->entry.byte_data, byte_data, + ULP_BITS_2_BYTE(datalen)); + rc = rhashtable_insert_fast(gen_tbl_list->hash_tbl, &hash_entry->node, + gen_tbl_list->hash_tbl_params); + if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) + hash_entry->entry.hash_ref_count++; + /* store the hash index in the fdb */ + key_index = hash_entry->hash_index; + } else { + /* check the reference count and ignore ref_cnt if NOP. + * NOP allows a write as an update. + */ + if (tbl->ref_cnt_opcode != BNXT_ULP_REF_CNT_OPC_NOP && + ULP_GEN_TBL_REF_CNT(&gen_tbl_ent)) { + /* a hit then error */ + netdev_dbg(parms->ulp_ctx->bp->dev, "generic entry already present\n"); + return -EINVAL; /* success */ + } + + rc = ulp_mapper_gen_tbl_entry_data_set(parms->ulp_ctx, + gen_tbl_list, + &gen_tbl_ent, + cache_key, + ULP_BITS_2_BYTE(keylen), + byte_data, + ULP_BITS_2_BYTE(datalen)); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to write generic table\n"); + return -EINVAL; + } + } + + fdb_write = 1; + parms->shared_hndl = (u64)tbl_idx << 32 | key_index; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid table opcode %x\n", tbl->tbl_opcode); + return -EINVAL; + } + + /* Set the generic entry hit */ + rc = ulp_regfile_write(parms->regfile, + BNXT_ULP_RF_IDX_GENERIC_TBL_MISS, + cpu_to_be64(gen_tbl_miss)); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Write regfile[%d] failed\n", + BNXT_ULP_RF_IDX_GENERIC_TBL_MISS); + return -EIO; + } + + /* add the entry to the flow database */ + if (fdb_write) { + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = key_index; + if (hash_entry) { + fid_parms.key_data = hash_entry->key_data; + netdev_dbg(parms->ulp_ctx->bp->dev, + "fid_params.key_data %p\n", + fid_parms.key_data); + } + fid_parms.critical_resource = tbl->critical_resource; 
+ ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to add gen ent flowdb %d\n", rc); + return rc; + } + + /* Reset the in-flight RID when generic table is written and the + * rid has been pushed into a handle (rid or fid). Once it has + * been written, we have persistent accounting of the resources. + */ + if (tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_WRITE && + (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_PUSH_RID_REGFILE || + tbl->fdb_opcode == BNXT_ULP_FDB_OPC_PUSH_FID)) + parms->rid = 0; + + if (hash_entry && tbl->tbl_opcode == BNXT_ULP_GENERIC_TBL_OPC_READ) { + ref_cnt = hash_entry->entry.hash_ref_count; + /* Add ref_cnt to the regfile for template to use. */ + ref_cnt = cpu_to_be64(ref_cnt); + rc = ulp_regfile_write(parms->regfile, BNXT_ULP_RF_IDX_REF_CNT, + ref_cnt); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to write regfile[ref_cnt]\n"); + return rc; + } + + } else { + if (gen_tbl_ent.ref_count) + rc = ulp_mapper_gen_tbl_ref_cnt_process(parms, tbl, + &gen_tbl_ent); + } + } + + return rc; +} + +static int +ulp_mapper_ctrl_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + u64 val64 = 0; + int rc = 0; + u32 rid; + + /* process the fdb opcode for alloc push */ + if (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_ALLOC_RID_REGFILE) { + rc = ulp_mapper_fdb_opc_alloc_rid(parms, tbl); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to do fdb alloc\n"); + return rc; + } + } else if (tbl->fdb_opcode == BNXT_ULP_FDB_OPC_DELETE_RID_REGFILE) { + rc = ulp_regfile_read(parms->regfile, tbl->fdb_operand, &val64); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get RID from regfile\n"); + return rc; + } + rid = be64_to_cpu(val64); + rc = ulp_mapper_resources_free(parms->ulp_ctx, + BNXT_ULP_FDB_TYPE_RID, + rid, + NULL); + } + + return rc; +} + +static int 
+ulp_mapper_vnic_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_flow_db_res_params fid_parms; + struct bnxt *bp = parms->ulp_ctx->bp; + uint16_t vnic_idx = 0, vnic_id = 0; + int32_t rc = 0; + + switch (tbl->resource_sub_type) { + case BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE: + if (tbl->tbl_opcode != BNXT_ULP_VNIC_TBL_OPC_ALLOC_WR_REGFILE) { + netdev_err(bp->dev, "Invalid vnic table opcode\n"); + return -EINVAL; + } + rc = bnxt_queue_action_create(parms, &vnic_idx, &vnic_id); + if (rc) { + netdev_err(bp->dev, "Failed create queue action\n"); + return rc; + } + break; + default: + netdev_err(bp->dev, "Invalid vnic table sub type\n"); + return -EINVAL; + } + + /* Link the created vnic to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = vnic_idx; + fid_parms.critical_resource = tbl->critical_resource; + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_err(bp->dev, "Failed to link resource to flow rc = %d\n", rc); + return rc; + } + rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, + (uint64_t)cpu_to_be64(vnic_id)); + if (rc) + netdev_err(bp->dev, "Failed to write regfile[%d] rc=%d\n", + tbl->tbl_operand, rc); + + netdev_dbg(bp->dev, "Vnic id =0x%x\n", vnic_id); + return rc; +} + +/* Free the vnic resource */ +static int32_t +ulp_mapper_vnic_tbl_res_free(__maybe_unused struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + uint16_t vnic_idx = res->resource_hndl; + + if (res->resource_sub_type == + BNXT_ULP_RESOURCE_SUB_TYPE_VNIC_TABLE_QUEUE) + return bnxt_queue_action_delete(tfp, vnic_idx); + + return -EINVAL; +} + +static int32_t +ulp_mapper_udcc_v6subnet_tbl_process(struct bnxt_ulp_mapper_parms 
*parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_flow_db_res_params fid_parms; + struct bnxt *bp = parms->ulp_ctx->bp; + struct bnxt_ulp_mapper_key_info *kflds; + u16 tmplen = 0, byte_data_size = 0; + struct ulp_blob key, mask, data; + u16 subnet_hndl = 0; + u32 i, num_kflds = 0; + u8 *byte_data; + u8 *byte_key; + u8 *byte_mask; + int32_t rc = 0; + + /* Get the key fields list and build the key. */ + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + return -EINVAL; + } + + if (ulp_blob_init(&key, tbl->key_bit_size, + BNXT_ULP_BYTE_ORDER_BE)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to alloc key blob\n"); + return -EINVAL; + } + + if (ulp_blob_init(&mask, tbl->key_bit_size, + BNXT_ULP_BYTE_ORDER_BE)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to alloc mask blob\n"); + return -EINVAL; + } + + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + &key, 1, "UDCC v6subnet Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to create key for v6subnet tbl rc=%d\n", + rc); + return -EINVAL; + } + /* Setup the mask */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_mask, + &mask, 0, "UDCC v6subnet Mask"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Mask field set failed %s\n", + kflds[i].field_info_mask.description); + return -EINVAL; + } + } + + netdev_dbg(parms->ulp_ctx->bp->dev, "UDCC v6subnet Tbl[%s] - Dump Key\n", + (tbl->direction == TF_DIR_RX) ? "RX" : "TX"); + ulp_mapper_blob_dump(parms->ulp_ctx, &key); + netdev_dbg(parms->ulp_ctx->bp->dev, "UDCC v6subnet Tbl[%s] - Dump Mask\n", + (tbl->direction == TF_DIR_RX) ? 
"RX" : "TX"); + ulp_mapper_blob_dump(parms->ulp_ctx, &mask); + + /* Initialize the blob data */ + if (ulp_blob_init(&data, tbl->result_bit_size, + BNXT_ULP_BYTE_ORDER_BE)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial index table blob\n"); + return -EINVAL; + } + + /* Get the result fields list */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, + "UDCC v6subnet Tbl Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + + /* The_key is a byte array convert it to a search index */ + byte_key = ulp_blob_data_get(&key, &tmplen); + byte_mask = ulp_blob_data_get(&mask, &tmplen); + byte_data = ulp_blob_data_get(&data, &byte_data_size); + + rc = bnxt_ulp_tf_v6_subnet_add(bp, byte_key, byte_mask, + byte_data, &subnet_hndl); + if (rc) { + netdev_err(bp->dev, "Failed to add v6 subnet rc=%d\n", rc); + return rc; + } + + /* Link the created to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = subnet_hndl; + fid_parms.critical_resource = tbl->critical_resource; + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_err(bp->dev, "Failed to link resource to flow rc = %d\n", rc); + return rc; + } + rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, + (uint64_t)cpu_to_be64(subnet_hndl)); + if (rc) + netdev_err(bp->dev, "Failed to write regfile[%d] rc=%d\n", + tbl->tbl_operand, rc); + + netdev_dbg(bp->dev, "UDCC: subnet_hndl =0x%x\n", subnet_hndl); + return rc; +} + +/* Free the subnet_hndl resource */ +static int32_t +ulp_mapper_udcc_v6subnet_tbl_res_free(__maybe_unused struct bnxt_ulp_context *ulp, + struct tf *tfp, + struct ulp_flow_db_res_params *res) +{ + int rc; + + rc = bnxt_ulp_tf_v6_subnet_del(tfp->bp, (u16)res->resource_hndl); + 
if (rc) + return rc; + + /* STB:TODO + * Notify L2 driver to cleanup its sessions that belong to this subnet + */ + return rc; +} + +static int +ulp_mapper_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + struct bnxt_ulp_glb_resource_info *glb_res; + u32 num_glb_res_ids, idx, dev_id; + int rc = 0; + u8 app_id; + + glb_res = ulp_mapper_glb_resource_info_list_get(&num_glb_res_ids); + if (!glb_res || !num_glb_res_ids) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid Arguments\n"); + return -EINVAL; + } + + rc = bnxt_ulp_devid_get(ulp_ctx->bp, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unsupported device %x\n", rc); + return rc; + } + + rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get app id for glb init (%d)\n", + rc); + return rc; + } + + /* Iterate the global resources and process each one */ + for (idx = 0; idx < num_glb_res_ids; idx++) { + if (dev_id != glb_res[idx].device_id || + glb_res[idx].app_id != app_id) + continue; + switch (glb_res[idx].resource_func) { + case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + rc = ulp_mapper_resource_ident_allocate(ulp_ctx, + mapper_data, + &glb_res[idx], + false); + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = ulp_mapper_resource_index_tbl_alloc(ulp_ctx, + mapper_data, + &glb_res[idx], + false); + break; + default: + netdev_dbg(ulp_ctx->bp->dev, "Global resource %x not supported\n", + glb_res[idx].resource_func); + rc = -EINVAL; + break; + } + if (rc) + return rc; + } + return rc; +} + +/** Iterate over the shared resources assigned during tf_open_session and store + * them in the global regfile with the shared flag. 
+ */ +static int +ulp_mapper_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + const struct ulp_mapper_core_ops *op = mapper_data->mapper_oper; + + return op->ulp_mapper_core_app_glb_res_info_init(ulp_ctx, mapper_data); +} + +/** Common conditional opcode process routine that is used for both the template + * rejection and table conditional execution. + */ +static int +ulp_mapper_cond_opc_process(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_cond_opc opc, + u64 operand, + int *res) +{ + enum bnxt_ulp_flow_mem_type mtype = BNXT_ULP_FLOW_MEM_TYPE_INT; + struct bnxt *bp = parms->ulp_ctx->bp; + u64 regval, result = 0; + u32 field_size = 0; + int rc = 0; + u8 bit, tmp; + + switch (opc) { + case BNXT_ULP_COND_OPC_CF_IS_SET: + if (operand < BNXT_ULP_CF_IDX_LAST) { + result = ULP_COMP_FLD_IDX_RD(parms, operand); + } else { + netdev_dbg(bp->dev, "comp field out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_CF_NOT_SET: + if (operand < BNXT_ULP_CF_IDX_LAST) { + result = !ULP_COMP_FLD_IDX_RD(parms, operand); + } else { + netdev_dbg(bp->dev, "comp field out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_ACT_BIT_IS_SET: + if (operand < BNXT_ULP_ACT_BIT_LAST) { + result = ULP_BITMAP_ISSET(parms->act_bitmap->bits, operand); + } else { + netdev_dbg(bp->dev, "action bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_ACT_BIT_NOT_SET: + if (operand < BNXT_ULP_ACT_BIT_LAST) { + result = !ULP_BITMAP_ISSET(parms->act_bitmap->bits, + operand); + } else { + netdev_dbg(bp->dev, "action bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_HDR_BIT_IS_SET: + if (operand < BNXT_ULP_HDR_BIT_LAST) { + result = ULP_BITMAP_ISSET(parms->hdr_bitmap->bits, operand); + } else { + netdev_dbg(bp->dev, "header bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case 
BNXT_ULP_COND_OPC_HDR_BIT_NOT_SET: + if (operand < BNXT_ULP_HDR_BIT_LAST) { + result = !ULP_BITMAP_ISSET(parms->hdr_bitmap->bits, + operand); + } else { + netdev_dbg(bp->dev, "header bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_FIELD_BIT_IS_SET: + rc = ulp_mapper_glb_field_tbl_get(parms, operand, &bit); + if (rc) { + netdev_dbg(bp->dev, "invalid ulp_glb_field_tbl idx %llu\n", operand); + return -EINVAL; + } + result = ULP_INDEX_BITMAP_GET(parms->fld_bitmap->bits, bit); + break; + case BNXT_ULP_COND_OPC_FIELD_BIT_NOT_SET: + rc = ulp_mapper_glb_field_tbl_get(parms, operand, &bit); + if (rc) { + netdev_dbg(bp->dev, "invalid ulp_glb_field_tbl idx %llu\n", operand); + return -EINVAL; + } + result = !ULP_INDEX_BITMAP_GET(parms->fld_bitmap->bits, bit); + break; + case BNXT_ULP_COND_OPC_RF_IS_SET: + if (ulp_regfile_read(parms->regfile, operand, ®val)) { + netdev_dbg(bp->dev, "regfile[%llu] read oob\n", operand); + return -EINVAL; + } + result = regval != 0; + break; + case BNXT_ULP_COND_OPC_RF_NOT_SET: + if (ulp_regfile_read(parms->regfile, operand, ®val)) { + netdev_dbg(bp->dev, "regfile[%llu] read oob\n", operand); + return -EINVAL; + } + result = regval == 0; + break; + case BNXT_ULP_COND_OPC_FLOW_PAT_MATCH: + result = parms->flow_pattern_id == operand; + break; + case BNXT_ULP_COND_OPC_ACT_PAT_MATCH: + result = parms->act_pattern_id == operand; + break; + case BNXT_ULP_COND_OPC_EXT_MEM_IS_SET: + if (bnxt_ulp_cntxt_mem_type_get(parms->ulp_ctx, &mtype)) { + netdev_dbg(bp->dev, "Failed to get the mem type\n"); + return -EINVAL; + } + result = (mtype == BNXT_ULP_FLOW_MEM_TYPE_INT) ? 0 : 1; + break; + case BNXT_ULP_COND_OPC_EXT_MEM_NOT_SET: + if (bnxt_ulp_cntxt_mem_type_get(parms->ulp_ctx, &mtype)) { + netdev_dbg(bp->dev, "Failed to get the mem type\n"); + return -EINVAL; + } + result = (mtype == BNXT_ULP_FLOW_MEM_TYPE_INT) ? 
1 : 0; + break; + case BNXT_ULP_COND_OPC_ENC_HDR_BIT_IS_SET: + if (operand < BNXT_ULP_HDR_BIT_LAST) { + result = ULP_BITMAP_ISSET(parms->enc_hdr_bitmap->bits, operand); + } else { + netdev_dbg(bp->dev, "header bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_ENC_HDR_BIT_NOT_SET: + if (operand < BNXT_ULP_HDR_BIT_LAST) { + result = !ULP_BITMAP_ISSET(parms->enc_hdr_bitmap->bits, + operand); + } else { + netdev_dbg(bp->dev, "header bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + break; + case BNXT_ULP_COND_OPC_ACT_PROP_IS_SET: + case BNXT_ULP_COND_OPC_ACT_PROP_NOT_SET: + /* only supporting 1-byte action properties for now */ + if (operand >= BNXT_ULP_ACT_PROP_IDX_LAST) { + netdev_dbg(bp->dev, "act_prop[%llu] oob\n", operand); + return -EINVAL; + } + field_size = ulp_mapper_act_prop_size_get(operand); + if (sizeof(tmp) != field_size) { + netdev_dbg(bp->dev, "act_prop[%llu] field mismatch %u\n", + operand, field_size); + return -EINVAL; + } + tmp = parms->act_prop->act_details[operand]; + if (opc == BNXT_ULP_COND_OPC_ACT_PROP_IS_SET) + result = (int)(tmp); + else + result = (int)(!tmp); + break; + case BNXT_ULP_COND_OPC_CF_BIT_IS_SET: + case BNXT_ULP_COND_OPC_CF_BIT_NOT_SET: + if (operand < BNXT_ULP_CF_BIT_LAST) { + result = ULP_BITMAP_ISSET(parms->cf_bitmap, operand); + } else { + netdev_dbg(bp->dev, "CF bit out of bounds %llu\n", operand); + rc = -EINVAL; + } + if (opc == BNXT_ULP_COND_OPC_CF_BIT_NOT_SET) + result = !result; + break; + case BNXT_ULP_COND_OPC_WC_FIELD_BIT_IS_SET: + case BNXT_ULP_COND_OPC_WC_FIELD_BIT_NOT_SET: + rc = ulp_mapper_glb_field_tbl_get(parms, operand, &bit); + if (rc) { + netdev_dbg(bp->dev, "invalid ulp_glb_field idx %llu\n", operand); + return -EINVAL; + } + result = ULP_INDEX_BITMAP_GET(parms->wc_field_bitmap, bit); + if (opc == BNXT_ULP_COND_OPC_WC_FIELD_BIT_NOT_SET) + result = !result; + break; + case BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_IS_SET: + case 
BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET: + rc = ulp_mapper_glb_field_tbl_get(parms, operand, &bit); + if (rc) { + netdev_dbg(bp->dev, "invalid ulp_glb_field idx %llu\n", operand); + return -EINVAL; + } + result = ULP_INDEX_BITMAP_GET(parms->exclude_field_bitmap, bit); + if (opc == BNXT_ULP_COND_OPC_EXCLUDE_FIELD_BIT_NOT_SET) + result = !result; + break; + case BNXT_ULP_COND_OPC_FEATURE_BIT_IS_SET: + case BNXT_ULP_COND_OPC_FEATURE_BIT_NOT_SET: + regval = bnxt_ulp_feature_bits_get(parms->ulp_ctx); + result = ULP_BITMAP_ISSET(regval, operand); + if (opc == BNXT_ULP_COND_OPC_FEATURE_BIT_NOT_SET) + result = !ULP_BITMAP_ISSET(regval, operand); + break; + default: + netdev_dbg(bp->dev, "Invalid conditional opcode %d\n", opc); + rc = -EINVAL; + break; + } + + *res = !!result; + return (rc); +} + +static int +ulp_mapper_func_opr_compute(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + enum bnxt_ulp_func_src func_src, + u64 func_opr, + u64 *result) +{ + struct bnxt *bp = parms->ulp_ctx->bp; + bool shared; + u64 regval; + + *result = false; + switch (func_src) { + case BNXT_ULP_FUNC_SRC_COMP_FIELD: + if (func_opr >= BNXT_ULP_CF_IDX_LAST) { + netdev_dbg(bp->dev, "invalid index %u\n", (u32)func_opr); + return -EINVAL; + } + *result = ULP_COMP_FLD_IDX_RD(parms, func_opr); + break; + case BNXT_ULP_FUNC_SRC_REGFILE: + if (ulp_regfile_read(parms->regfile, func_opr, ®val)) { + netdev_dbg(bp->dev, "regfile[%d] read oob\n", (u32)func_opr); + return -EINVAL; + } + *result = be64_to_cpu(regval); + break; + case BNXT_ULP_FUNC_SRC_GLB_REGFILE: + if (ulp_mapper_glb_resource_read(parms->mapper_data, dir, + func_opr, ®val, &shared)) { + netdev_dbg(bp->dev, "global regfile[%d] read failed.\n", (u32)func_opr); + return -EINVAL; + } + *result = be64_to_cpu(regval); + break; + case BNXT_ULP_FUNC_SRC_CONST: + *result = func_opr; + break; + case BNXT_ULP_FUNC_SRC_ACTION_BITMAP: + *result = parms->act_bitmap->bits; + break; + case BNXT_ULP_FUNC_SRC_HEADER_BITMAP: + *result = 
parms->hdr_bitmap->bits; + break; + default: + netdev_dbg(bp->dev, "invalid src code %u\n", func_src); + return -EINVAL; + } + return 0; +} + +static int +ulp_mapper_vfr_mark_set(struct bnxt_ulp_mapper_parms *parms, + u32 key, u16 port_id, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_flow_db_res_params fid_parms; + u32 mark_flag; + int rc; + + /* Set the mark flag to local fid and vfr flag */ + mark_flag = BNXT_ULP_MARK_LOCAL_HW_FID | BNXT_ULP_MARK_VFR_ID; + + rc = ulp_mark_db_mark_add(parms->ulp_ctx, mark_flag, + key, port_id); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + return rc; + } + fid_parms.direction = tbl->direction; + fid_parms.resource_func = BNXT_ULP_RESOURCE_FUNC_HW_FID; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_type = mark_flag; + fid_parms.resource_hndl = key; + fid_parms.resource_sub_type = 0; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + int trc = 0; + + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", rc); + trc = ulp_mark_db_mark_del(parms->ulp_ctx, mark_flag, key); + if (trc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to cleanup mark rc = %d\n", rc); + } + return rc; +} + +static int +ulp_mapper_bd_act_set(struct bnxt_ulp_mapper_parms *parms, + u16 port_id, u32 action) +{ + struct bnxt *bp = parms->ulp_ctx->bp; +#if !defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; +#endif + +#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD + return bnxt_bd_act_set(bp, port_id, action); +#else + if (BNXT_CHIP_P7(bp)) { + dev = bnxt_get_vf_rep(bp, port_id); + if (!dev) { + netdev_err(bp->dev, "%s: vf_rep NULL\n", bp->dev->name); + return -1; + } + + vf_rep = netdev_priv(dev); + if (bnxt_dev_is_vf_rep(vf_rep->dev)) + vf_rep->tx_cfa_action = action; + else + bp->tx_cfa_action = action; + } else { + netdev_dbg(bp->dev, 
"Warning: Not support bd action\n"); + return -1; + } + return 0; +#endif +} + +/* oper size is in bits and res size are in bytes */ +static int32_t +ulp_mapper_func_cond_list_process(struct bnxt_ulp_mapper_parms *parms, + u32 idx, u8 dir, + u32 oper_size, u64 *res, + u32 res_size) +{ + struct bnxt_ulp_mapper_field_info *fld; + u8 *val = NULL; + u32 val_len = 0; + u64 value = 0; + u16 ext_idx = 0; + u8 *res_local = (uint8_t *)res; + + /* Get the field info from the key ext list */ + fld = ulp_mapper_tmpl_key_ext_list_get(parms, idx); + if (!fld || fld->field_opc != BNXT_ULP_FIELD_OPC_TERNARY_LIST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid field idx %d\n", idx); + return -EINVAL; + } + + /* process the condition list */ + if (ulp_mapper_field_src_process(parms, fld->field_src1, + fld->field_opr1, dir, + 1, oper_size, &val, + &val_len, &value)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "error processing func opcode %u\n", + idx); + return -EINVAL; + } + if (value) { + if (fld->field_src2 == BNXT_ULP_FIELD_SRC_NEXT) { + /* read the next key ext table index */ + if (ulp_operand_read(fld->field_opr2, + (uint8_t *)&ext_idx, + sizeof(uint16_t))) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "field idx operand read failed\n"); + return -EINVAL; + } + ext_idx = be16_to_cpu(ext_idx); + return ulp_mapper_func_cond_list_process(parms, ext_idx, + dir, oper_size, + res, res_size); + } else { + /* get the value from then part */ + if (ulp_mapper_field_src_process(parms, fld->field_src2, + fld->field_opr2, dir, + 1, oper_size, + &val, &val_len, + &value)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "error processing func oper %u\n", + ext_idx); + return -EINVAL; + } + } + } else { + if (fld->field_src3 == BNXT_ULP_FIELD_SRC_NEXT) { + /* read the next key ext table index */ + if (ulp_operand_read(fld->field_opr3, + (uint8_t *)&ext_idx, + sizeof(uint16_t))) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "field idx operand read failed\n"); + return -EINVAL; + } + ext_idx = 
be16_to_cpu(ext_idx); + return ulp_mapper_func_cond_list_process(parms, ext_idx, + dir, oper_size, + res, res_size); + } else { + /* get the value from else part */ + if (ulp_mapper_field_src_process(parms, fld->field_src3, + fld->field_opr3, dir, + 1, oper_size, + &val, &val_len, + &value)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "error processing func oper %u\n", + ext_idx); + return -EINVAL; + } + } + } + /* write the value into result */ + if (ulp_operand_read(val, res_local + res_size - + ULP_BITS_2_BYTE_NR(oper_size), + ULP_BITS_2_BYTE_NR(val_len))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Value read failed\n"); + return -EINVAL; + } + + /* convert the data to cpu format */ + *res = be64_to_cpu(*res); + return 0; +} + +static int +ulp_mapper_func_info_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + const struct ulp_mapper_core_ops *op = parms->mapper_data->mapper_oper; + struct bnxt_ulp_mapper_func_info *func_info = &tbl->func_info; + u32 process_src1 = 0, process_src2 = 0; + u64 res = 0, res1 = 0, res2 = 0; + int rc = 0; + + /* determine which functional operands to compute */ + switch (func_info->func_opc) { + case BNXT_ULP_FUNC_OPC_NOP: + return rc; + case BNXT_ULP_FUNC_OPC_EQ: + case BNXT_ULP_FUNC_OPC_NE: + case BNXT_ULP_FUNC_OPC_GE: + case BNXT_ULP_FUNC_OPC_GT: + case BNXT_ULP_FUNC_OPC_LE: + case BNXT_ULP_FUNC_OPC_LT: + case BNXT_ULP_FUNC_OPC_LEFT_SHIFT: + case BNXT_ULP_FUNC_OPC_RIGHT_SHIFT: + case BNXT_ULP_FUNC_OPC_BIT_OR: + case BNXT_ULP_FUNC_OPC_BIT_AND: + case BNXT_ULP_FUNC_OPC_BIT_XOR: + case BNXT_ULP_FUNC_OPC_LOG_OR: + case BNXT_ULP_FUNC_OPC_LOG_AND: + case BNXT_ULP_FUNC_OPC_ADD: + case BNXT_ULP_FUNC_OPC_SUB: + process_src1 = 1; + process_src2 = 1; + break; + case BNXT_ULP_FUNC_OPC_COPY_SRC1_TO_RF: + process_src1 = 1; + break; + case BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET: + case BNXT_ULP_FUNC_OPC_VFR_MARK_SET: + case BNXT_ULP_FUNC_OPC_BD_ACT_SET: + process_src1 = 1; + process_src2 = 1; + break; + case 
BNXT_ULP_FUNC_OPC_NOT_NOT: + process_src1 = 1; + case BNXT_ULP_FUNC_OPC_COND_LIST: + break; + default: + break; + } + + if (process_src1) { + rc = ulp_mapper_func_opr_compute(parms, tbl->direction, + func_info->func_src1, + func_info->func_opr1, &res1); + if (rc) + return rc; + } + + if (process_src2) { + rc = ulp_mapper_func_opr_compute(parms, tbl->direction, + func_info->func_src2, + func_info->func_opr2, &res2); + if (rc) + return rc; + } + + /* perform the functional opcode operations */ + switch (func_info->func_opc) { + case BNXT_ULP_FUNC_OPC_EQ: + if (res1 == res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_NE: + if (res1 != res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_GE: + if (res1 >= res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_GT: + if (res1 > res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_LE: + if (res1 <= res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_LT: + if (res1 < res2) + res = 1; + break; + case BNXT_ULP_FUNC_OPC_LEFT_SHIFT: + res = res1 << res2; + break; + case BNXT_ULP_FUNC_OPC_RIGHT_SHIFT: + res = res1 >> res2; + break; + case BNXT_ULP_FUNC_OPC_ADD: + res = res1 + res2; + break; + case BNXT_ULP_FUNC_OPC_SUB: + res = res1 - res2; + break; + case BNXT_ULP_FUNC_OPC_NOT_NOT: + res = !!res1; + break; + case BNXT_ULP_FUNC_OPC_BIT_AND: + res = res1 & res2; + break; + case BNXT_ULP_FUNC_OPC_BIT_OR: + res = res1 | res2; + break; + case BNXT_ULP_FUNC_OPC_BIT_XOR: + res = res1 ^ res2; + break; + case BNXT_ULP_FUNC_OPC_LOG_AND: + res = res1 && res2; + break; + case BNXT_ULP_FUNC_OPC_LOG_OR: + res = res1 || res2; + break; + case BNXT_ULP_FUNC_OPC_COPY_SRC1_TO_RF: + res = res1; + break; + case BNXT_ULP_FUNC_OPC_GET_PARENT_MAC_ADDR: + bnxt_get_parent_mac_addr(parms->ulp_ctx->bp, (u8 *)&res); + res = be64_to_cpu(res); + break; + case BNXT_ULP_FUNC_OPC_HANDLE_TO_OFFSET: + rc = op->ulp_mapper_core_handle_to_offset(parms, res1, + res2, &res); + break; + case BNXT_ULP_FUNC_OPC_VFR_MARK_SET: + /* res1 is key, res2 is portid */ + return 
ulp_mapper_vfr_mark_set(parms, res1, res2, tbl); + case BNXT_ULP_FUNC_OPC_BD_ACT_SET: + /* res1 is port_id, res2 is action */ + return ulp_mapper_bd_act_set(parms, res1, res2); + case BNXT_ULP_FUNC_OPC_COND_LIST: + if (func_info->func_src1 != BNXT_ULP_FUNC_SRC_KEY_EXT_LIST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid func source %u\n", + func_info->func_opc); + return -EINVAL; + } + if (ulp_mapper_func_cond_list_process(parms, + func_info->func_opr1, + tbl->direction, + func_info->func_oper_size, + &res, sizeof(res))) + return -EINVAL; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "invalid func code %u\n", func_info->func_opc); + return -EINVAL; + } + if (ulp_regfile_write(parms->regfile, func_info->func_dst_opr, + cpu_to_be64(res))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed write the func_opc %u\n", + func_info->func_dst_opr); + return -EINVAL; + } + netdev_dbg(parms->ulp_ctx->bp->dev, "write the 0x%llX into func_opc %u\n", res, + func_info->func_dst_opr); + + return rc; +} + +/** Processes a list of conditions and returns both a status and result of the + * list. The status must be checked prior to verifying the result. + * + * returns 0 for success, negative on failure + * returns res = 1 for true, res = 0 for false. + */ +static int +ulp_mapper_cond_opc_list_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_cond_list_info *info, + int *res) +{ + struct bnxt_ulp_mapper_cond_info *cond_list; + int rc = 0, trc = 0; + u32 i; + + switch (info->cond_list_opcode) { + case BNXT_ULP_COND_LIST_OPC_AND: + /* AND Defaults to true. */ + *res = 1; + break; + case BNXT_ULP_COND_LIST_OPC_OR: + /* OR Defaults to false. 
*/ + *res = 0; + break; + case BNXT_ULP_COND_LIST_OPC_TRUE: + *res = 1; + return rc; + case BNXT_ULP_COND_LIST_OPC_FALSE: + *res = 0; + return rc; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid conditional list opcode %d\n", + info->cond_list_opcode); + *res = 0; + return -EINVAL; + } + + cond_list = ulp_mapper_tmpl_cond_list_get(parms, info->cond_start_idx); + for (i = 0; i < info->cond_nums; i++) { + rc = ulp_mapper_cond_opc_process(parms, + cond_list[i].cond_opcode, + cond_list[i].cond_operand, + &trc); + if (rc) + return rc; + + if (info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_AND) { + /* early return if result is ever zero */ + if (!trc) { + *res = trc; + return rc; + } + } else { + /* early return if result is ever non-zero */ + if (trc) { + *res = trc; + return rc; + } + } + } + + return rc; +} + +static int +ulp_mapper_cond_reject_list_process(struct bnxt_ulp_mapper_parms *parms, + u32 tid, int *res) +{ + struct bnxt_ulp_mapper_cond_list_info *reject_info; + struct bnxt_ulp_mapper_cond_list_info *oper; + int cond_list_res = 0, cond_res = 0, rc = 0; + struct bnxt *bp = parms->ulp_ctx->bp; + u32 idx; + + /* set the rejection result to accept */ + *res = 0; + + /* If act rej cond is not enabled then skip reject cond processing */ + if (parms->tmpl_type == BNXT_ULP_TEMPLATE_TYPE_ACTION && + !ULP_COMP_FLD_IDX_RD(parms, BNXT_ULP_CF_IDX_ACT_REJ_COND_EN)) + return rc; + + /* get the reject condition list */ + reject_info = ulp_mapper_tmpl_reject_list_get(parms, tid); + + if (reject_info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_TRUE) { + cond_list_res = 1; + goto jump_exit; + } + + /* If there are no reject conditions then skip */ + if (!reject_info->cond_nums) + return rc; + + /* Iterate the list to process the conditions */ + if (reject_info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_LIST_AND || + reject_info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_LIST_OR) { + /* Initialize the cond result */ + if (reject_info->cond_list_opcode == + 
BNXT_ULP_COND_LIST_OPC_LIST_AND) + cond_res = 1; + + for (idx = reject_info->cond_start_idx; + idx < reject_info->cond_start_idx + + reject_info->cond_nums; idx++) { + oper = ulp_mapper_cond_oper_list_get(parms, idx); + if (!oper) { + netdev_dbg(bp->dev, "Invalid cond oper idx %d\n", idx); + return -EINVAL; + } + rc = ulp_mapper_cond_opc_list_process(parms, oper, + &cond_list_res); + /* if any error, then return */ + if (rc) + goto jump_exit; + + /* early return if result is ever zero */ + if (cond_res /*and */ && !cond_list_res /*false*/) + goto jump_exit; + + /* early return if result is ever non-zero */ + if (!cond_res /*or */ && cond_list_res /*true*/) + goto jump_exit; + } + } else { + rc = ulp_mapper_cond_opc_list_process(parms, reject_info, + &cond_list_res); + } +jump_exit: + *res = cond_list_res; + /* Reject the template if True */ + if (cond_list_res) + netdev_dbg(bp->dev, "%s Template %d rejected.\n", + ulp_mapper_tmpl_name_str(parms->tmpl_type), tid); + return rc; +} + +static int +ulp_mapper_cond_execute_list_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + int *res) +{ + struct bnxt_ulp_mapper_cond_list_info *execute_info; + struct bnxt_ulp_mapper_cond_list_info *oper; + int cond_list_res, cond_res = 0, rc = 0; + struct bnxt *bp = parms->ulp_ctx->bp; + u32 idx; + + /* set the execute result to true */ + *res = 1; + execute_info = &tbl->execute_info; + + /* If there are no execute conditions then skip */ + if (!execute_info->cond_nums) + return rc; + + /* Iterate the list to process the conditions */ + if (execute_info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_LIST_AND || + execute_info->cond_list_opcode == BNXT_ULP_COND_LIST_OPC_LIST_OR) { + /* Initialize the cond result */ + if (execute_info->cond_list_opcode == + BNXT_ULP_COND_LIST_OPC_LIST_AND) + cond_res = 1; + + for (idx = execute_info->cond_start_idx; + idx < execute_info->cond_start_idx + + execute_info->cond_nums; idx++) { + oper = 
ulp_mapper_cond_oper_list_get(parms, idx); + if (!oper) { + netdev_dbg(bp->dev, "Invalid cond oper idx %d\n", idx); + return -EINVAL; + } + rc = ulp_mapper_cond_opc_list_process(parms, oper, + &cond_list_res); + /* if any error, then return */ + if (rc) + goto jump_exit; + + /* early return if result is ever zero */ + if (cond_res /*and */ && !cond_list_res /*false*/) + goto jump_exit; + + /* early return if result is ever non-zero */ + if (!cond_res /*or */ && cond_list_res /*true*/) + goto jump_exit; + } + } else { + rc = ulp_mapper_cond_opc_list_process(parms, execute_info, + &cond_list_res); + } +jump_exit: + *res = cond_list_res; + return rc; +} + +/** Processes conflict resolution and returns both a status and result. + * The status must be checked prior to verifying the result. + * + * returns 0 for success, negative on failure + * returns res = 1 for true, res = 0 for false. + */ +static int +ulp_mapper_conflict_resolution_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + int *res) +{ + struct bnxt *bp = parms->ulp_ctx->bp; + u64 regval, comp_sig; + int rc = 0; + + *res = 0; + switch (tbl->accept_opcode) { + case BNXT_ULP_ACCEPT_OPC_ALWAYS: + *res = 1; + break; + case BNXT_ULP_ACCEPT_OPC_FLOW_SIG_ID_MATCH: + /* perform the signature validation*/ + if (tbl->resource_func == + BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE) { + /* Perform the check that generic table is hit or not */ + if (ulp_regfile_read(parms->regfile, + BNXT_ULP_RF_IDX_GENERIC_TBL_MISS, + ®val)) { + netdev_dbg(bp->dev, "regfile[%d] read oob\n", + BNXT_ULP_RF_IDX_GENERIC_TBL_MISS); + return -EINVAL; + } + if (regval) { + /* not a hit so no need to check flow sign*/ + *res = 1; + return rc; + } + } + /* compare the new flow signature against stored one */ + if (ulp_regfile_read(parms->regfile, + BNXT_ULP_RF_IDX_FLOW_SIG_ID, + ®val)) { + netdev_dbg(bp->dev, "regfile[%d] read oob\n", + BNXT_ULP_RF_IDX_FLOW_SIG_ID); + return -EINVAL; + } + comp_sig = 
ULP_COMP_FLD_IDX_RD(parms, + BNXT_ULP_CF_IDX_FLOW_SIG_ID); + regval = be64_to_cpu(regval); + if (comp_sig == regval) + *res = 1; + else + netdev_dbg(bp->dev, "failed signature match %llu:%x\n", comp_sig, + (u32)regval); + break; + default: + netdev_dbg(bp->dev, "Invalid accept opcode %d\n", tbl->accept_opcode); + return -EINVAL; + } + return rc; +} + +static int +ulp_mapper_allocator_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct ulp_flow_db_res_params fid_parms; + int alloc_index, rc = 0; + u64 regval = 0; + + /* Only Alloc opcode is supported for now */ + if (tbl->tbl_opcode != BNXT_ULP_ALLOC_TBL_OPC_ALLOC) + return 0; /* nothing to done */ + + /* allocate the index from the allocator */ + rc = ulp_allocator_tbl_list_alloc(parms->mapper_data, + tbl->resource_sub_type, + tbl->direction, &alloc_index); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "unable to alloc index %x:%x\n", + tbl->resource_sub_type, tbl->direction); + return -EINVAL; + } + + /* Write to the regfile */ + regval = cpu_to_be64(alloc_index); + rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, regval); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to write regfile[%d] rc=%d\n", + tbl->tbl_operand, rc); + return -EINVAL; + } + + /* update the flow database */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = alloc_index; + fid_parms.critical_resource = tbl->critical_resource; + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link resource to flow rc = %d\n", + rc); + goto error; + } + return rc; +error: + /* Free the allocated index */ + (void)ulp_allocator_tbl_list_free(parms->ulp_ctx->bp, + parms->mapper_data, + tbl->resource_sub_type, + 
tbl->direction, alloc_index); + return rc; +} + +static int +ulp_mapper_tbls_process(struct bnxt_ulp_mapper_parms *parms, void *error) +{ + const struct ulp_mapper_core_ops *oper; + struct bnxt_ulp_mapper_tbl_info *tbls; + struct bnxt_ulp_mapper_tbl_info *tbl; + struct bnxt *bp = parms->ulp_ctx->bp; + int rc = -EINVAL, cond_rc = 0; + u32 num_tbls, tbl_idx; + int cond_goto = 1; + u32 tid; + + oper = parms->mapper_data->mapper_oper; + + /* assign the template id based on template type */ + tid = (parms->tmpl_type == BNXT_ULP_TEMPLATE_TYPE_ACTION) ? + parms->act_tid : parms->class_tid; + + rc = ulp_mapper_cond_reject_list_process(parms, tid, &cond_rc); + /* if rc is failure or cond_rc is a reject then exit tbl processing */ + if (rc || cond_rc) + return -EINVAL; + + tbls = ulp_mapper_tbl_list_get(parms, tid, &num_tbls); + if (!tbls || !num_tbls) { + netdev_dbg(bp->dev, "No %s tables for %d:%d\n", + ulp_mapper_tmpl_name_str(parms->tmpl_type), + parms->dev_id, tid); + return -EINVAL; + } + + for (tbl_idx = 0; tbl_idx < num_tbls && cond_goto;) { + tbl = &tbls[tbl_idx]; + cond_goto = tbl->execute_info.cond_true_goto; + + ulp_mapper_table_dump(parms->ulp_ctx, tbl, tbl_idx); + + /* Process the conditional func code opcodes */ + if (ulp_mapper_func_info_process(parms, tbl)) { + netdev_dbg(bp->dev, "Failed to process cond update\n"); + rc = -EINVAL; + goto error; + } + + /* process the execute info of the table */ + rc = ulp_mapper_cond_execute_list_process(parms, tbl, &cond_rc); + if (rc) { + netdev_dbg(bp->dev, "Failed to proc cond opc list (%d)\n", rc); + goto error; + } + /* Skip the table if False */ + if (!cond_rc) { + cond_goto = tbl->execute_info.cond_false_goto; + goto next_iteration; + } + + switch (tbl->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + rc = oper->ulp_mapper_core_tcam_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + rc = oper->ulp_mapper_core_em_tbl_process(parms, tbl, + error); + break; + case 
BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = oper->ulp_mapper_core_index_tbl_process(parms, + tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_IF_TABLE: + rc = oper->ulp_mapper_core_if_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE: + rc = ulp_mapper_gen_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE: + rc = ulp_mapper_ctrl_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_CMM_TABLE: + case BNXT_ULP_RESOURCE_FUNC_CMM_STAT: + rc = oper->ulp_mapper_core_cmm_tbl_process(parms, tbl, + error); + break; + case BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE: + rc = ulp_mapper_vnic_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_INVALID: + rc = 0; + break; + case BNXT_ULP_RESOURCE_FUNC_UDCC_V6SUBNET_TABLE: + rc = ulp_mapper_udcc_v6subnet_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE: + rc = ulp_mapper_key_recipe_tbl_process(parms, tbl); + break; + case BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE: + rc = ulp_mapper_allocator_tbl_process(parms, tbl); + break; + default: + netdev_dbg(bp->dev, "Unexpected mapper resource %d\n", tbl->resource_func); + rc = -EINVAL; + goto error; + } + + if (rc) { + netdev_dbg(bp->dev, "Resource type %d failed\n", tbl->resource_func); + goto error; + } + + /* perform the post table process */ + rc = ulp_mapper_conflict_resolution_process(parms, tbl, &cond_rc); + if (rc || !cond_rc) { + netdev_dbg(bp->dev, "Failed due to conflict resolution\n"); + rc = -EINVAL; + goto error; + } +next_iteration: + if (cond_goto == BNXT_ULP_COND_GOTO_REJECT) { + if (tbl->false_message || tbl->true_message) { + const char *msg = (tbl->false_message) ? 
+ tbl->false_message : + tbl->true_message; + netdev_dbg(bp->dev, "%s\n", msg); + return -EINVAL; + } + netdev_dbg(bp->dev, "reject the flow\n"); + rc = -EINVAL; + goto error; + } else if (cond_goto & BNXT_ULP_COND_GOTO_RF) { + u32 rf_idx; + u64 regval; + + /* least significant 16 bits from reg_file index */ + rf_idx = (u32)(cond_goto & 0xFFFF); + if (ulp_regfile_read(parms->regfile, rf_idx, + ®val)) { + netdev_dbg(bp->dev, "regfile[%d] read oob\n", rf_idx); + rc = -EINVAL; + goto error; + } + cond_goto = (int)regval; + } + + if (cond_goto < 0 && ((int)tbl_idx + cond_goto) < 0) { + netdev_dbg(bp->dev, "invalid conditional goto %d\n", cond_goto); + goto error; + } + tbl_idx += cond_goto; + } + + return rc; +error: + netdev_dbg(bp->dev, "%s tables failed operation for %d:%d\n", + ulp_mapper_tmpl_name_str(parms->tmpl_type), + parms->dev_id, tid); + return rc; +} + +static int +ulp_mapper_resource_free(struct bnxt_ulp_context *ulp_ctx, + u32 fid, + struct ulp_flow_db_res_params *res, + void *error) +{ + const struct ulp_mapper_core_ops *mapper_op; + struct bnxt_ulp_mapper_data *mdata; + struct tf *tfp; + int rc = 0; + + if (!res || !ulp_ctx) + return -EINVAL; + + tfp = ulp_ctx->ops->ulp_tfp_get(ulp_ctx, ulp_flow_db_shared_session_get(res)); + if (!tfp) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to free resource failed to get tfp\n"); + return -EINVAL; + } + + mapper_op = ulp_mapper_data_oper_get(ulp_ctx); + switch (res->resource_func) { + case BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE: + rc = mapper_op->ulp_mapper_core_tcam_entry_free(ulp_ctx, res); + break; + case BNXT_ULP_RESOURCE_FUNC_EM_TABLE: + rc = mapper_op->ulp_mapper_core_em_entry_free(ulp_ctx, res, error); + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = mapper_op->ulp_mapper_core_index_entry_free(ulp_ctx, res); + break; + case BNXT_ULP_RESOURCE_FUNC_CMM_TABLE: + case BNXT_ULP_RESOURCE_FUNC_CMM_STAT: + rc = mapper_op->ulp_mapper_core_cmm_entry_free(ulp_ctx, res, error); + break; + case 
BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + rc = mapper_op->ulp_mapper_core_ident_free(ulp_ctx, res); + break; + case BNXT_ULP_RESOURCE_FUNC_HW_FID: + rc = ulp_mapper_mark_free(ulp_ctx, res); + break; + case BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE: + rc = ulp_mapper_gen_tbl_res_free(ulp_ctx, fid, res); + break; + case BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE: + rc = ulp_mapper_key_recipe_free(ulp_ctx, (enum bnxt_ulp_direction)res->direction, + res->resource_sub_type, + res->resource_hndl); + break; + case BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE: + mdata = bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!mdata) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get mapper data\n"); + return -EINVAL; + } + rc = ulp_allocator_tbl_list_free(ulp_ctx->bp, mdata, + res->resource_sub_type, + res->direction, + res->resource_hndl); + break; + case BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE: + rc = ulp_mapper_vnic_tbl_res_free(ulp_ctx, tfp, res); + break; + case BNXT_ULP_RESOURCE_FUNC_UDCC_V6SUBNET_TABLE: + rc = ulp_mapper_udcc_v6subnet_tbl_res_free(ulp_ctx, tfp, res); + break; + default: + break; + } + + return rc; +} + +int +ulp_mapper_resources_free(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + void *error) +{ + struct ulp_flow_db_res_params res_parms = { 0 }; + int rc, trc, frc = 0; + + if (!ulp_ctx) { + netdev_dbg(NULL, "Invalid parms, unable to free flow\n"); + return -EINVAL; + } + + /** Set the critical resource on the first resource del, then iterate + * while status is good + */ + res_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_YES; + + rc = ulp_flow_db_resource_del(ulp_ctx, flow_type, fid, &res_parms); + + if (rc) { + /* This is unexpected on the first call to resource del. + * It likely means that the flow did not exist in the flow db. 
+ */ + netdev_dbg(ulp_ctx->bp->dev, "Flow[%d][0x%08x] failed to free (rc=%d)\n", + flow_type, fid, rc); + return rc; + } + + while (!rc) { + trc = ulp_mapper_resource_free(ulp_ctx, fid, &res_parms, error); + if (trc) { + /* On fail, we still need to attempt to free the + * remaining resources. Don't return + */ + netdev_dbg(ulp_ctx->bp->dev, + "Flow[%d][0x%x] Res[%d][%0llx] failed rc=%d.\n", + flow_type, fid, res_parms.resource_func, + res_parms.resource_hndl, trc); + + /* Capture error in final rc */ + frc = trc; + } + /* All subsequent call require the non-critical_resource */ + res_parms.critical_resource = BNXT_ULP_CRITICAL_RESOURCE_NO; + + rc = ulp_flow_db_resource_del(ulp_ctx, + flow_type, + fid, + &res_parms); + } + + /* Expected that flow_db should return no entry */ + if (rc != -ENOENT) + frc = rc; + + /* Free the Flow ID since we've removed all resources */ + rc = ulp_flow_db_fid_free(ulp_ctx, flow_type, fid); + + /* Ensure that any error will be reported */ + if (rc) + frc = rc; + + return frc; +} + +static void +ulp_mapper_glb_resource_info_deinit(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + struct bnxt_ulp_mapper_glb_resource_entry *ent; + struct ulp_flow_db_res_params res; + u32 dir, idx; + + /* Iterate the global resources and process each one */ + for (dir = TF_DIR_RX; dir < TF_DIR_MAX; dir++) { + for (idx = 0; idx < BNXT_ULP_GLB_RF_IDX_LAST; idx++) { + ent = &mapper_data->glb_res_tbl[dir][idx]; + if (ent->resource_func == + BNXT_ULP_RESOURCE_FUNC_INVALID || + ent->shared) + continue; + memset(&res, 0, sizeof(struct ulp_flow_db_res_params)); + res.resource_func = ent->resource_func; + res.direction = dir; + res.resource_type = ent->resource_type; + /*convert it from BE to cpu */ + res.resource_hndl = + be64_to_cpu(ent->resource_hndl); + ulp_mapper_resource_free(ulp_ctx, 0, &res, NULL); + } + } +} + +int +ulp_mapper_flow_destroy(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + 
void *error) +{ + return ulp_mapper_resources_free(ulp_ctx, flow_type, fid, error); +} + +/* Function to handle the mapping of the Flow to be compatible + * with the underlying hardware. + */ +int +ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_parms *parms, void *error) +{ + struct ulp_regfile *regfile; + int rc = 0, trc; + + if (!ulp_ctx || !parms) + return -EINVAL; + + regfile = vzalloc(sizeof(*regfile)); + if (!regfile) + return -ENOMEM; + + parms->regfile = regfile; + parms->ulp_ctx = ulp_ctx; + + /* Get the device id from the ulp context */ + if (bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &parms->dev_id)) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid ulp context\n"); + rc = -EINVAL; + goto err; + } + if (bnxt_ulp_cntxt_fid_get(ulp_ctx, &parms->fw_fid)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the func_id\n"); + rc = -EINVAL; + goto err; + } + + /* Get the device params, it will be used in later processing */ + parms->device_params = bnxt_ulp_device_params_get(parms->dev_id); + if (!parms->device_params) { + netdev_dbg(ulp_ctx->bp->dev, "No device parms for device id %d\n", parms->dev_id); + rc = -EINVAL; + goto err; + } + + /* + * Get the mapper data for dynamic mapper data such as default + * ids. 
+ */ + parms->mapper_data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!parms->mapper_data) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get the ulp mapper data\n"); + rc = -EINVAL; + goto err; + } + + /* initialize the registry file for further processing */ + if (ulp_regfile_init(parms->regfile)) { + netdev_dbg(ulp_ctx->bp->dev, "regfile initialization failed.\n"); + rc = -EINVAL; + goto err; + } + + /* Process the action template list from the selected action table*/ + if (parms->act_tid) { + parms->tmpl_type = BNXT_ULP_TEMPLATE_TYPE_ACTION; + /* Process the action template tables */ + rc = ulp_mapper_tbls_process(parms, error); + if (rc) + goto flow_error; + } + + if (parms->class_tid) { + parms->tmpl_type = BNXT_ULP_TEMPLATE_TYPE_CLASS; + /* Process the class template tables.*/ + rc = ulp_mapper_tbls_process(parms, error); + if (rc) + goto flow_error; + } + + vfree(parms->regfile); + return rc; + +flow_error: + if (parms->rid) { + /* An RID was in-flight but not pushed, free the resources */ + trc = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_RID, + parms->rid, NULL); + if (trc) + netdev_dbg(ulp_ctx->bp->dev, "Failed to free resources rid=0x%08x rc=%d\n", + parms->rid, trc); + parms->rid = 0; + } + + /* Free all resources that were allocated during flow creation */ + if (parms->flow_id) { + trc = ulp_mapper_flow_destroy(ulp_ctx, parms->flow_type, + parms->flow_id, NULL); + if (trc) + netdev_dbg(ulp_ctx->bp->dev, "Failed to free resources fid=0x%08x rc=%d\n", + parms->flow_id, trc); + } + +err: + vfree(parms->regfile); + return rc; +} + +int +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_mapper_data *data; + int rc; + + if (!ulp_ctx) + return -EINVAL; + + data = vzalloc(sizeof(*data)); + if (!data) + return -ENOMEM; + + /* set the mapper operations for the current platform */ + data->mapper_oper = bnxt_ulp_mapper_ops_get(ulp_ctx->bp); + if (!data->mapper_oper) { + vfree(data); + 
netdev_dbg(ulp_ctx->bp->dev, "Failed to get mapper ops\n"); + return -ENOMEM; + } + + if (bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, data)) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to set mapper data in context\n"); + /* Don't call deinit since the prof_func wasn't allocated. */ + vfree(data); + return -ENOMEM; + } + + /* Allocate the global resource ids */ + rc = ulp_mapper_glb_resource_info_init(ulp_ctx, data); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to initialize global resource ids\n"); + goto error; + } + + /** Only initialize the app global resources if a shared session was + * created. + */ + if (bnxt_ulp_cntxt_shared_session_enabled(ulp_ctx)) { + rc = ulp_mapper_app_glb_resource_info_init(ulp_ctx, data); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to init app glb resources\n"); + goto error; + } + } + + /* Allocate the generic table list */ + rc = ulp_mapper_generic_tbl_list_init(ulp_ctx, data); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to initialize generic tbl list\n"); + goto error; + } + + rc = ulp_mapper_key_recipe_tbl_init(ulp_ctx, data); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to initialize key_recipe tbl\n"); + goto error; + } + + rc = ulp_allocator_tbl_list_init(ulp_ctx, data); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to initialize allocator tbl\n"); + goto error; + } + + return 0; +error: + /* Ignore the return code in favor of returning the original error. */ + ulp_mapper_deinit(ulp_ctx); + return rc; +} + +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_mapper_data *data; + + if (!ulp_ctx) + return; + + data = (struct bnxt_ulp_mapper_data *) + bnxt_ulp_cntxt_ptr2_mapper_data_get(ulp_ctx); + if (!data) { + /* Go ahead and return since there is no allocated data. 
*/ + netdev_dbg(ulp_ctx->bp->dev, "No data appears to have been allocated.\n"); + return; + } + + /* Free the global resource info table entries */ + ulp_mapper_glb_resource_info_deinit(ulp_ctx, data); + + /* Free the generic table */ + (void)ulp_mapper_generic_tbl_list_deinit(data); + + /* Free the key recipe table */ + (void)ulp_mapper_key_recipe_tbl_deinit(data); + + /* Free the allocator table */ + (void)ulp_allocator_tbl_list_deinit(data); + + vfree(data); + /* Reset the data pointer within the ulp_ctx. */ + bnxt_ulp_cntxt_ptr2_mapper_data_set(ulp_ctx, NULL); +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.h new file mode 100644 index 000000000000..c1eed2be97c6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_MAPPER_H_ +#define _ULP_MAPPER_H_ + +#include "linux/kernel.h" +#include "tf_core.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_ulp.h" +#include "ulp_utils.h" +#include "ulp_gen_tbl.h" +#include "bitalloc.h" +#include "ulp_alloc_tbl.h" + +#define ULP_IDENTS_INVALID ((u16)U16_MAX) + +struct bnxt_ulp_mapper_glb_resource_entry { + enum bnxt_ulp_resource_func resource_func; + u32 resource_type; /* TF_ enum type */ + u64 resource_hndl; + bool shared; +}; + +#define BNXT_ULP_KEY_RECIPE_MAX_FLDS 128 +struct bnxt_ulp_key_recipe_entry { + bool in_use; + u32 cnt; + struct bnxt_ulp_mapper_key_info flds[BNXT_ULP_KEY_RECIPE_MAX_FLDS]; +}; + +#define ULP_RECIPE_TYPE_MAX (BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM + 1) +struct bnxt_ulp_key_recipe_info { + u32 num_recipes; + u8 max_fields; + struct bnxt_ulp_key_recipe_entry **recipes[BNXT_ULP_DIRECTION_LAST][ULP_RECIPE_TYPE_MAX]; + struct bitalloc *recipe_ba[BNXT_ULP_DIRECTION_LAST][ULP_RECIPE_TYPE_MAX]; +}; + +struct ulp_mapper_core_ops; + +struct bnxt_ulp_mapper_data { + const struct ulp_mapper_core_ops *mapper_oper; + struct bnxt_ulp_mapper_glb_resource_entry + glb_res_tbl[TF_DIR_MAX][BNXT_ULP_GLB_RF_IDX_LAST]; + struct ulp_mapper_gen_tbl_list gen_tbl_list[BNXT_ULP_GEN_TBL_MAX_SZ]; + struct bnxt_ulp_key_recipe_info key_recipe_info; + struct ulp_allocator_tbl_entry alloc_tbl[BNXT_ULP_ALLOCATOR_TBL_MAX_SZ]; +}; + +/* Internal Structure for passing the arguments around */ +struct bnxt_ulp_mapper_parms { + enum bnxt_ulp_template_type tmpl_type; + u32 dev_id; + u32 act_tid; + u32 class_tid; + struct ulp_tc_act_prop *act_prop; + struct ulp_tc_hdr_bitmap *act_bitmap; + struct ulp_tc_hdr_bitmap *hdr_bitmap; + struct ulp_tc_hdr_bitmap *enc_hdr_bitmap; + struct ulp_tc_hdr_field *hdr_field; + struct ulp_tc_hdr_field *enc_field; + struct ulp_tc_field_bitmap *fld_bitmap; + u64 *comp_fld; + struct ulp_regfile *regfile; + struct bnxt_ulp_context *ulp_ctx; + u32 flow_id; + 
u16 func_id; + u32 rid; + enum bnxt_ulp_fdb_type flow_type; + struct bnxt_ulp_mapper_data *mapper_data; + struct bnxt_ulp_device_params *device_params; + u32 child_flow; + u32 parent_flow; + u8 tun_idx; + u32 app_priority; + u64 shared_hndl; + u32 flow_pattern_id; + u32 act_pattern_id; + u8 app_id; + u16 port_id; + u16 fw_fid; + u64 cf_bitmap; + u64 wc_field_bitmap; + u64 exclude_field_bitmap; +}; + +/* Function to initialize any dynamic mapper data. */ +struct ulp_mapper_core_ops { + int + (*ulp_mapper_core_tcam_tbl_process)(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *t); + int + (*ulp_mapper_core_tcam_entry_free)(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res); + int + (*ulp_mapper_core_em_tbl_process)(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *t, + void *error); + int + (*ulp_mapper_core_em_entry_free)(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res, + void *error); + + int + (*ulp_mapper_core_index_tbl_process)(struct bnxt_ulp_mapper_parms *parm, + struct bnxt_ulp_mapper_tbl_info + *t); + int + (*ulp_mapper_core_index_entry_free)(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res); + int + (*ulp_mapper_core_cmm_tbl_process)(struct bnxt_ulp_mapper_parms *parm, + struct bnxt_ulp_mapper_tbl_info *t, + void *error); + int + (*ulp_mapper_core_cmm_entry_free)(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res, + void *error); + int + (*ulp_mapper_core_if_tbl_process)(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *t); + + int + (*ulp_mapper_core_ident_alloc_process)(struct bnxt_ulp_context *ulp_ctx, + u32 session_type, + u16 ident_type, + u8 direction, + enum cfa_track_type tt, + u64 *identifier_id); + + int + (*ulp_mapper_core_index_tbl_alloc_process)(struct bnxt_ulp_context *ulp, + u32 session_type, + u16 table_type, + u8 direction, + u64 *index); + int + (*ulp_mapper_core_ident_free)(struct bnxt_ulp_context 
*ulp_ctx, + struct ulp_flow_db_res_params *res); + u32 + (*ulp_mapper_core_dyn_tbl_type_get)(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *t, + u16 blob_len, + u16 *out_len); + int + (*ulp_mapper_core_app_glb_res_info_init)(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data); + + int + (*ulp_mapper_core_handle_to_offset)(struct bnxt_ulp_mapper_parms *parms, + u64 handle, + u32 offset, + u64 *result); +}; + +extern const struct ulp_mapper_core_ops ulp_mapper_tf_core_ops; +extern const struct ulp_mapper_core_ops ulp_mapper_tfc_core_ops; + +int +ulp_mapper_glb_resource_read(struct bnxt_ulp_mapper_data *mapper_data, + enum tf_dir dir, + u16 idx, + u64 *regval, + bool *shared); + +int +ulp_mapper_glb_resource_write(struct bnxt_ulp_mapper_data *data, + struct bnxt_ulp_glb_resource_info *res, + u64 regval, bool shared); + +int +ulp_mapper_resource_ident_allocate(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data, + struct bnxt_ulp_glb_resource_info *glb_res, + bool shared); + +int +ulp_mapper_resource_index_tbl_alloc(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data, + struct bnxt_ulp_glb_resource_info *glb_res, + bool shared); + +struct bnxt_ulp_mapper_key_info * +ulp_mapper_key_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds); + +uint32_t +ulp_mapper_partial_key_fields_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl); + +int +ulp_mapper_fdb_opc_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_flow_db_res_params *fid_parms); + +int +ulp_mapper_priority_opc_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *priority); + +int +ulp_mapper_tbl_ident_scan_ext(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u8 *byte_data, + u32 byte_data_size, + enum 
bnxt_ulp_byte_order byte_order); + +int +ulp_mapper_field_opc_process(struct bnxt_ulp_mapper_parms *parms, + enum tf_dir dir, + struct bnxt_ulp_mapper_field_info *fld, + struct ulp_blob *blob, + u8 is_key, + const char *name); + +int +ulp_mapper_key_recipe_field_opc_process(struct bnxt_ulp_mapper_parms *parms, + enum bnxt_ulp_direction dir, + struct bnxt_ulp_mapper_field_info *fld, + u8 is_key, + const char *name, + bool *written, + struct bnxt_ulp_mapper_field_info *ofld); + +int +ulp_mapper_tbl_result_build(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *data, + const char *name); + +int +ulp_mapper_mark_gfid_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u64 flow_id); + +int +ulp_mapper_mark_act_ptr_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl); + +int +ulp_mapper_mark_vfr_idx_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl); + +int +ulp_mapper_tcam_tbl_ident_alloc(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl); + +u32 +ulp_mapper_wc_tcam_tbl_dyn_post_process(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_device_params *dparms, + struct ulp_blob *key, + struct ulp_blob *mask, + struct ulp_blob *tkey, + struct ulp_blob *tmask); + +void ulp_mapper_wc_tcam_tbl_post_process(struct bnxt_ulp_context *ulp_ctx, struct ulp_blob *blob); + +int +ulp_mapper_resources_free(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + void *error); + +int +ulp_mapper_flow_destroy(struct bnxt_ulp_context *ulp_ctx, + enum bnxt_ulp_fdb_type flow_type, + u32 fid, + void *error); + +int +ulp_mapper_flow_create(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_parms *parms, + void *error); + +struct bnxt_ulp_mapper_key_info * +ulp_mapper_key_recipe_fields_get(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u32 *num_flds); + 
+int +ulp_mapper_init(struct bnxt_ulp_context *ulp_ctx); + +void +ulp_mapper_deinit(struct bnxt_ulp_context *ulp_ctx); + +#endif /* _ULP_MAPPER_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p5.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p5.c new file mode 100644 index 000000000000..a342d48bb319 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p5.c @@ -0,0 +1,1349 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "linux/kernel.h" +#include "bnxt_compat.h" +#include "ulp_mapper.h" +#include "ulp_flow_db.h" +#include "tf_util.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_tf_ulp_p5.h" +#include "ulp_tf_debug.h" +#include "ulp_template_debug_proto.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +/* Internal function to write the tcam entry */ +static int +ulp_mapper_tf_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *key, + struct ulp_blob *mask, + struct ulp_blob *data, + u16 idx) +{ + struct tf_set_tcam_entry_parms sparms = { 0 }; + struct tf *tfp; + u16 tmplen; + int rc; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, tbl->session_type); + if (!tfp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get truflow pointer\n"); + return -EINVAL; + } + + sparms.dir = tbl->direction; + sparms.tcam_tbl_type = tbl->resource_type; + sparms.idx = idx; + sparms.key = ulp_blob_data_get(key, &tmplen); + sparms.key_sz_in_bits = tmplen; + sparms.mask = ulp_blob_data_get(mask, &tmplen); + sparms.result = ulp_blob_data_get(data, &tmplen); + sparms.result_sz_in_bits = tmplen; + if (tf_set_tcam_entry(tfp, &sparms)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "tcam[%s][%s][%x] write failed.\n", + tf_tcam_tbl_2_str(sparms.tcam_tbl_type), + tf_dir_2_str(sparms.dir), sparms.idx); + return -EIO; + } + netdev_dbg(parms->ulp_ctx->bp->dev, + "tcam[%s][%s][%x] write success.\n", + 
tf_tcam_tbl_2_str(sparms.tcam_tbl_type), + tf_dir_2_str(sparms.dir), sparms.idx); + + /* Mark action */ + rc = ulp_mapper_mark_act_ptr_process(parms, tbl); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "failed mark action processing\n"); + return rc; + } + +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_MAPPER + ulp_mapper_tcam_entry_dump(parms->ulp_ctx, + "TCAM", idx, tbl, key, mask, data); +#endif +#endif + return rc; +} + +static int +ulp_mapper_tf_tcam_is_wc_tcam(struct bnxt_ulp_mapper_tbl_info *tbl) +{ + if (tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM || + tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_HIGH || + tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_LOW) + return 1; + return 0; +} + +static int +ulp_mapper_tf_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct bnxt_ulp_device_params *dparms = parms->device_params; + struct ulp_blob okey, omask, data, update_data; + struct tf_free_tcam_entry_parms *free_parms; + struct ulp_flow_db_res_params *fid_parms; + struct tf_alloc_tcam_entry_parms *aparms; + enum bnxt_ulp_byte_order key_byte_order; + struct bnxt_ulp_mapper_key_info *kflds; + struct ulp_blob tkey, tmask; /* transform key and mask */ + struct ulp_blob *key, *mask; + u32 i, num_kflds; + struct tf *tfp; + u16 tmplen = 0; + int rc, trc; + u32 hit = 0; + u16 idx = 0; + + aparms = vzalloc(sizeof(*aparms)); + if (!aparms) + return -ENOMEM; + + fid_parms = vzalloc(sizeof(*fid_parms)); + if (!fid_parms) { + vfree(aparms); + return -ENOMEM; + } + + free_parms = vzalloc(sizeof(*free_parms)); + if (!free_parms) { + vfree(aparms); + vfree(fid_parms); + return -ENOMEM; + } + + /* Set the key and mask to the original key and mask. 
*/ + key = &okey; + mask = &omask; + + /* Skip this if table opcode is NOP */ + if (tbl->tbl_opcode == BNXT_ULP_TCAM_TBL_OPC_NOT_USED || + tbl->tbl_opcode >= BNXT_ULP_TCAM_TBL_OPC_LAST) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid tcam table opcode %d\n", + tbl->tbl_opcode); + goto done; + } + + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, tbl->session_type); + if (!tfp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get truflow pointer\n"); + rc = -EINVAL; + goto free_mem; + } + + /* If only allocation of identifier then perform and exit */ + if (tbl->tbl_opcode == BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT) { + rc = ulp_mapper_tcam_tbl_ident_alloc(parms, tbl); + goto free_mem; + } + + if (tbl->key_recipe_opcode == BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY) + kflds = ulp_mapper_key_recipe_fields_get(parms, tbl, &num_kflds); + else + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + rc = -EINVAL; + goto free_mem; + } + + if (ulp_mapper_tf_tcam_is_wc_tcam(tbl)) + key_byte_order = dparms->wc_key_byte_order; + else + key_byte_order = dparms->key_byte_order; + + if (ulp_blob_init(key, tbl->blob_key_bit_size, key_byte_order) || + ulp_blob_init(mask, tbl->blob_key_bit_size, key_byte_order) || + ulp_blob_init(&data, tbl->result_bit_size, + dparms->result_byte_order) || + ulp_blob_init(&update_data, tbl->result_bit_size, + dparms->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "blob inits failed.\n"); + rc = -EINVAL; + goto free_mem; + } + + /* create the key/mask */ + /* NOTE: The WC table will require some kind of flag to handle the + * mode bits within the key/mask + */ + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + key, 1, "TCAM Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Key field set failed %s\n", + kflds[i].field_info_spec.description); + 
goto free_mem; + } + + /* Setup the mask */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_mask, + mask, 0, "TCAM Mask"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Mask field set failed %s\n", + kflds[i].field_info_mask.description); + goto free_mem; + } + } + + /* For wild card tcam perform the post process to swap the blob */ + if (ulp_mapper_tf_tcam_is_wc_tcam(tbl)) { + if (dparms->wc_dynamic_pad_en) { + /* Sets up the slices for writing to the WC TCAM */ + rc = ulp_mapper_wc_tcam_tbl_dyn_post_process(parms->ulp_ctx, + dparms, + key, mask, + &tkey, + &tmask); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to post proc WC entry.\n"); + goto free_mem; + } + /* Now need to use the transform Key/Mask */ + key = &tkey; + mask = &tmask; + } else { + ulp_mapper_wc_tcam_tbl_post_process(parms->ulp_ctx, key); + ulp_mapper_wc_tcam_tbl_post_process(parms->ulp_ctx, mask); + } + } + + if (tbl->tbl_opcode == BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE) { + /* allocate the tcam index */ + aparms->dir = tbl->direction; + aparms->tcam_tbl_type = tbl->resource_type; + aparms->key = ulp_blob_data_get(key, &tmplen); + aparms->key_sz_in_bits = tmplen; + aparms->mask = ulp_blob_data_get(mask, &tmplen); + + /* calculate the entry priority */ + rc = ulp_mapper_priority_opc_process(parms, tbl, + &aparms->priority); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "entry priority process failed\n"); + goto free_mem; + } + + rc = tf_alloc_tcam_entry(tfp, aparms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "tcam alloc failed rc=%d.\n", rc); + goto free_mem; + } + idx = aparms->idx; + hit = aparms->hit; + } else { + rc = -EINVAL; + /* Need to free the tcam idx, so goto error */ + goto error; + } + + /* Write the tcam index into the regfile*/ + if (ulp_regfile_write(parms->regfile, tbl->tbl_operand, + (u64)cpu_to_be64(idx))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Regfile[%d] write failed.\n", + tbl->tbl_operand); + rc = -EINVAL; + 
/* Need to free the tcam idx, so goto error */ + goto error; + } + + /* if it is miss then it is same as no search before alloc */ + if (!hit || tbl->tbl_opcode == BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE) { + /*Scan identifier list, allocate identifier and update regfile*/ + rc = ulp_mapper_tcam_tbl_ident_alloc(parms, tbl); + /* Create the result blob */ + if (!rc) + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, + "TCAM Result"); + /* write the tcam entry */ + if (!rc) + rc = ulp_mapper_tf_tcam_tbl_entry_write(parms, tbl, key, + mask, &data, + idx); + } + + if (rc) + goto error; + + /* Add the tcam index to the flow database */ + fid_parms->direction = tbl->direction; + fid_parms->resource_func = tbl->resource_func; + fid_parms->resource_type = tbl->resource_type; + fid_parms->critical_resource = tbl->critical_resource; + fid_parms->resource_hndl = idx; + ulp_flow_db_shared_session_set(fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link resource to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + +done: + vfree(aparms); + vfree(fid_parms); + vfree(free_parms); + return 0; + +error: + free_parms->dir = tbl->direction; + free_parms->tcam_tbl_type = tbl->resource_type; + free_parms->idx = idx; + trc = tf_free_tcam_entry(tfp, free_parms); + if (trc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to free tcam[%d][%d][%d] on failure\n", + tbl->resource_type, tbl->direction, idx); +free_mem: + vfree(aparms); + vfree(fid_parms); + vfree(free_parms); + + return rc; +} + +static int +ulp_mapper_tf_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + void *error) +{ + struct bnxt_ulp_device_params *dparms = parms->device_params; + struct tf_delete_em_entry_parms free_parms = { 0 }; + struct ulp_flow_db_res_params fid_parms = { 0 }; + struct tf_insert_em_entry_parms iparms = { 0 }; + 
enum bnxt_ulp_byte_order key_order, res_order; + struct bnxt_ulp_mapper_key_info *kflds; + enum bnxt_ulp_flow_mem_type mtype; + struct ulp_blob key, data; + u32 i, num_kflds; + struct tf *tfp; + int pad = 0; + int rc = 0; + u16 tmplen; + int trc; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, tbl->session_type); + rc = bnxt_ulp_cntxt_mem_type_get(parms->ulp_ctx, &mtype); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get the mem type for EM\n"); + return -EINVAL; + } + + if (tbl->key_recipe_opcode == BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY) + kflds = ulp_mapper_key_recipe_fields_get(parms, tbl, &num_kflds); + else + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + return -EINVAL; + } + + key_order = dparms->em_byte_order; + res_order = dparms->em_byte_order; + + /* Initialize the key/result blobs */ + if (ulp_blob_init(&key, tbl->blob_key_bit_size, key_order) || + ulp_blob_init(&data, tbl->result_bit_size, res_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "blob inits failed.\n"); + return -EINVAL; + } + + /* create the key */ + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + &key, 1, "EM Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Key field set failed.\n"); + return rc; + } + } + + /* if dynamic padding is enabled then add padding to result data */ + if (dparms->em_dynamic_pad_en) { + /* add padding to make sure key is at byte boundary */ + ulp_blob_pad_align(&key, ULP_BUFFER_ALIGN_8_BITS); + + /* add the pad */ + pad = dparms->em_blk_align_bits - dparms->em_blk_size_bits; + if (pad < 0) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid em blk size and align\n"); + return -EINVAL; + } + ulp_blob_pad_push(&data, (u32)pad); + } + + /* Create the result data blob */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, "EM Result"); + if 
(rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + ulp_mapper_result_dump(parms->ulp_ctx, "EM Result", tbl, &data); + if (dparms->em_dynamic_pad_en) { + u32 abits = dparms->em_blk_align_bits; + + /* when dynamic padding is enabled merge result + key */ + rc = ulp_blob_block_merge(&data, &key, abits, pad); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to merge the result blob\n"); + return rc; + } + + /* add padding to make sure merged result is at slice boundary*/ + ulp_blob_pad_align(&data, abits); + + ulp_blob_perform_byte_reverse(&data, ULP_BITS_2_BYTE(abits)); + ulp_mapper_result_dump(parms->ulp_ctx, "EM Merged Result", tbl, + &data); + } + + /* do the transpose for the internal EM keys */ + if (tbl->resource_type == TF_MEM_INTERNAL) { + if (dparms->em_key_align_bytes) { + int b = ULP_BYTE_2_BITS(dparms->em_key_align_bytes); + + tmplen = ulp_blob_data_len_get(&key); + ulp_blob_pad_push(&key, b - tmplen); + } + tmplen = ulp_blob_data_len_get(&key); + ulp_mapper_result_dump(parms->ulp_ctx, "EM Key Transpose", tbl, + &key); + } + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx, + &iparms.tbl_scope_id); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get table scope rc=%d\n", rc); + return rc; + } + + /* NOTE: the actual blob size will differ from the size in the tbl + * entry due to the padding. 
+ */ + iparms.dup_check = 0; + iparms.dir = tbl->direction; + iparms.mem = tbl->resource_type; + iparms.key = ulp_blob_data_get(&key, &tmplen); + iparms.key_sz_in_bits = tbl->key_bit_size; + iparms.em_record = ulp_blob_data_get(&data, &tmplen); + if (tbl->result_bit_size) + iparms.em_record_sz_in_bits = tbl->result_bit_size; + else + iparms.em_record_sz_in_bits = tmplen; + + rc = tf_insert_em_entry(tfp, &iparms); + if (rc) { + /* Set the error flag in reg file */ + if (tbl->tbl_opcode == BNXT_ULP_EM_TBL_OPC_WR_REGFILE) { + uint64_t val = 0; + + /* over max flows or hash collision */ + if (rc == -EIO || rc == -ENOMEM) { + val = 1; + rc = 0; + netdev_dbg(parms->ulp_ctx->bp->dev, + "Fail to insert EM, shall add to wc\n"); + } + rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, + cpu_to_be64(val)); + } + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to insert em entry rc=%d.\n", rc); + return rc; + } + + ulp_mapper_em_dump(parms->ulp_ctx, "EM", &key, &data, &iparms); + /* tf_dump_tables(tfp, iparms.tbl_scope_id); */ + /* Mark action process */ + if (mtype == BNXT_ULP_FLOW_MEM_TYPE_EXT && + tbl->resource_type == TF_MEM_EXTERNAL) + rc = ulp_mapper_mark_gfid_process(parms, tbl, iparms.flow_id); + else if (mtype == BNXT_ULP_FLOW_MEM_TYPE_INT && + tbl->resource_type == TF_MEM_INTERNAL) + rc = ulp_mapper_mark_act_ptr_process(parms, tbl); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + goto error; + } + + /* Link the EM resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = iparms.flow_handle; + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", + rc); + /* Need to free the identifier, 
so goto error */ + goto error; + } + + return 0; +error: + free_parms.dir = iparms.dir; + free_parms.mem = iparms.mem; + free_parms.tbl_scope_id = iparms.tbl_scope_id; + free_parms.flow_handle = iparms.flow_handle; + + trc = tf_delete_em_entry(tfp, &free_parms); + if (trc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to delete EM entry on failed add\n"); + + return rc; +} + +static u16 +ulp_mapper_tf_dyn_blob_size_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct bnxt_ulp_device_params *d_params = mparms->device_params; + + if (d_params->dynamic_sram_en) { + switch (tbl->resource_type) { + case TF_TBL_TYPE_ACT_ENCAP_8B: + case TF_TBL_TYPE_ACT_ENCAP_16B: + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_ENCAP_64B: + case TF_TBL_TYPE_ACT_MODIFY_8B: + case TF_TBL_TYPE_ACT_MODIFY_16B: + case TF_TBL_TYPE_ACT_MODIFY_32B: + case TF_TBL_TYPE_ACT_MODIFY_64B: + /* return max size */ + return BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + default: + break; + } + } else if (tbl->encap_num_fields) { + return BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + } + return tbl->result_bit_size; +} + +static int +ulp_mapper_tf_em_entry_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res, + void *error) +{ + struct tf_delete_em_entry_parms fparms = { 0 }; + u32 session_type; + struct tf *tfp; + int rc; + + session_type = ulp_flow_db_shared_session_get(res); + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp, session_type); + if (!tfp) { + netdev_dbg(ulp->bp->dev, "Failed to get tf pointer\n"); + return -EINVAL; + } + + fparms.dir = res->direction; + fparms.flow_handle = res->resource_hndl; + + rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id); + if (rc) { + netdev_dbg(ulp->bp->dev, "Failed to get table scope\n"); + return -EINVAL; + } + + return tf_delete_em_entry(tfp, &fparms); +} + +static u32 +ulp_mapper_tf_dyn_tbl_type_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u16 blob_len, + u16 *out_len) +{ + 
struct bnxt_ulp_device_params *d_params = mparms->device_params; + struct bnxt_ulp_dyn_size_map *size_map; + u32 i; + + if (d_params->dynamic_sram_en) { + switch (tbl->resource_type) { + case TF_TBL_TYPE_ACT_ENCAP_8B: + case TF_TBL_TYPE_ACT_ENCAP_16B: + case TF_TBL_TYPE_ACT_ENCAP_32B: + case TF_TBL_TYPE_ACT_ENCAP_64B: + case TF_TBL_TYPE_ACT_ENCAP_128B: + size_map = d_params->dyn_encap_sizes; + for (i = 0; i < d_params->dyn_encap_list_size; i++) { + if (blob_len <= size_map[i].slab_size) { + *out_len = size_map[i].slab_size; + return size_map[i].tbl_type; + } + } + break; + case TF_TBL_TYPE_ACT_MODIFY_8B: + case TF_TBL_TYPE_ACT_MODIFY_16B: + case TF_TBL_TYPE_ACT_MODIFY_32B: + case TF_TBL_TYPE_ACT_MODIFY_64B: + size_map = d_params->dyn_modify_sizes; + for (i = 0; i < d_params->dyn_modify_list_size; i++) { + if (blob_len <= size_map[i].slab_size) { + *out_len = size_map[i].slab_size; + return size_map[i].tbl_type; + } + } + break; + default: + break; + } + } + return tbl->resource_type; +} + +static int +ulp_mapper_tf_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct tf_free_tbl_entry_parms free_parms = { 0 }; + struct bnxt_ulp_glb_resource_info glb_res = { 0 }; + struct tf_alloc_tbl_entry_parms aparms = { 0 }; + enum tf_tbl_type tbl_type = tbl->resource_type; + struct tf_set_tbl_entry_parms sparms = { 0 }; + struct tf_get_tbl_entry_parms gparms = { 0 }; + struct ulp_flow_db_res_params fid_parms; + struct ulp_blob data; + bool global = false; + bool shared = false; + int rc = 0, trc = 0; + bool alloc = false; + bool write = false; + u64 act_rec_size; + u32 tbl_scope_id; + u64 regval = 0; + struct tf *tfp; + u16 bit_size; + u16 blob_len; + u16 tmplen; + u32 index; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, tbl->session_type); + /* compute the blob size */ + bit_size = ulp_mapper_tf_dyn_blob_size_get(parms, tbl); + + /* Initialize the blob data */ + if (ulp_blob_init(&data, bit_size, + 
parms->device_params->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to initialize index table blob\n"); + return -EINVAL; + } + + /* Get the scope id first */ + rc = bnxt_ulp_cntxt_tbl_scope_id_get(parms->ulp_ctx, &tbl_scope_id); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get table scope rc=%d\n", rc); + return rc; + } + + switch (tbl->tbl_opcode) { + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE: + alloc = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the regfile. + */ + alloc = true; + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE: + /* get the index to write to from the regfile and then write + * the table entry. + */ + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, + ®val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to get tbl idx from regfile[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + /* For external, we need to reverse shift */ + if (tbl->resource_type == TF_TBL_TYPE_EXT) + index = TF_ACT_REC_PTR_2_OFFSET(index); + + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_GLB_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the global regfile. + */ + alloc = true; + global = true; + write = true; + glb_res.direction = tbl->direction; + glb_res.resource_func = tbl->resource_func; + glb_res.resource_type = tbl->resource_type; + glb_res.glb_regfile_index = tbl->tbl_operand; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE: + if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Template error, wrong fdb opcode\n"); + return -EINVAL; + } + /* get the index to write to from the global regfile and then + * write the table. 
+ */ + if (ulp_mapper_glb_resource_read(parms->mapper_data, + tbl->direction, + tbl->tbl_operand, + ®val, &shared)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to get tbl idx from Glb RF[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + /* For external, we need to reverse shift */ + if (tbl->resource_type == TF_TBL_TYPE_EXT) + index = TF_ACT_REC_PTR_2_OFFSET(index); + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE: + /* The read is different from the rest and can be handled here + * instead of trying to use common code. Simply read the table + * with the index from the regfile, scan and store the + * identifiers, and return. + */ + if (tbl->resource_type == TF_TBL_TYPE_EXT) { + /* Not currently supporting with EXT */ + netdev_dbg(parms->ulp_ctx->bp->dev, + "Ext Table Read Opcode not supported.\n"); + return -EINVAL; + } + if (ulp_regfile_read(parms->regfile, tbl->tbl_operand, ®val)) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to get tbl idx from regfile[%d]\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + gparms.dir = tbl->direction; + gparms.type = tbl->resource_type; + gparms.data = ulp_blob_data_get(&data, &tmplen); + gparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tbl->result_bit_size); + gparms.idx = index; + rc = tf_get_tbl_entry(tfp, &gparms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to read the tbl entry %d:%d\n", + tbl->resource_type, index); + return rc; + } + /* Scan the fields in the entry and push them into the regfile. 
+ */ + rc = ulp_mapper_tbl_ident_scan_ext(parms, tbl, + gparms.data, + gparms.data_sz_in_bytes, + data.byte_order); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to get flds on tbl read rc=%d\n", rc); + return rc; + } + return 0; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid index table opcode %d\n", + tbl->tbl_opcode); + return -EINVAL; + } + + if (write) { + /* Get the result fields list */ + rc = ulp_mapper_tbl_result_build(parms, + tbl, + &data, + "Indexed Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + } + + if (alloc) { + aparms.dir = tbl->direction; + blob_len = ulp_blob_data_len_get(&data); + tbl_type = ulp_mapper_tf_dyn_tbl_type_get(parms, tbl, + blob_len, &tmplen); + aparms.type = tbl_type; + aparms.tbl_scope_id = tbl_scope_id; + + /* All failures after the alloc succeeds require a free */ + rc = tf_alloc_tbl_entry(tfp, &aparms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Alloc table[%s][%s] failed rc=%d\n", + tf_tbl_type_2_str(aparms.type), + tf_dir_2_str(tbl->direction), rc); + return rc; + } + index = aparms.idx; + + /* Store the index in the regfile since we either allocated it + * or it was a hit. + * + * Calculate the idx for the result record, for external EM the + * offset needs to be shifted accordingly. + * If external non-inline table types are used then need to + * revisit this logic. 
+ */ + if (tbl->resource_type == TF_TBL_TYPE_EXT) + regval = TF_ACT_REC_OFFSET_2_PTR(index); + else + regval = index; + regval = cpu_to_be64(regval); + + /* Counters need to be reset when allocated to ensure counter is zero */ + if (tbl->resource_type == TF_TBL_TYPE_ACT_STATS_64) { + sparms.dir = tbl->direction; + sparms.data = ulp_blob_data_get(&data, &tmplen); + sparms.type = tbl->resource_type; + sparms.data_sz_in_bytes = sizeof(u64); /* ULP_BITS_2_BYTE(tmplen); */ + sparms.idx = index; + sparms.tbl_scope_id = tbl_scope_id; + + rc = tf_set_tbl_entry(tfp, &sparms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Index table[%s][%s][%x] write fail rc=%d\n", + tf_tbl_type_2_str(sparms.type), + tf_dir_2_str(sparms.dir), + sparms.idx, rc); + goto error; + } + } + + if (global) { + /* Shared resources are never allocated through this + * method, so the shared flag is always false. + */ + rc = ulp_mapper_glb_resource_write(parms->mapper_data, + &glb_res, regval, + false); + } else { + rc = ulp_regfile_write(parms->regfile, + tbl->tbl_operand, regval); + } + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to write %s regfile[%d] rc=%d\n", + (global) ? 
"global" : "reg", + tbl->tbl_operand, rc); + goto error; + } + } + + if (write) { + sparms.dir = tbl->direction; + sparms.data = ulp_blob_data_get(&data, &tmplen); + blob_len = ulp_blob_data_len_get(&data); + tbl_type = ulp_mapper_tf_dyn_tbl_type_get(parms, tbl, + blob_len, + &tmplen); + sparms.type = tbl_type; + sparms.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen); + sparms.idx = index; + sparms.tbl_scope_id = tbl_scope_id; + if (shared) + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, + tbl->session_type); + rc = tf_set_tbl_entry(tfp, &sparms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Index table[%s][%s][%x] write fail rc=%d\n", + tf_tbl_type_2_str(sparms.type), + tf_dir_2_str(sparms.dir), + sparms.idx, rc); + goto error; + } + netdev_dbg(parms->ulp_ctx->bp->dev, + "Index table[%s][%s][%x] write successful.\n", + tf_tbl_type_2_str(sparms.type), + tf_dir_2_str(sparms.dir), sparms.idx); + + /* Calculate action record size */ + if (tbl->resource_type == TF_TBL_TYPE_EXT) { + act_rec_size = (ULP_BITS_2_BYTE_NR(tmplen) + 15) / 16; + act_rec_size--; + if (ulp_regfile_write(parms->regfile, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE, + cpu_to_be64(act_rec_size))) + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed write the act rec size\n"); + } + } + + /* Link the resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = index; + fid_parms.critical_resource = tbl->critical_resource; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link resource to flow rc = %d\n", + rc); + goto error; + } + + /* Perform the VF rep action */ + rc = ulp_mapper_mark_vfr_idx_process(parms, tbl); + if (rc) { + 
netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add vfr mark rc = %d\n", rc); + goto error; + } + return rc; +error: + /* Shared resources are not freed */ + if (shared) + return rc; + /* Free the allocated resource since we failed to either + * write to the entry or link the flow + */ + free_parms.dir = tbl->direction; + free_parms.type = tbl_type; + free_parms.idx = index; + free_parms.tbl_scope_id = tbl_scope_id; + + trc = tf_free_tbl_entry(tfp, &free_parms); + if (trc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to free tbl entry on failure\n"); + + return rc; +} + +static int +ulp_mapper_tf_cmm_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + void *error) +{ + /* CMM does not exist in TF library*/ + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid resource func,CMM is not supported on TF\n"); + return 0; +} + +static int32_t +ulp_mapper_tf_cmm_entry_free(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res, + void *error) +{ + /* CMM does not exist in TF library*/ + netdev_dbg(ulp_ctx->bp->dev, "Invalid resource func,CMM is not supported on TF\n"); + return 0; +} + +static int +ulp_mapper_tf_if_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct tf_set_if_tbl_entry_parms iftbl_params = { 0 }; + struct tf_get_if_tbl_entry_parms get_parms = { 0 }; + enum bnxt_ulp_if_tbl_opc if_opc = tbl->tbl_opcode; + struct ulp_blob data, res_blob; + struct tf *tfp; + u32 res_size; + u16 tmplen; + int rc = 0; + u64 idx; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(parms->ulp_ctx, tbl->session_type); + /* Initialize the blob data */ + if (ulp_blob_init(&data, tbl->result_bit_size, + parms->device_params->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial index table blob\n"); + return -EINVAL; + } + + /* create the result blob */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, "IFtable Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed 
to build the result blob\n"); + return rc; + } + + /* Get the index details */ + switch (if_opc) { + case BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD: + idx = ULP_COMP_FLD_IDX_RD(parms, tbl->tbl_operand); + break; + case BNXT_ULP_IF_TBL_OPC_WR_REGFILE: + if (ulp_regfile_read(parms->regfile, tbl->tbl_operand, &idx)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "regfile[%d] read oob\n", + tbl->tbl_operand); + return -EINVAL; + } + idx = be64_to_cpu(idx); + break; + case BNXT_ULP_IF_TBL_OPC_WR_CONST: + idx = tbl->tbl_operand; + break; + case BNXT_ULP_IF_TBL_OPC_RD_COMP_FIELD: + /* Initialize the result blob */ + if (ulp_blob_init(&res_blob, tbl->result_bit_size, + parms->device_params->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial result blob\n"); + return -EINVAL; + } + + /* read the interface table */ + idx = ULP_COMP_FLD_IDX_RD(parms, tbl->tbl_operand); + res_size = ULP_BITS_2_BYTE(tbl->result_bit_size); + get_parms.dir = tbl->direction; + get_parms.type = tbl->resource_type; + get_parms.idx = idx; + get_parms.data = ulp_blob_data_get(&res_blob, &tmplen); + get_parms.data_sz_in_bytes = res_size; + + rc = tf_get_if_tbl_entry(tfp, &get_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Get table[%d][%s][%x] failed rc=%d\n", + get_parms.type, + tf_dir_2_str(get_parms.dir), + get_parms.idx, rc); + return rc; + } + rc = ulp_mapper_tbl_ident_scan_ext(parms, tbl, + res_blob.data, + res_size, + res_blob.byte_order); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Scan and extract failed rc=%d\n", rc); + return rc; + case BNXT_ULP_IF_TBL_OPC_NOT_USED: + return rc; /* skip it */ + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid tbl index opcode\n"); + return -EINVAL; + } + + /* Perform the tf table set by filling the set params */ + iftbl_params.dir = tbl->direction; + iftbl_params.type = tbl->resource_type; + iftbl_params.data = ulp_blob_data_get(&data, &tmplen); + iftbl_params.data_sz_in_bytes = ULP_BITS_2_BYTE(tmplen); + iftbl_params.idx = 
idx; + + rc = tf_set_if_tbl_entry(tfp, &iftbl_params); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Set table[%d][%s][%x] failed rc=%d\n", + iftbl_params.type, + tf_dir_2_str(iftbl_params.dir), + iftbl_params.idx, rc); + return rc; + } + netdev_dbg(parms->ulp_ctx->bp->dev, "Set table[%s][%s][%x] success.\n", + tf_if_tbl_2_str(iftbl_params.type), + tf_dir_2_str(iftbl_params.dir), + iftbl_params.idx); + + /* TBD: Need to look at the need to store idx in flow db for restore + * the table to its original state on deletion of this entry. + */ + return rc; +} + +static int +ulp_mapper_tf_ident_alloc(struct bnxt_ulp_context *ulp_ctx, + u32 session_type, + u16 ident_type, + u8 direction, + enum cfa_track_type tt, + u64 *identifier_id) +{ + struct tf_alloc_identifier_parms iparms = {0}; + struct tf *tfp; + int rc = 0; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp_ctx, session_type); + if (!tfp) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get tf pointer\n"); + return -EINVAL; + } + + iparms.ident_type = ident_type; + iparms.dir = direction; + + rc = tf_alloc_identifier(tfp, &iparms); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Alloc ident %s:%s failed.\n", + tf_dir_2_str(iparms.dir), + tf_ident_2_str(iparms.ident_type)); + return rc; + } + *identifier_id = iparms.id; + netdev_dbg(ulp_ctx->bp->dev, "Allocated Identifier [%s]:[%s] = 0x%X\n", + tf_dir_2_str(iparms.dir), + tf_ident_2_str(iparms.ident_type), iparms.id); + return rc; +} + +static int +ulp_mapper_tf_ident_free(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_identifier_parms free_parms = { 0 }; + uint32_t session_type; + struct tf *tfp; + int rc = 0; + + session_type = ulp_flow_db_shared_session_get(res); + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp_ctx, session_type); + if (!tfp) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get tf pointer\n"); + return -EINVAL; + } + + free_parms.ident_type = res->resource_type; + free_parms.dir = res->direction; + free_parms.id = 
res->resource_hndl; + + (void)tf_free_identifier(tfp, &free_parms); + netdev_dbg(ulp_ctx->bp->dev, "Freed Identifier [%s]:[%s] = 0x%X\n", + tf_dir_2_str(free_parms.dir), + tf_ident_2_str(free_parms.ident_type), + (uint32_t)free_parms.id); + return rc; +} + +static inline int32_t +ulp_mapper_tf_tcam_entry_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_tcam_entry_parms fparms = { + .dir = res->direction, + .tcam_tbl_type = res->resource_type, + .idx = (uint16_t)res->resource_hndl + }; + struct tf *tfp; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp, ulp_flow_db_shared_session_get(res)); + if (!tfp) { + netdev_dbg(ulp->bp->dev, "Unable to free resource failed to get tfp\n"); + return -EINVAL; + } + + return tf_free_tcam_entry(tfp, &fparms); +} + +static int +ulp_mapper_clear_full_action_record(struct tf *tfp, + struct bnxt_ulp_context *ulp_ctx, + struct tf_free_tbl_entry_parms *fparms) +{ + struct tf_set_tbl_entry_parms sparms = { 0 }; + uint32_t dev_id = BNXT_ULP_DEVICE_ID_LAST; + static u8 fld_zeros[16] = { 0 }; + int rc = 0; + + rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to get the dev id from ulp.\n"); + return rc; + } + + if (dev_id == BNXT_ULP_DEVICE_ID_THOR) { + sparms.dir = fparms->dir; + sparms.data = fld_zeros; + sparms.type = fparms->type; + sparms.data_sz_in_bytes = 16; /* FULL ACT REC SIZE - THOR */ + sparms.idx = fparms->idx; + sparms.tbl_scope_id = fparms->tbl_scope_id; + rc = tf_set_tbl_entry(tfp, &sparms); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Index table[%s][%s][%x] write fail %d\n", + tf_tbl_type_2_str(sparms.type), + tf_dir_2_str(sparms.dir), + sparms.idx, rc); + return rc; + } + } + return 0; +} + +static inline int +ulp_mapper_tf_index_entry_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + struct tf_free_tbl_entry_parms fparms = { + .dir = res->direction, + .type = res->resource_type, + .idx = 
(uint32_t)res->resource_hndl + }; + struct tf *tfp; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp, ulp_flow_db_shared_session_get(res)); + if (!tfp) { + netdev_dbg(ulp->bp->dev, + "Unable to free resource failed to get tfp\n"); + return -EINVAL; + } + + /* Get the table scope, it may be ignored */ + (void)bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &fparms.tbl_scope_id); + + if (fparms.type == TF_TBL_TYPE_FULL_ACT_RECORD) + (void)ulp_mapper_clear_full_action_record(tfp, ulp, &fparms); + + netdev_dbg(ulp->bp->dev, "Free index table [%s]:[%s] = 0x%X\n", + tf_dir_2_str(fparms.dir), + tf_tbl_type_2_str(fparms.type), + (u32)fparms.idx); + return tf_free_tbl_entry(tfp, &fparms); +} + +static int +ulp_mapper_tf_index_tbl_alloc_process(struct bnxt_ulp_context *ulp, + u32 session_type, + u16 table_type, + u8 direction, + u64 *index) +{ + struct tf_alloc_tbl_entry_parms aparms = { 0 }; + u32 tbl_scope_id; + struct tf *tfp; + int rc = 0; + + tfp = bnxt_tf_ulp_cntxt_tfp_get(ulp, session_type); + if (!tfp) + return -EINVAL; + + /* Get the scope id */ + rc = bnxt_ulp_cntxt_tbl_scope_id_get(ulp, &tbl_scope_id); + if (rc) { + netdev_dbg(ulp->bp->dev, "Failed to get table scope rc=%d\n", rc); + return rc; + } + + aparms.type = table_type; + aparms.dir = direction; + aparms.tbl_scope_id = tbl_scope_id; + + /* Allocate the index tbl using tf api */ + rc = tf_alloc_tbl_entry(tfp, &aparms); + if (rc) { + netdev_dbg(ulp->bp->dev, "Failed to alloc index table [%s][%d]\n", + tf_dir_2_str(aparms.dir), aparms.type); + return rc; + } + + *index = aparms.idx; + + netdev_dbg(ulp->bp->dev, "Allocated Table Index [%s][%s] = 0x%04x\n", + tf_tbl_type_2_str(aparms.type), + tf_dir_2_str(aparms.dir), + aparms.idx); + return rc; +} + +/* Iterate over the shared resources assigned during tf_open_session and store + * them in the global regfile with the shared flag. 
+ */ +static int +ulp_mapper_tf_app_glb_resource_info_init(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_data *mapper_data) +{ + struct bnxt_ulp_glb_resource_info *glb_res; + u32 num_entries, idx, dev_id; + int rc = 0; + u8 app_id; + + glb_res = bnxt_ulp_app_glb_resource_info_list_get(&num_entries); + if (!glb_res || !num_entries) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid Arguments\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_dev_id_get(ulp_ctx, &dev_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get dev_id from ulp\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_app_id_get(ulp_ctx, &app_id); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get app id for glb init (%d)\n", + rc); + return rc; + } + + /* Iterate the global resources and process each one */ + for (idx = 0; idx < num_entries; idx++) { + if (dev_id != glb_res[idx].device_id || + glb_res[idx].app_id != app_id) + continue; + switch (glb_res[idx].resource_func) { + case BNXT_ULP_RESOURCE_FUNC_IDENTIFIER: + rc = ulp_mapper_resource_ident_allocate(ulp_ctx, + mapper_data, + &glb_res[idx], + true); + break; + case BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE: + rc = ulp_mapper_resource_index_tbl_alloc(ulp_ctx, + mapper_data, + &glb_res[idx], + true); + break; + default: + netdev_dbg(ulp_ctx->bp->dev, "Global resource %x not supported\n", + glb_res[idx].resource_func); + rc = -EINVAL; + break; + } + if (rc) + return rc; + } + return rc; +} + +static int32_t +ulp_mapper_tf_handle_to_offset(struct bnxt_ulp_mapper_parms *parms, + u64 handle, + u32 offset, + u64 *result) +{ + netdev_dbg(parms->ulp_ctx->bp->dev, "handle to offset not supported in tf\n"); + return -EINVAL; +} + +const struct ulp_mapper_core_ops ulp_mapper_tf_core_ops = { + .ulp_mapper_core_tcam_tbl_process = ulp_mapper_tf_tcam_tbl_process, + .ulp_mapper_core_tcam_entry_free = ulp_mapper_tf_tcam_entry_free, + .ulp_mapper_core_em_tbl_process = ulp_mapper_tf_em_tbl_process, + .ulp_mapper_core_em_entry_free = 
ulp_mapper_tf_em_entry_free, + .ulp_mapper_core_index_tbl_process = ulp_mapper_tf_index_tbl_process, + .ulp_mapper_core_index_entry_free = ulp_mapper_tf_index_entry_free, + .ulp_mapper_core_cmm_tbl_process = ulp_mapper_tf_cmm_tbl_process, + .ulp_mapper_core_cmm_entry_free = ulp_mapper_tf_cmm_entry_free, + .ulp_mapper_core_if_tbl_process = ulp_mapper_tf_if_tbl_process, + .ulp_mapper_core_ident_alloc_process = ulp_mapper_tf_ident_alloc, + .ulp_mapper_core_ident_free = ulp_mapper_tf_ident_free, + .ulp_mapper_core_dyn_tbl_type_get = ulp_mapper_tf_dyn_tbl_type_get, + .ulp_mapper_core_index_tbl_alloc_process = + ulp_mapper_tf_index_tbl_alloc_process, + .ulp_mapper_core_app_glb_res_info_init = + ulp_mapper_tf_app_glb_resource_info_init, + .ulp_mapper_core_handle_to_offset = ulp_mapper_tf_handle_to_offset +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p7.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p7.c new file mode 100644 index 000000000000..62923616681f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mapper_p7.c @@ -0,0 +1,1590 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#include "linux/kernel.h" +#include "bnxt_compat.h" +#include "ulp_mapper.h" +#include "ulp_flow_db.h" +#include "cfa_resources.h" +#include "tfc_util.h" +#include "bnxt_tf_ulp_p7.h" +#include "tfc_action_handle.h" +#include "ulp_utils.h" +#include "tf_ulp/ulp_template_debug_proto.h" + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +/* Internal function to write the tcam entry */ +static int +ulp_mapper_tfc_tcam_tbl_entry_write(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *key, + struct ulp_blob *mask, + struct ulp_blob *remap, + u16 idx) +{ + u16 key_size = 0, mask_size = 0, remap_size = 0; + struct tfc_tcam_info tfc_info = {0}; + struct tfc_tcam_data tfc_data = {0}; + struct tfc *tfcp = NULL; + u16 fw_fid; + int rc; + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + rc = bnxt_ulp_cntxt_fid_get(parms->ulp_ctx, &fw_fid); + if (rc) + return rc; + + tfc_info.dir = tbl->direction; + tfc_info.rsubtype = tbl->resource_type; + tfc_info.id = idx; + tfc_data.key = ulp_blob_data_get(key, &key_size); + tfc_data.key_sz_in_bytes = ULP_BITS_2_BYTE(key_size); + tfc_data.mask = ulp_blob_data_get(mask, &mask_size); + tfc_data.remap = ulp_blob_data_get(remap, &remap_size); + remap_size = ULP_BITS_2_BYTE(remap_size); + tfc_data.remap_sz_in_bytes = remap_size; + + if (tfc_tcam_set(tfcp, fw_fid, &tfc_info, &tfc_data)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "tcam[%s][%s][%x] write failed.\n", + tfc_tcam_2_str(tfc_info.rsubtype), + tfc_dir_2_str(tfc_info.dir), tfc_info.id); + return -EIO; + } + netdev_dbg(parms->ulp_ctx->bp->dev, "tcam[%s][%s][%x] write success.\n", + tfc_tcam_2_str(tfc_info.rsubtype), + tfc_dir_2_str(tfc_info.dir), tfc_info.id); + + /* Mark action */ + rc = ulp_mapper_mark_act_ptr_process(parms, tbl); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "failed mark action 
processing\n"); + return rc; + } + + ulp_mapper_tcam_entry_dump(parms->ulp_ctx, "TCAM", idx, tbl, key, mask, remap); + + return rc; +} + +static u32 +ulp_mapper_tfc_wc_tcam_post_process(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_device_params *dparms, + struct ulp_blob *key, + struct ulp_blob *tkey) +{ + u16 tlen, blen, clen, slice_width, num_slices, max_slices, offset; + u32 cword, i, rc; + int pad; + u8 *val; + + slice_width = dparms->wc_slice_width; + clen = dparms->wc_ctl_size_bits; + max_slices = dparms->wc_max_slices; + blen = ulp_blob_data_len_get(key); + + /* Get the length of the key based on number of slices and width */ + num_slices = 1; + tlen = slice_width; + while (tlen < blen && + num_slices <= max_slices) { + num_slices = num_slices << 1; + tlen = tlen << 1; + } + + if (num_slices > max_slices) { + netdev_dbg(ulp_ctx->bp->dev, "Key size (%d) too large for WC\n", blen); + return -EINVAL; + } + + /* The key/mask may not be on a natural slice boundary, pad it */ + pad = tlen - blen; + if (ulp_blob_pad_push(key, pad)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to pad key/mask\n"); + return -EINVAL; + } + + /* The new length accounts for the ctrl word length and num slices */ + tlen = tlen + (clen + 1) * num_slices; + if (ulp_blob_init(tkey, tlen, key->byte_order)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to post process wc tcam entry\n"); + return -EINVAL; + } + + /* pad any remaining bits to do byte alignment */ + pad = (slice_width + clen) * num_slices; + pad = ULP_BYTE_ROUND_OFF_8(pad) - pad; + if (ulp_blob_pad_push(tkey, pad)) { + netdev_dbg(ulp_ctx->bp->dev, "Unable to pad key/mask\n"); + return -EINVAL; + } + + /* Build the transformed key/mask */ + cword = dparms->wc_mode_list[num_slices - 1]; + cword = cpu_to_be32(cword); + offset = 0; + for (i = 0; i < num_slices; i++) { + val = ulp_blob_push_32(tkey, &cword, clen); + if (!val) { + netdev_dbg(ulp_ctx->bp->dev, "Key ctrl word push failed\n"); + return -EINVAL; + } + rc = 
ulp_blob_append(tkey, key, offset, slice_width); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Key blob append failed\n"); + return rc; + } + offset += slice_width; + } + blen = ulp_blob_data_len_get(tkey); + /* reverse the blob byte wise in reverse */ + ulp_blob_perform_byte_reverse(tkey, ULP_BITS_2_BYTE(blen)); + return 0; +} + +static int +ulp_mapper_tfc_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct bnxt_ulp_device_params *dparms = parms->device_params; + struct ulp_blob tkey, tmask; /* transform key and mask */ + u32 alloc_tcam = 0, alloc_ident = 0, write_tcam = 0; + struct ulp_flow_db_res_params fid_parms = { 0 }; + struct ulp_blob okey, omask, *key, *mask, data; + u16 key_sz_in_words = 0, key_sz_in_bits = 0; + enum cfa_track_type tt = tbl->track_type; + enum bnxt_ulp_byte_order key_byte_order; + enum bnxt_ulp_byte_order res_byte_order; + struct bnxt_ulp_mapper_key_info *kflds; + struct tfc_tcam_info tfc_inf = {0}; + struct tfc *tfcp = NULL; + int rc = 0, free_rc = 0; + u32 num_kflds, i; + u32 priority; + u16 fw_fid = 0; + + /* Set the key and mask to the original key and mask. 
*/ + key = &okey; + mask = &omask; + + switch (tbl->tbl_opcode) { + case BNXT_ULP_TCAM_TBL_OPC_ALLOC_IDENT: + alloc_ident = 1; + break; + case BNXT_ULP_TCAM_TBL_OPC_ALLOC_WR_REGFILE: + alloc_ident = 1; + alloc_tcam = 1; + write_tcam = 1; + break; + case BNXT_ULP_TCAM_TBL_OPC_NOT_USED: + case BNXT_ULP_TCAM_TBL_OPC_LAST: + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid tcam table opcode %d\n", + tbl->tbl_opcode); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + if (bnxt_ulp_cntxt_fid_get(parms->ulp_ctx, &fw_fid)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + /* Allocate the identifiers */ + if (alloc_ident) { + rc = ulp_mapper_tcam_tbl_ident_alloc(parms, tbl); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to alloc identifier\n"); + return rc; + } + } + + /* If no allocation or write is needed, then just exit */ + if (!alloc_tcam && !write_tcam) + return rc; + + /* Initialize the blobs for write */ + if (tbl->resource_type == CFA_RSUBTYPE_TCAM_WC) + key_byte_order = dparms->wc_key_byte_order; + else + key_byte_order = dparms->key_byte_order; + + res_byte_order = dparms->result_byte_order; + if (ulp_blob_init(key, tbl->blob_key_bit_size, key_byte_order) || + ulp_blob_init(mask, tbl->blob_key_bit_size, key_byte_order) || + ulp_blob_init(&data, tbl->result_bit_size, res_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "blob inits failed.\n"); + return -EINVAL; + } + + /* Get the key fields and update the key blob */ + if (tbl->key_recipe_opcode == BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY) + kflds = ulp_mapper_key_recipe_fields_get(parms, tbl, &num_kflds); + else + kflds = ulp_mapper_key_fields_get(parms, tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + return -EINVAL; + } + 
+ for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + key, 1, "TCAM Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Key field set failed %s\n", + kflds[i].field_info_spec.description); + return rc; + } + + /* Setup the mask */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_mask, + mask, 0, "TCAM Mask"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Mask field set failed %s\n", + kflds[i].field_info_mask.description); + return rc; + } + } + + /* For wild card tcam perform the post process to swap the blob */ + if (tbl->resource_type == CFA_RSUBTYPE_TCAM_WC) { + /* Sets up the slices for writing to the WC TCAM */ + rc = ulp_mapper_tfc_wc_tcam_post_process(parms->ulp_ctx, dparms, key, &tkey); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to post proc WC key.\n"); + return rc; + } + /* Sets up the slices for writing to the WC TCAM */ + rc = ulp_mapper_tfc_wc_tcam_post_process(parms->ulp_ctx, dparms, mask, &tmask); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to post proc WC mask.\n"); + return rc; + } + key = &tkey; + mask = &tmask; + } + + ulp_mapper_tcam_entry_dump(parms->ulp_ctx, "TCAM", 0, tbl, key, mask, &data); + + if (alloc_tcam) { + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + /* calculate the entry priority */ + rc = ulp_mapper_priority_opc_process(parms, tbl, &priority); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "entry priority process failed\n"); + return rc; + } + + /* allocate the tcam entry, only need the length */ + (void)ulp_blob_data_get(key, &key_sz_in_bits); + key_sz_in_words = ULP_BITS_2_BYTE(key_sz_in_bits); + tfc_inf.dir = tbl->direction; + tfc_inf.rsubtype = tbl->resource_type; + + rc = tfc_tcam_alloc(tfcp, fw_fid, tt, priority, 
key_sz_in_words, &tfc_inf); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "TCAM Alloc failed, status:%d\n", rc); + return rc; + } + + /* Write the tcam index into the regfile*/ + if (ulp_regfile_write(parms->regfile, tbl->tbl_operand, + (u64)cpu_to_be64(tfc_inf.id))) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Regfile[%d] write failed.\n", + tbl->tbl_operand); + /* Need to free the tcam idx, so goto error */ + goto error; + } + } + + if (write_tcam) { + /* Create the result blob */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, "TCAM Result"); + /* write the tcam entry */ + if (!rc) + rc = ulp_mapper_tfc_tcam_tbl_entry_write(parms, + tbl, key, + mask, &data, + tfc_inf.id); + } + if (rc) + goto error; + + /* Add the tcam index to the flow database */ + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = tfc_inf.id; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to link resource to flow rc = %d\n", + rc); + /* Need to free the identifier, so goto error */ + goto error; + } + + return 0; +error: + free_rc = tfc_tcam_free(tfcp, fw_fid, &tfc_inf); + if (free_rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "TCAM free failed on error, status:%d\n", + free_rc); + return rc; +} + +static const char * const mpc_error_str[] = { + "OK", + "Unsupported Opcode", + "Bad Format", + "Invalid Scope", + "Bad Address", + "Cache Error", + "EM Miss", + "Duplicate Entry", + "No Events", + "EM Abort" +}; + +/** + * TBD: Temporary swap until a more generic solution is designed + * + * @blob: A byte array that is being edited in-place + * @block_sz: The size of the blocks in bytes to swap + * + * The length of the blob is assumed to be a multiple of block_sz + */ +static int 
+ulp_mapper_blob_block_swap(struct bnxt_ulp_context *ulp_ctx, struct ulp_blob *blob, u32 block_sz) +{ + u16 num_words, data_sz; + int i, rc = 0; + u8 *pdata; + u8 *data; /* size of a block for temp storage */ + + /* Shouldn't happen since it is internal function, but check anyway */ + if (!blob || !block_sz) { + netdev_dbg(ulp_ctx->bp->dev, "Invalid arguments\n"); + return -EINVAL; + } + + data = vzalloc(block_sz); + if (!data) + return -ENOMEM; + + pdata = ulp_blob_data_get(blob, &data_sz); + data_sz = ULP_BITS_2_BYTE(data_sz); + if (!data_sz || (data_sz % block_sz) != 0) { + netdev_dbg(ulp_ctx->bp->dev, "length(%d) not a multiple of %d\n", + data_sz, block_sz); + rc = -EINVAL; + goto err; + } + + num_words = data_sz / block_sz; + for (i = 0; i < num_words / 2; i++) { + memcpy(data, &pdata[i * block_sz], block_sz); + memcpy(&pdata[i * block_sz], + &pdata[(num_words - 1 - i) * block_sz], block_sz); + memcpy(&pdata[(num_words - 1 - i) * block_sz], + data, block_sz); + } +err: + vfree(data); + return rc; +} + +static int +ulp_mapper_tfc_em_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + void *error) +{ + struct bnxt_ulp_device_params *dparms = parms->device_params; + struct ulp_flow_db_res_params fid_parms = { 0 }; + struct tfc_em_delete_parms free_parms = { 0 }; + struct tfc_em_insert_parms iparms = { 0 }; + struct bnxt_ulp_mapper_key_info *kflds; + u16 tmplen, key_len, align_len_bits; + enum bnxt_ulp_byte_order byte_order; + struct ulp_blob key, data; + struct tfc *tfcp = NULL; + int rc = 0, trc = 0; + u32 i, num_kflds; + u64 handle = 0; + u8 tsid = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + if (tbl->key_recipe_opcode == BNXT_ULP_KEY_RECIPE_OPC_DYN_KEY) + kflds = ulp_mapper_key_recipe_fields_get(parms, tbl, &num_kflds); + else + kflds = ulp_mapper_key_fields_get(parms, 
tbl, &num_kflds); + if (!kflds || !num_kflds) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get key fields\n"); + return -EINVAL; + } + + byte_order = dparms->em_byte_order; + /* Initialize the key/result blobs */ + if (ulp_blob_init(&key, tbl->blob_key_bit_size, byte_order) || + ulp_blob_init(&data, tbl->result_bit_size, byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "blob inits failed.\n"); + return -EINVAL; + } + + /* create the key */ + for (i = 0; i < num_kflds; i++) { + /* Setup the key */ + rc = ulp_mapper_field_opc_process(parms, tbl->direction, + &kflds[i].field_info_spec, + &key, 1, "EM Key"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Key field set failed.\n"); + return rc; + } + } + /* add padding to make sure key is at record boundary */ + key_len = ulp_blob_data_len_get(&key); + if (key_len > dparms->em_blk_align_bits) { + key_len = key_len - dparms->em_blk_align_bits; + align_len_bits = dparms->em_blk_size_bits - + (key_len % dparms->em_blk_size_bits); + } else { + align_len_bits = dparms->em_blk_align_bits - key_len; + } + + ulp_blob_pad_push(&key, align_len_bits); + key_len = ULP_BITS_2_BYTE(ulp_blob_data_len_get(&key)); + ulp_blob_perform_byte_reverse(&key, key_len); + + ulp_mapper_result_dump(parms->ulp_ctx, "EM Key", tbl, &key); + + /* Create the result data blob */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, "EM Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + ulp_blob_pad_align(&data, dparms->em_blk_align_bits); + key_len = ULP_BITS_2_BYTE(ulp_blob_data_len_get(&data)); + ulp_blob_perform_byte_reverse(&data, key_len); + + ulp_mapper_result_dump(parms->ulp_ctx, "EM Result", tbl, &data); + + /* merge the result into the key blob */ + rc = ulp_blob_append(&key, &data, 0, dparms->em_blk_align_bits); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "EM Failed to append the result to key(%d)", + rc); + return rc; + } + /* TBD: Need to come up with a 
more generic way to know when to swap, + * this is fine for now as this driver only supports this device. + */ + rc = ulp_mapper_blob_block_swap(parms->ulp_ctx, &key, + ULP_BITS_2_BYTE(dparms->em_blk_size_bits)); + /* Error printed within function, just return on error */ + if (rc) + return rc; + + ulp_mapper_result_dump(parms->ulp_ctx, "EM Merged Result", tbl, &key); + + iparms.dir = tbl->direction; + iparms.lkup_key_data = ulp_blob_data_get(&key, &tmplen); + iparms.lkup_key_sz_words = ULP_BITS_TO_32_BYTE_WORD(tmplen); + iparms.key_data = NULL; + iparms.key_sz_bits = 0; + iparms.flow_handle = &handle; + + rc = bnxt_ulp_cntxt_tsid_get(parms->ulp_ctx, &tsid); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get the table scope\n"); + return rc; + } + rc = tfc_em_insert(tfcp, tsid, &iparms); + if (rc) { + /* Set the error flag in reg file */ + if (tbl->tbl_opcode == BNXT_ULP_EM_TBL_OPC_WR_REGFILE) { + uint64_t val = 0; + int tmp_rc = 0; + + /* hash collision */ + if (rc == -E2BIG) + netdev_dbg(parms->ulp_ctx->bp->dev, "Dulicate EM entry\n"); + + /* over max flows */ + if (rc == -ENOMEM) { + val = 1; + rc = 0; + netdev_dbg(parms->ulp_ctx->bp->dev, + "Fail to insert EM, shall add to wc\n"); + } + tmp_rc = ulp_regfile_write(parms->regfile, tbl->tbl_operand, + cpu_to_be64(val)); + if (!tmp_rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "regwrite failed\n"); + } + if (rc && rc != -E2BIG) + netdev_err(parms->ulp_ctx->bp->dev, + "Failed to insert em entry rc=%d.\n", rc); + return rc; + } + + ulp_mapper_tfc_em_dump(parms->ulp_ctx, "EM", &key, &iparms); + + /* Mark action process */ + rc = ulp_mapper_mark_gfid_process(parms, tbl, *iparms.flow_handle); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to add mark to flow\n"); + goto error; + } + + /* Link the EM resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = 
tbl->resource_type; + fid_parms.critical_resource = tbl->critical_resource; + fid_parms.resource_hndl = *iparms.flow_handle; + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Fail to link res to flow rc = %d\n", rc); + /* Need to free the identifier, so goto error */ + goto error; + } + + return 0; +error: + free_parms.dir = iparms.dir; + free_parms.flow_handle = *iparms.flow_handle; + + trc = tfc_em_delete(tfcp, &free_parms); + if (trc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to delete EM entry on failed add\n"); + + return rc; +} + +static int +ulp_mapper_tfc_em_entry_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res, + void *error) +{ + struct tfc_em_delete_parms free_parms = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc = 0; + + if (bnxt_ulp_cntxt_fid_get(ulp, &fw_fid)) { + netdev_dbg(ulp->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + free_parms.dir = (enum cfa_dir)res->direction; + free_parms.flow_handle = res->resource_hndl; + + rc = tfc_em_delete(tfcp, &free_parms); + if (rc) { + netdev_dbg(ulp->bp->dev, "Failed to delete EM entry, res_hndl = %llx\n", + res->resource_hndl); + } else { + netdev_dbg(ulp->bp->dev, "Deleted EM entry, res = %llu\n", + res->resource_hndl); + } + + return rc; +} + +static u16 +ulp_mapper_tfc_dyn_blob_size_get(struct bnxt_ulp_mapper_parms *mparms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + struct bnxt_ulp_device_params *d_params = mparms->device_params; + enum bnxt_ulp_resource_type rtype = tbl->resource_type; + + if (d_params->dynamic_sram_en) { + switch (rtype) { + /* TBD: add more types here */ + case BNXT_ULP_RESOURCE_TYPE_STAT: + case BNXT_ULP_RESOURCE_TYPE_ENCAP: + case BNXT_ULP_RESOURCE_TYPE_MODIFY: + /* return max size */ + return 
BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + default: + break; + } + } else if (tbl->encap_num_fields) { + return BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + } + return tbl->result_bit_size; +} + +static int +ulp_mapper_tfc_index_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + bool alloc = false, write = false, global = false, regfile = false; + struct bnxt_ulp_glb_resource_info glb_res = { 0 }; + struct ulp_flow_db_res_params fid_parms; + enum cfa_track_type tt = tbl->track_type; + struct tfc_idx_tbl_info tbl_info = { 0 }; + u16 bit_size, wordlen = 0, tmplen = 0; + struct bnxt *bp = parms->ulp_ctx->bp; + struct tfc *tfcp = NULL; + unsigned char *data_p; + struct ulp_blob data; + bool shared = false; + u64 regval = 0; + u16 fw_fid = 0; + u32 index = 0; + int rc = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + if (bnxt_ulp_cntxt_fid_get(parms->ulp_ctx, &fw_fid)) { + netdev_dbg(bp->dev, "Failed to get func id\n"); + return -EINVAL; + } + + /* compute the blob size */ + bit_size = ulp_mapper_tfc_dyn_blob_size_get(parms, tbl); + + /* Initialize the blob data */ + if (ulp_blob_init(&data, bit_size, + parms->device_params->result_byte_order)) { + netdev_dbg(bp->dev, "Failed to initialize index table blob\n"); + return -EINVAL; + } + + switch (tbl->tbl_opcode) { + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE: + alloc = true; + regfile = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the regfile. + */ + alloc = true; + write = true; + regfile = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE: + /* get the index to write to from the regfile and then write + * the table entry. 
+ */ + regfile = true; + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_GLB_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the global regfile. + */ + alloc = true; + global = true; + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE: + if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) { + netdev_dbg(bp->dev, "Template error, wrong fdb opcode\n"); + return -EINVAL; + } + /* get the index to write to from the global regfile and then + * write the table. + */ + if (ulp_mapper_glb_resource_read(parms->mapper_data, + tbl->direction, + tbl->tbl_operand, + ®val, &shared)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from Glb RF[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + /* check to see if any scope id changes needs to be done*/ + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE: + /* The read is different from the rest and can be handled here + * instead of trying to use common code. Simply read the table + * with the index from the regfile, scan and store the + * identifiers, and return. 
+ */ + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, ®val)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from regfile[%d]\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + tbl_info.dir = tbl->direction; + tbl_info.rsubtype = tbl->resource_type; + tbl_info.id = index; + /* Nothing has been pushed to blob, so push bit_size */ + ulp_blob_pad_push(&data, bit_size); + data_p = ulp_blob_data_get(&data, &tmplen); + wordlen = ULP_BITS_2_BYTE(tmplen); + + rc = tfc_idx_tbl_get(tfcp, fw_fid, &tbl_info, (u32 *)data_p, + (u8 *)&wordlen); + if (rc) { + netdev_dbg(bp->dev, "Failed to read the tbl entry %d:%d\n", + tbl->resource_type, index); + return rc; + } + + /* Scan the fields in the entry and push them into the regfile*/ + rc = ulp_mapper_tbl_ident_scan_ext(parms, tbl, data_p, + wordlen, data.byte_order); + if (rc) { + netdev_dbg(bp->dev, "Failed to get flds on tbl read rc=%d\n", rc); + return rc; + } + return 0; + case BNXT_ULP_INDEX_TBL_OPC_NOP_REGFILE: + /* Special case, where hw table processing is not being done */ + /* but only for writing the regfile into the flow database */ + regfile = true; + break; + default: + netdev_dbg(bp->dev, "Invalid index table opcode %d\n", tbl->tbl_opcode); + return -EINVAL; + } + + /* read the CMM identifier from the regfile, it is not allocated */ + if (!alloc && regfile) { + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, + ®val)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from regfile[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + index = be64_to_cpu(regval); + } + + /* Allocate the Action CMM identifier */ + if (alloc) { + tbl_info.dir = tbl->direction; + tbl_info.rsubtype = tbl->resource_type; + rc = tfc_idx_tbl_alloc(tfcp, fw_fid, tt, &tbl_info); + if (rc) { + netdev_dbg(bp->dev, "Alloc table[%s][%s] failed rc=%d\n", + tfc_idx_tbl_2_str(tbl_info.rsubtype), + tfc_dir_2_str(tbl->direction), rc); + return rc; + } + index = tbl_info.id; + } + + /* update the global register 
value */ + if (alloc && global) { + glb_res.direction = tbl->direction; + glb_res.resource_func = tbl->resource_func; + glb_res.resource_type = tbl->resource_type; + glb_res.glb_regfile_index = tbl->tbl_operand; + regval = cpu_to_be64(index); + + /* Shared resources are never allocated through this + * method, so the shared flag is always false. + */ + rc = ulp_mapper_glb_resource_write(parms->mapper_data, + &glb_res, regval, + false); + if (rc) { + netdev_dbg(bp->dev, "Failed to write %s regfile[%d] rc=%d\n", + (global) ? "global" : "reg", + tbl->tbl_operand, rc); + goto error; + } + } + + /* update the local register value */ + if (alloc && regfile) { + regval = cpu_to_be64(index); + rc = ulp_regfile_write(parms->regfile, + tbl->tbl_operand, regval); + if (rc) { + netdev_dbg(bp->dev, "Failed to write %s regfile[%d] rc=%d\n", + (global) ? "global" : "reg", + tbl->tbl_operand, rc); + goto error; + } + } + + if (write) { + /* Get the result fields list */ + rc = ulp_mapper_tbl_result_build(parms, + tbl, + &data, + "Indexed Result"); + if (rc) { + netdev_dbg(bp->dev, "Failed to build the result blob\n"); + return rc; + } + data_p = ulp_blob_data_get(&data, &tmplen); + tbl_info.dir = tbl->direction; + tbl_info.rsubtype = tbl->resource_type; + tbl_info.id = index; + wordlen = ULP_BITS_2_BYTE(tmplen); + rc = tfc_idx_tbl_set(tfcp, fw_fid, &tbl_info, + (u32 *)data_p, wordlen); + if (rc) { + netdev_dbg(bp->dev, "Index table[%s][%s][%x] write fail %d\n", + tfc_idx_tbl_2_str(tbl_info.rsubtype), + tfc_dir_2_str(tbl_info.dir), + tbl_info.id, rc); + goto error; + } + netdev_dbg(bp->dev, "Index table[%s][%d][%x] write successful\n", + tfc_idx_tbl_2_str(tbl_info.rsubtype), + tbl_info.dir, tbl_info.id); + } + /* Link the resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = 
tbl->resource_sub_type; + fid_parms.resource_hndl = index; + fid_parms.critical_resource = tbl->critical_resource; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to link resource to flow rc = %d\n", rc); + goto error; + } + + /* Perform the VF rep action */ + rc = ulp_mapper_mark_vfr_idx_process(parms, tbl); + if (rc) { + netdev_dbg(bp->dev, "Failed to add vfr mark rc = %d\n", rc); + goto error; + } + return rc; +error: + /* Shared resources are not freed */ + if (shared) + return rc; + /* Free the allocated resource since we failed to either + * write to the entry or link the flow + */ + + if (tfc_idx_tbl_free(tfcp, fw_fid, &tbl_info)) + netdev_dbg(bp->dev, "Failed to free index entry on failure\n"); + return rc; +} + +static inline int +ulp_mapper_tfc_index_entry_free(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res) +{ + struct tfc_idx_tbl_info tbl_info = { 0 }; + struct bnxt *bp = ulp_ctx->bp; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc; + + if (bnxt_ulp_cntxt_fid_get(ulp_ctx, &fw_fid)) { + netdev_dbg(bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + tbl_info.dir = (enum cfa_dir)res->direction; + tbl_info.rsubtype = res->resource_type; + tbl_info.id = (u16)res->resource_hndl; + + /* TBD: check to see if the memory needs to be cleaned as well*/ + rc = tfc_idx_tbl_free(tfcp, fw_fid, &tbl_info); + if (!rc) + netdev_dbg(bp->dev, "Freed Index [%d]:[%d] = 0x%X\n", + tbl_info.dir, tbl_info.rsubtype, tbl_info.id); + + return rc; +} + +static int +ulp_mapper_tfc_cmm_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + void *error) +{ + bool alloc = false, write = false, global = false, regfile = false; + 
struct bnxt_ulp_glb_resource_info glb_res = { 0 }; + u16 bit_size, act_wordlen = 0, tmplen = 0; + struct ulp_flow_db_res_params fid_parms; + struct bnxt *bp = parms->ulp_ctx->bp; + struct tfc_cmm_info cmm_info = { 0 }; + struct tfc *tfcp = NULL; + struct ulp_blob data; + u64 act_rec_size = 0; + bool shared = false; + const u8 *act_data; + u64 regval = 0; + u64 handle = 0; + u8 tsid = 0; + int rc = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + /* compute the blob size */ + bit_size = ulp_mapper_tfc_dyn_blob_size_get(parms, tbl); + + /* Initialize the blob data */ + if (ulp_blob_init(&data, bit_size, + parms->device_params->result_byte_order)) { + netdev_dbg(bp->dev, "Failed to initialize cmm table blob\n"); + return -EINVAL; + } + + switch (tbl->tbl_opcode) { + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_REGFILE: + regfile = true; + alloc = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the regfile. + */ + alloc = true; + write = true; + regfile = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_REGFILE: + /* get the index to write to from the regfile and then write + * the table entry. + */ + regfile = true; + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_ALLOC_WR_GLB_REGFILE: + /* Build the entry, alloc an index, write the table, and store + * the data in the global regfile. + */ + alloc = true; + global = true; + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_WR_GLB_REGFILE: + if (tbl->fdb_opcode != BNXT_ULP_FDB_OPC_NOP) { + netdev_dbg(bp->dev, "Template error, wrong fdb opcode\n"); + return -EINVAL; + } + /* get the index to write to from the global regfile and then + * write the table. 
+ */ + if (ulp_mapper_glb_resource_read(parms->mapper_data, + tbl->direction, + tbl->tbl_operand, + ®val, &shared)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from Glb RF[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + handle = be64_to_cpu(regval); + /* check to see if any scope id changes needs to be done*/ + write = true; + break; + case BNXT_ULP_INDEX_TBL_OPC_RD_REGFILE: + /* The read is different from the rest and can be handled here + * instead of trying to use common code. Simply read the table + * with the index from the regfile, scan and store the + * identifiers, and return. + */ + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, ®val)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from regfile[%d]\n", + tbl->tbl_operand); + return -EINVAL; + } + handle = be64_to_cpu(regval); + return 0; + case BNXT_ULP_INDEX_TBL_OPC_NOP_REGFILE: + regfile = true; + alloc = false; + break; + default: + netdev_dbg(bp->dev, "Invalid cmm table opcode %d\n", tbl->tbl_opcode); + return -EINVAL; + } + + /* read the CMM handle from the regfile, it is not allocated */ + if (!alloc && regfile) { + if (ulp_regfile_read(parms->regfile, + tbl->tbl_operand, + ®val)) { + netdev_dbg(bp->dev, "Failed to get tbl idx from regfile[%d].\n", + tbl->tbl_operand); + return -EINVAL; + } + handle = be64_to_cpu(regval); + } + + /* Get the result fields list */ + rc = ulp_mapper_tbl_result_build(parms, + tbl, + &data, + "Indexed Result"); + if (rc) { + netdev_dbg(bp->dev, "Failed to build the result blob\n"); + return rc; + } + + /* Allocate the Action CMM identifier */ + if (alloc) { + cmm_info.dir = tbl->direction; + cmm_info.rsubtype = tbl->resource_type; + /* Only need the length for alloc, ignore the returned data */ + act_data = ulp_blob_data_get(&data, &tmplen); + act_wordlen = ULP_BITS_TO_32_BYTE_WORD(tmplen); + + rc = bnxt_ulp_cntxt_tsid_get(parms->ulp_ctx, &tsid); + if (rc) { + netdev_dbg(bp->dev, "Failed to get the table scope\n"); + return rc; + } + /* All failures 
after the alloc succeeds require a free */ + rc = tfc_act_alloc(tfcp, tsid, &cmm_info, act_wordlen); + if (rc) { + netdev_dbg(bp->dev, "Alloc CMM [%d][%s] failed rc=%d\n", + cmm_info.rsubtype, tfc_dir_2_str(cmm_info.dir), rc); + return rc; + } + handle = cmm_info.act_handle; + + /* Counters need to be reset when allocated to ensure counter is + * zero + */ + if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_CMM_STAT) { + rc = tfc_act_set(tfcp, &cmm_info, act_data, act_wordlen); + if (rc) { + netdev_dbg(bp->dev, "Stat alloc/clear[%d][%s][%llu] failed rc=%d\n", + cmm_info.rsubtype, + tfc_dir_2_str(cmm_info.dir), + cmm_info.act_handle, rc); + goto error; + } + } + } + + /* update the global register value */ + if (alloc && global) { + glb_res.direction = tbl->direction; + glb_res.resource_func = tbl->resource_func; + glb_res.resource_type = tbl->resource_type; + glb_res.glb_regfile_index = tbl->tbl_operand; + regval = cpu_to_be64(handle); + + /* Shared resources are never allocated through this + * method, so the shared flag is always false. + */ + rc = ulp_mapper_glb_resource_write(parms->mapper_data, + &glb_res, regval, + false); + if (rc) { + netdev_dbg(bp->dev, "Failed to write %s regfile[%d] rc=%d\n", + (global) ? "global" : "reg", + tbl->tbl_operand, rc); + goto error; + } + } + + /* update the local register value */ + if (alloc && regfile) { + regval = cpu_to_be64(handle); + rc = ulp_regfile_write(parms->regfile, + tbl->tbl_operand, regval); + if (rc) { + netdev_dbg(bp->dev, "Failed to write %s regfile[%d] rc=%d\n", + (global) ? 
"global" : "reg", + tbl->tbl_operand, rc); + goto error; + } + } + + if (write) { + act_data = ulp_blob_data_get(&data, &tmplen); + cmm_info.dir = tbl->direction; + cmm_info.rsubtype = tbl->resource_type; + cmm_info.act_handle = handle; + act_wordlen = ULP_BITS_TO_32_BYTE_WORD(tmplen); + rc = tfc_act_set(tfcp, &cmm_info, act_data, act_wordlen); + if (rc) { + netdev_dbg(bp->dev, "CMM table[%d][%s][%llu] write fail %d\n", + cmm_info.rsubtype, + tfc_dir_2_str(cmm_info.dir), + handle, rc); + goto error; + } + netdev_dbg(bp->dev, "CMM table[%d][%s][0x%016llx] write successful\n", + cmm_info.rsubtype, tfc_dir_2_str(cmm_info.dir), handle); + + /* Calculate action record size */ + if (tbl->resource_type == CFA_RSUBTYPE_CMM_ACT) { + act_rec_size = (ULP_BITS_2_BYTE_NR(tmplen) + 15) / 16; + act_rec_size--; + if (ulp_regfile_write(parms->regfile, + BNXT_ULP_RF_IDX_ACTION_REC_SIZE, + cpu_to_be64(act_rec_size))) + netdev_dbg(bp->dev, "Failed write the act rec size\n"); + } + } + /* Link the resource to the flow in the flow db */ + memset(&fid_parms, 0, sizeof(fid_parms)); + fid_parms.direction = tbl->direction; + fid_parms.resource_func = tbl->resource_func; + fid_parms.resource_type = tbl->resource_type; + fid_parms.resource_sub_type = tbl->resource_sub_type; + fid_parms.resource_hndl = handle; + fid_parms.critical_resource = tbl->critical_resource; + ulp_flow_db_shared_session_set(&fid_parms, tbl->session_type); + + rc = ulp_mapper_fdb_opc_process(parms, tbl, &fid_parms); + if (rc) { + netdev_dbg(bp->dev, "Failed to link resource to flow rc = %d\n", rc); + goto error; + } + + /* Perform the VF rep action */ + rc = ulp_mapper_mark_vfr_idx_process(parms, tbl); + if (rc) { + netdev_dbg(bp->dev, "Failed to add vfr mark rc = %d\n", rc); + goto error; + } + return rc; +error: + /* Shared resources are not freed */ + if (shared) + return rc; + /* Free the allocated resource since we failed to either + * write to the entry or link the flow + */ + + if (tfc_act_free(tfcp, &cmm_info)) + 
netdev_dbg(bp->dev, "Failed to free cmm entry on failure\n"); + + return rc; +} + +static int +ulp_mapper_tfc_cmm_entry_free(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res, + void *error) +{ + struct tfc_cmm_info cmm_info = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc = 0; + + /* skip cmm processing if reserve flag is enabled */ + if (res->reserve_flag) + return 0; + + if (bnxt_ulp_cntxt_fid_get(ulp_ctx, &fw_fid)) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + cmm_info.dir = (enum cfa_dir)res->direction; + cmm_info.rsubtype = res->resource_type; + cmm_info.act_handle = res->resource_hndl; + + /* TBD: check to see if the memory needs to be cleaned as well */ + rc = tfc_act_free(tfcp, &cmm_info); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to delete CMM entry,res = 0x%llX\n", + res->resource_hndl); + } else { + netdev_dbg(ulp_ctx->bp->dev, "Deleted CMM entry,res = %llX\n", res->resource_hndl); + } + return rc; +} + +static int +ulp_mapper_tfc_if_tbl_process(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl) +{ + enum bnxt_ulp_if_tbl_opc if_opc = tbl->tbl_opcode; + struct tfc_if_tbl_info tbl_info = { 0 }; + struct ulp_blob data, res_blob; + unsigned char *data_p; + struct tfc *tfcp; + u16 fw_fid = 0; + u8 data_size; + u32 res_size; + u16 tmplen; + int rc = 0; + u64 idx; + + if (bnxt_ulp_cntxt_fid_get(parms->ulp_ctx, &fw_fid)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(parms->ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + /* Initialize the blob data */ + if (ulp_blob_init(&data, tbl->result_bit_size, 
+ parms->device_params->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial index table blob\n"); + return -EINVAL; + } + + /* create the result blob */ + rc = ulp_mapper_tbl_result_build(parms, tbl, &data, "IFtable Result"); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed to build the result blob\n"); + return rc; + } + + /* Get the index details */ + switch (if_opc) { + case BNXT_ULP_IF_TBL_OPC_WR_COMP_FIELD: + idx = ULP_COMP_FLD_IDX_RD(parms, tbl->tbl_operand); + break; + case BNXT_ULP_IF_TBL_OPC_WR_REGFILE: + if (ulp_regfile_read(parms->regfile, tbl->tbl_operand, &idx)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "regfile[%d] read oob\n", + tbl->tbl_operand); + return -EINVAL; + } + idx = be64_to_cpu(idx); + break; + case BNXT_ULP_IF_TBL_OPC_WR_CONST: + idx = tbl->tbl_operand; + break; + case BNXT_ULP_IF_TBL_OPC_RD_COMP_FIELD: + /* Initialize the result blob */ + if (ulp_blob_init(&res_blob, tbl->result_bit_size, + parms->device_params->result_byte_order)) { + netdev_dbg(parms->ulp_ctx->bp->dev, "Failed initial result blob\n"); + return -EINVAL; + } + + /* read the interface table */ + idx = ULP_COMP_FLD_IDX_RD(parms, tbl->tbl_operand); + res_size = ULP_BITS_2_BYTE(tbl->result_bit_size); + rc = ulp_mapper_tbl_ident_scan_ext(parms, tbl, + res_blob.data, + res_size, + res_blob.byte_order); + if (rc) + netdev_dbg(parms->ulp_ctx->bp->dev, "Scan and extract failed rc=%d\n", rc); + return rc; + case BNXT_ULP_IF_TBL_OPC_NOT_USED: + return rc; /* skip it */ + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Invalid tbl index opcode\n"); + return -EINVAL; + } + + tbl_info.dir = tbl->direction; + tbl_info.rsubtype = tbl->resource_type; + tbl_info.id = (uint32_t)idx; + data_p = ulp_blob_data_get(&data, &tmplen); + data_size = ULP_BITS_2_BYTE(tmplen); + + rc = tfc_if_tbl_set(tfcp, fw_fid, &tbl_info, (uint8_t *)data_p, + data_size); + if (rc) { + netdev_dbg(parms->ulp_ctx->bp->dev, + "Failed to write the if tbl entry %d:%d\n", + 
tbl->resource_type, (uint32_t)idx); + return rc; + } + + return rc; +} + +static int +ulp_mapper_tfc_ident_alloc(struct bnxt_ulp_context *ulp_ctx, + u32 session_type, + u16 ident_type, + u8 direction, + enum cfa_track_type tt, + u64 *identifier_id) +{ + struct tfc_identifier_info ident_info = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc = 0; + + if (bnxt_ulp_cntxt_fid_get(ulp_ctx, &fw_fid)) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + ident_info.dir = direction; + ident_info.rsubtype = ident_type; + + rc = tfc_identifier_alloc(tfcp, fw_fid, tt, &ident_info); + if (rc != 0) { + netdev_dbg(ulp_ctx->bp->dev, "alloc failed %d\n", rc); + return rc; + } + *identifier_id = ident_info.id; +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG +#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_MAPPER + netdev_dbg(ulp_ctx->bp->dev, "Allocated Identifier [%s]:[%s] = 0x%X\n", + tfc_dir_2_str(direction), + tfc_ident_2_str(ident_info.rsubtype), ident_info.id); +#endif +#endif + + return rc; +} + +static int +ulp_mapper_tfc_ident_free(struct bnxt_ulp_context *ulp_ctx, + struct ulp_flow_db_res_params *res) +{ + struct tfc_identifier_info ident_info = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc = 0; + + if (bnxt_ulp_cntxt_fid_get(ulp_ctx, &fw_fid)) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp_ctx, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp_ctx->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + + ident_info.dir = (enum cfa_dir)res->direction; + ident_info.rsubtype = res->resource_type; + ident_info.id = res->resource_hndl; + + rc = tfc_identifier_free(tfcp, fw_fid, &ident_info); + if (rc != 0) { + netdev_dbg(ulp_ctx->bp->dev, "free failed %d\n", rc); + 
return rc; + } + + netdev_dbg(ulp_ctx->bp->dev, "Freed Identifier [%s]:[%s] = 0x%X\n", + tfc_dir_2_str(ident_info.dir), + tfc_ident_2_str(ident_info.rsubtype), ident_info.id); + + return rc; +} + +static inline int +ulp_mapper_tfc_tcam_entry_free(struct bnxt_ulp_context *ulp, + struct ulp_flow_db_res_params *res) +{ + struct tfc_tcam_info tcam_info = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + + if (bnxt_ulp_cntxt_fid_get(ulp, &fw_fid)) { + netdev_dbg(ulp->bp->dev, "Failed to get func_id\n"); + return -EINVAL; + } + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp->bp->dev, "Failed to get tfcp pointer\n"); + return -EINVAL; + } + tcam_info.dir = (enum cfa_dir)res->direction; + tcam_info.rsubtype = res->resource_type; + tcam_info.id = (u16)res->resource_hndl; + + if (!tfcp || tfc_tcam_free(tfcp, fw_fid, &tcam_info)) { + netdev_dbg(ulp->bp->dev, "Unable to free tcam resource %u\n", tcam_info.id); + return -EINVAL; + } + + netdev_dbg(ulp->bp->dev, "Freed TCAM [%d]:[%d] = 0x%X\n", + tcam_info.dir, tcam_info.dir, tcam_info.id); + return 0; +} + +static u32 +ulp_mapper_tfc_dyn_tbl_type_get(struct bnxt_ulp_mapper_parms *parms, + struct bnxt_ulp_mapper_tbl_info *tbl, + u16 blob_len, + u16 *out_len) +{ + switch (tbl->resource_type) { + case CFA_RSUBTYPE_CMM_ACT: + *out_len = ULP_BITS_TO_32_BYTE_WORD(blob_len); + *out_len = *out_len * 256; + break; + default: + netdev_dbg(parms->ulp_ctx->bp->dev, "Not a dynamic table %d\n", tbl->resource_type); + *out_len = blob_len; + break; + } + + return tbl->resource_type; +} + +static int +ulp_mapper_tfc_index_tbl_alloc_process(struct bnxt_ulp_context *ulp, + u32 session_type, + u16 table_type, + u8 direction, + u64 *index) +{ + struct tfc_idx_tbl_info tbl_info = { 0 }; + struct tfc *tfcp = NULL; + u16 fw_fid = 0; + int rc = 0; + + tfcp = bnxt_ulp_cntxt_tfcp_get(ulp, BNXT_ULP_SESSION_TYPE_DEFAULT); + if (!tfcp) { + netdev_dbg(ulp->bp->dev, "Failed to get tfcp pointer\n"); + 
return -EINVAL; + } + + if (bnxt_ulp_cntxt_fid_get(ulp, &fw_fid)) { + netdev_dbg(ulp->bp->dev, "Failed to get func id\n"); + return -EINVAL; + } + + tbl_info.rsubtype = table_type; + tbl_info.dir = direction; + rc = tfc_idx_tbl_alloc(tfcp, fw_fid, CFA_TRACK_TYPE_SID, &tbl_info); + if (rc) { + netdev_dbg(ulp->bp->dev, "Alloc table[%s][%s] failed rc=%d\n", + tfc_idx_tbl_2_str(tbl_info.rsubtype), + tfc_dir_2_str(direction), rc); + return rc; + } + + *index = tbl_info.id; + + netdev_dbg(ulp->bp->dev, "Allocated Table Index [%s][%s] = 0x%04x\n", + tfc_idx_tbl_2_str(table_type), tfc_dir_2_str(direction), + tbl_info.id); + + return rc; +} + +static int +ulp_mapper_tfc_app_glb_resource_info_init(struct bnxt_ulp_context + *ulp_ctx, + struct bnxt_ulp_mapper_data + *mapper_data) +{ + /* Not supported Shared Apps yet on TFC API */ + return 0; +} + +static int +ulp_mapper_tfc_handle_to_offset(struct bnxt_ulp_mapper_parms *parms, + u64 handle, + u32 offset, + u64 *result) +{ + u32 val = 0; + int rc = 0; + + TFC_GET_32B_OFFSET_ACT_HANDLE(val, &handle); + + switch (offset) { + case 0: + val = val << 5; + break; + case 4: + val = val << 3; + break; + case 8: + val = val << 2; + break; + case 16: + val = val << 1; + break; + case 32: + break; + default: + return -EINVAL; + } + + *result = val; + return rc; +} + +const struct ulp_mapper_core_ops ulp_mapper_tfc_core_ops = { + .ulp_mapper_core_tcam_tbl_process = ulp_mapper_tfc_tcam_tbl_process, + .ulp_mapper_core_tcam_entry_free = ulp_mapper_tfc_tcam_entry_free, + .ulp_mapper_core_em_tbl_process = ulp_mapper_tfc_em_tbl_process, + .ulp_mapper_core_em_entry_free = ulp_mapper_tfc_em_entry_free, + .ulp_mapper_core_index_tbl_process = ulp_mapper_tfc_index_tbl_process, + .ulp_mapper_core_index_entry_free = ulp_mapper_tfc_index_entry_free, + .ulp_mapper_core_cmm_tbl_process = ulp_mapper_tfc_cmm_tbl_process, + .ulp_mapper_core_cmm_entry_free = ulp_mapper_tfc_cmm_entry_free, + .ulp_mapper_core_if_tbl_process = ulp_mapper_tfc_if_tbl_process, + 
.ulp_mapper_core_ident_alloc_process = ulp_mapper_tfc_ident_alloc, + .ulp_mapper_core_ident_free = ulp_mapper_tfc_ident_free, + .ulp_mapper_core_dyn_tbl_type_get = ulp_mapper_tfc_dyn_tbl_type_get, + .ulp_mapper_core_index_tbl_alloc_process = ulp_mapper_tfc_index_tbl_alloc_process, + .ulp_mapper_core_app_glb_res_info_init = ulp_mapper_tfc_app_glb_resource_info_init, + .ulp_mapper_core_handle_to_offset = ulp_mapper_tfc_handle_to_offset +}; +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.c new file mode 100644 index 000000000000..c670f66df226 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_ulp.h" +#include "tf_ext_flow_handle.h" +#include "ulp_mark_mgr.h" +#include "bnxt_tf_common.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#define ULP_MARK_DB_ENTRY_SET_VALID(mark_info) ((mark_info)->flags |=\ + BNXT_ULP_MARK_VALID) +#define ULP_MARK_DB_ENTRY_IS_INVALID(mark_info) (!((mark_info)->flags &\ + BNXT_ULP_MARK_VALID)) +#define ULP_MARK_DB_ENTRY_SET_VFR_ID(mark_info) ((mark_info)->flags |=\ + BNXT_ULP_MARK_VFR_ID) +#define ULP_MARK_DB_ENTRY_IS_VFR_ID(mark_info) ((mark_info)->flags &\ + BNXT_ULP_MARK_VFR_ID) +#define ULP_MARK_DB_ENTRY_IS_GLOBAL_HW_FID(mark_info) ((mark_info)->flags &\ + BNXT_ULP_MARK_GLOBAL_HW_FID) + +static inline u32 +ulp_mark_db_idx_get(bool is_gfid, u32 fid, struct bnxt_ulp_mark_tbl *mtbl) +{ + u32 idx = 0, hashtype = 0; + + if (is_gfid) { + TF_GET_HASH_TYPE_FROM_GFID(fid, hashtype); + TF_GET_HASH_INDEX_FROM_GFID(fid, idx); + + /* Need to 
truncate anything beyond supported flows */ + idx &= mtbl->gfid_mask; + if (hashtype) + idx |= mtbl->gfid_type_bit; + } else { + idx = fid; + } + return idx; +} + +/** + * Allocate and Initialize all Mark Manager resources for this ulp context. + * + * @ctxt: The ulp context for the mark manager. + * + */ +int +ulp_mark_db_init(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_mark_tbl *mark_tbl = NULL; + struct bnxt_ulp_device_params *dparms; + u32 dev_id; + + if (!ctxt) + return -EINVAL; + + if (bnxt_ulp_cntxt_dev_id_get(ctxt, &dev_id)) { + netdev_dbg(ctxt->bp->dev, "Failed to get dev_id from ulp\n"); + return -EINVAL; + } + + dparms = bnxt_ulp_device_params_get(dev_id); + if (!dparms) { + netdev_dbg(ctxt->bp->dev, "Failed to device parms\n"); + return -EINVAL; + } + + if (!dparms->mark_db_lfid_entries || !dparms->mark_db_gfid_entries) { + netdev_dbg(ctxt->bp->dev, "mark Table is not allocated\n"); + bnxt_ulp_cntxt_ptr2_mark_db_set(ctxt, NULL); + return 0; + } + + mark_tbl = vzalloc(sizeof(*mark_tbl)); + if (!mark_tbl) + goto mem_error; + + /* Need to allocate 2 * Num flows to account for hash type bit.*/ + mark_tbl->lfid_num_entries = dparms->mark_db_lfid_entries; + mark_tbl->lfid_tbl = vzalloc(mark_tbl->lfid_num_entries * + sizeof(struct bnxt_lfid_mark_info)); + if (!mark_tbl->lfid_tbl) + goto mem_error; + + /* Need to allocate 2 * Num flows to account for hash type bit */ + mark_tbl->gfid_num_entries = dparms->mark_db_gfid_entries; + if (!mark_tbl->gfid_num_entries) + goto gfid_not_required; + + mark_tbl->gfid_tbl = vzalloc(mark_tbl->gfid_num_entries * + sizeof(struct bnxt_gfid_mark_info)); + if (!mark_tbl->gfid_tbl) + goto mem_error; + + /* These values are used to compress the FID to the allowable index + * space. The FID from hw may be the full hash which may be a big + * value to allocate and so allocate only needed hash values. 
+ * gfid mask is the number of flow entries for the each left/right + * hash The gfid type bit is used to get to the higher or lower hash + * entries. + */ + + mark_tbl->gfid_mask = (mark_tbl->gfid_num_entries / 2) - 1; + mark_tbl->gfid_type_bit = (mark_tbl->gfid_num_entries / 2); + + netdev_dbg(ctxt->bp->dev, "GFID Max = 0x%08x GFID MASK = 0x%08x\n", + mark_tbl->gfid_num_entries - 1, + mark_tbl->gfid_mask); + +gfid_not_required: + /* Add the mark tbl to the ulp context. */ + bnxt_ulp_cntxt_ptr2_mark_db_set(ctxt, mark_tbl); + return 0; + +mem_error: + if (mark_tbl) { + vfree(mark_tbl->gfid_tbl); + vfree(mark_tbl->lfid_tbl); + vfree(mark_tbl); + } + netdev_dbg(ctxt->bp->dev, "Failed to allocate memory for mark mgr\n"); + return -ENOMEM; +} + +/** + * Release all resources in the Mark Manager for this ulp context + * + * @ctxt: The ulp context for the mark manager + * + */ +int +ulp_mark_db_deinit(struct bnxt_ulp_context *ctxt) +{ + struct bnxt_ulp_mark_tbl *mtbl; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + + if (mtbl) { + vfree(mtbl->gfid_tbl); + vfree(mtbl->lfid_tbl); + vfree(mtbl); + /* Safe to ignore on deinit */ + (void)bnxt_ulp_cntxt_ptr2_mark_db_set(ctxt, NULL); + } + + return 0; +} + +/** + * Get a Mark from the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * + * @is_gfid: The type of fid (GFID or LFID) + * + * @fid: The flow id that is returned by HW in BD + * + * @vfr_flag:.it indicatesif mark is vfr_id or mark id + * + * @mark: The mark that is associated with the FID + * + */ +int +ulp_mark_db_mark_get(struct bnxt_ulp_context *ctxt, + bool is_gfid, + u32 fid, + u32 *vfr_flag, + u32 *mark) +{ + struct bnxt_ulp_mark_tbl *mtbl; + u32 idx = 0; + + if (!ctxt || !mark) + return -EINVAL; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) + return -EINVAL; + + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + + if (is_gfid) { + if (idx >= mtbl->gfid_num_entries || + ULP_MARK_DB_ENTRY_IS_INVALID(&mtbl->gfid_tbl[idx])) + 
return -EINVAL; + + *vfr_flag = ULP_MARK_DB_ENTRY_IS_VFR_ID(&mtbl->gfid_tbl[idx]); + *mark = mtbl->gfid_tbl[idx].mark_id; + } else { + if (idx >= mtbl->lfid_num_entries || + ULP_MARK_DB_ENTRY_IS_INVALID(&mtbl->lfid_tbl[idx])) + return -EINVAL; + + *vfr_flag = ULP_MARK_DB_ENTRY_IS_VFR_ID(&mtbl->lfid_tbl[idx]); + *mark = mtbl->lfid_tbl[idx].mark_id; + } + + return 0; +} + +/** + * Adds a Mark to the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * + * @mark_flag: mark flags. + * + * @fid: The flow id that is returned by HW in BD + * + * @mark: The mark to be associated with the FID + * + */ +int +ulp_mark_db_mark_add(struct bnxt_ulp_context *ctxt, + u32 mark_flag, + u32 fid, + u32 mark) +{ + struct bnxt_ulp_mark_tbl *mtbl; + bool is_gfid; + u32 idx = 0; + + if (!ctxt) + return -EINVAL; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) { + netdev_dbg(ctxt->bp->dev, "Unable to get Mark DB\n"); + return -EINVAL; + } + + is_gfid = (mark_flag & BNXT_ULP_MARK_GLOBAL_HW_FID); + if (is_gfid) { + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + if (idx >= mtbl->gfid_num_entries) { + netdev_dbg(ctxt->bp->dev, "Mark index greater than allocated\n"); + return -EINVAL; + } + netdev_dbg(ctxt->bp->dev, "Set GFID[0x%0x] = 0x%0x\n", idx, mark); + mtbl->gfid_tbl[idx].mark_id = mark; + ULP_MARK_DB_ENTRY_SET_VALID(&mtbl->gfid_tbl[idx]); + + } else { + /* For the LFID, the FID is used as the index */ + if (fid >= mtbl->lfid_num_entries) { + netdev_dbg(ctxt->bp->dev, "Mark index greater than allocated\n"); + return -EINVAL; + } + netdev_dbg(ctxt->bp->dev, "Set LFID[0x%0x] = 0x%0x\n", fid, mark); + mtbl->lfid_tbl[fid].mark_id = mark; + ULP_MARK_DB_ENTRY_SET_VALID(&mtbl->lfid_tbl[fid]); + + if (mark_flag & BNXT_ULP_MARK_VFR_ID) + ULP_MARK_DB_ENTRY_SET_VFR_ID(&mtbl->lfid_tbl[fid]); + } + + return 0; +} + +/** + * Removes a Mark from the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * + * @mark_flag: mark flags. 
+ * + * @fid: The flow id that is returned by HW in BD + * + */ +int +ulp_mark_db_mark_del(struct bnxt_ulp_context *ctxt, + u32 mark_flag, + u32 fid) +{ + struct bnxt_ulp_mark_tbl *mtbl; + bool is_gfid; + u32 idx = 0; + + if (!ctxt) + return -EINVAL; + + mtbl = bnxt_ulp_cntxt_ptr2_mark_db_get(ctxt); + if (!mtbl) { + netdev_dbg(ctxt->bp->dev, "Unable to get Mark DB\n"); + return -EINVAL; + } + + is_gfid = (mark_flag & BNXT_ULP_MARK_GLOBAL_HW_FID); + if (is_gfid) { + idx = ulp_mark_db_idx_get(is_gfid, fid, mtbl); + if (idx >= mtbl->gfid_num_entries) { + netdev_dbg(ctxt->bp->dev, "Mark index greater than allocated\n"); + return -EINVAL; + } + netdev_dbg(ctxt->bp->dev, "Reset GFID[0x%0x]\n", idx); + memset(&mtbl->gfid_tbl[idx], 0, + sizeof(struct bnxt_gfid_mark_info)); + + } else { + /* For the LFID, the FID is used as the index */ + if (fid >= mtbl->lfid_num_entries) { + netdev_dbg(ctxt->bp->dev, "Mark index greater than allocated\n"); + return -EINVAL; + } + memset(&mtbl->lfid_tbl[fid], 0, + sizeof(struct bnxt_lfid_mark_info)); + } + + return 0; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.h new file mode 100644 index 000000000000..ada2abcca5bc --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_mark_mgr.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_MARK_MGR_H_ +#define _ULP_MARK_MGR_H_ + +#include "bnxt_tf_ulp.h" + +#define BNXT_ULP_MARK_VALID 0x1 +#define BNXT_ULP_MARK_VFR_ID 0x2 +#define BNXT_ULP_MARK_GLOBAL_HW_FID 0x4 +#define BNXT_ULP_MARK_LOCAL_HW_FID 0x8 + +struct bnxt_lfid_mark_info { + u16 mark_id; + u16 flags; +}; + +struct bnxt_gfid_mark_info { + u32 mark_id; + u16 flags; +}; + +struct bnxt_ulp_mark_tbl { + struct bnxt_lfid_mark_info *lfid_tbl; + struct bnxt_gfid_mark_info *gfid_tbl; + u32 lfid_num_entries; + u32 gfid_num_entries; + u32 gfid_mask; + u32 gfid_type_bit; +}; + +/** + * Allocate and Initialize all Mark Manager resources for this ulp context. + * + * Initialize MARK database for GFID & LFID tables + * GFID: Global flow id which is based on EEM hash id. + * LFID: Local flow id which is the CFA action pointer. + * GFID is used for EEM flows, LFID is used for EM flows. + * + * Flow mapper modules adds mark_id in the MARK database. + * + * BNXT PMD receive handler extracts the hardware flow id from the + * received completion record. Fetches mark_id from the MARK + * database using the flow id. Injects mark_id into the packet's mbuf. + * + * @ctxt: The ulp context for the mark manager. + */ +int +ulp_mark_db_init(struct bnxt_ulp_context *ctxt); + +/** + * Release all resources in the Mark Manager for this ulp context + * + * Frees the LFID and GFID mark tables and detaches the
+ * mark database from the ulp context. + * + * @ctxt: The ulp context for the mark manager + */ +int +ulp_mark_db_deinit(struct bnxt_ulp_context *ctxt); + +/** + * Get a Mark from the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * @is_gfid: The type of fid (GFID or LFID) + * @fid: The flow id that is returned by HW in BD + * @vfr_flag: It indicates if mark is vfr_id or mark id + * @mark: The mark that is associated with the FID + */ +int +ulp_mark_db_mark_get(struct bnxt_ulp_context *ctxt, + bool is_gfid, + u32 fid, + u32 *vfr_flag, + u32 *mark); + +/** + * Adds a Mark to the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * @mark_flag: mark flags. + * @fid: The flow id that is returned by HW in BD + * @mark: The mark to be associated with the FID + */ +int +ulp_mark_db_mark_add(struct bnxt_ulp_context *ctxt, + u32 mark_flag, + u32 gfid, + u32 mark); + +/** + * Removes a Mark from the Mark Manager + * + * @ctxt: The ulp context for the mark manager + * @mark_flag: mark flags + * @fid: The flow id that is returned by HW in BD + */ +int +ulp_mark_db_mark_del(struct bnxt_ulp_context *ctxt, + u32 mark_flag, + u32 gfid); + +#endif /* _ULP_MARK_MGR_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.c new file mode 100644 index 000000000000..a0bc6d2bfe72 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved.
+ */ + +#include "bnxt_compat.h" +#include "ulp_matcher.h" +#include "ulp_utils.h" +#include "ulp_template_debug_proto.h" +#include + + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +static int ulp_matcher_class_list_lookup(struct ulp_tc_parser_params *params, + u32 *class_match_idx) +{ + struct bnxt_ulp_class_match_info *class_list = ulp_class_match_list; + u32 idx = 0; + + while (++idx < BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ) { + /* iterate the list of class matches to find header match */ + if (class_list[idx].app_id == params->app_id && + !ULP_BITMAP_CMP(&class_list[idx].hdr_bitmap, + ¶ms->hdr_bitmap)) { + /* Found the match */ + *class_match_idx = idx; + return 0; + } + } + + netdev_dbg(params->ulp_ctx->bp->dev, "Did not find any matching protocol hdr\n"); + return -1; +} + +static int ulp_matcher_action_list_lookup(struct ulp_tc_parser_params *params, + u32 *act_tmpl_idx) +{ + struct bnxt_ulp_act_match_info *act_list = ulp_act_match_list; + u64 act_bits = params->act_bitmap.bits; + u32 idx = 0; + + while (++idx < BNXT_ULP_ACT_MATCH_LIST_MAX_SZ) { + /* iterate the list of action matches to find header match */ + if ((act_bits & act_list[idx].act_bitmap.bits) == act_bits) { + /* Found the match */ + *act_tmpl_idx = act_list[idx].act_tid; + /* set the comp field to enable action reject cond */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_REJ_COND_EN, 1); + return 0; + } + } + return -1; +} + +static int ulp_matcher_class_hdr_field_validate(struct ulp_tc_parser_params + *params, u32 idx) +{ + struct bnxt_ulp_class_match_info *info = &ulp_class_match_list[idx]; + u64 bitmap; + + /* manadatory fields should be enabled */ + if ((params->fld_s_bitmap.bits & info->field_man_bitmap) != + info->field_man_bitmap){ + netdev_dbg(params->ulp_ctx->bp->dev, + "mismatch in manadatory hdr fields\n"); + return -EINVAL; + } + + /* optional fields may be enabled or not */ + bitmap = params->fld_s_bitmap.bits & (~info->field_man_bitmap); + 
if ((bitmap && (bitmap & info->field_opt_bitmap) != bitmap)) { + netdev_dbg(params->ulp_ctx->bp->dev, + "mismatch in optional hdr fields\n"); + return -EINVAL; + } + + return 0; +} + +static u64 ulp_matcher_class_hdr_field_signature(struct ulp_tc_parser_params + *params, u32 idx) +{ + struct bnxt_ulp_class_match_info *info = &ulp_class_match_list[idx]; + + /* remove the exclude bits */ + return (params->fld_s_bitmap.bits & ~info->field_exclude_bitmap); +} + +static u64 +ulp_matcher_class_wc_fld_get(u32 idx) +{ + struct bnxt_ulp_class_match_info *info = &ulp_class_match_list[idx]; + u64 bits; + + bits = info->field_opt_bitmap | info->field_man_bitmap; + bits &= ~info->field_exclude_bitmap; + return bits; +} + +static struct ulp_matcher_class_db_node * +ulp_matcher_class_hash_lookup(struct bnxt_ulp_matcher_data *mdata, + struct ulp_tc_parser_params *params) +{ + struct ulp_matcher_class_db_node *matcher_node; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + + /* populate the key for the search */ + key.app_id = params->app_id; + key.hdr_bitmap = params->hdr_bitmap; + + matcher_node = rhashtable_lookup_fast(&mdata->class_matcher_db, &key, + mdata->class_matcher_db_ht_params); + if (!matcher_node) + return NULL; + + if (!matcher_node->in_use) { + netdev_dbg(params->ulp_ctx->bp->dev, + "Matcher database is corrupt\n"); + return NULL; + } + return matcher_node; +} + +static struct ulp_matcher_class_db_node * +ulp_matcher_class_hash_add(struct bnxt_ulp_matcher_data *matcher_data, + struct ulp_tc_parser_params *params, + int class_match_idx) +{ + struct ulp_matcher_class_db_node *matcher_node; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + int rc; + + /* populate the key for the search */ + key.app_id = params->app_id; + key.hdr_bitmap = params->hdr_bitmap; + + matcher_node = kzalloc(sizeof(*matcher_node), GFP_KERNEL); + if (!matcher_node) + return NULL; + + matcher_node->key = key; + matcher_node->in_use = 1; + matcher_node->match_info_idx = class_match_idx; + rc = 
rhashtable_insert_fast(&matcher_data->class_matcher_db, &matcher_node->node, + matcher_data->class_matcher_db_ht_params); + if (rc) { + netdev_dbg(params->ulp_ctx->bp->dev, + "unable add the entry to matcher hash: %d\n", + class_match_idx); + kfree_rcu(matcher_node, rcu); + return NULL; + } + + netdev_dbg(params->ulp_ctx->bp->dev, + "Added entry: %d to matcher hash\n", + class_match_idx); + return matcher_node; +} + +/* Function to handle the matching of RTE Flows and validating + * the pattern masks against the flow templates. + */ +int +ulp_matcher_pattern_match(struct ulp_tc_parser_params *params, + u32 *class_id) +{ + struct ulp_matcher_class_db_node *matcher_node; + struct bnxt_ulp_class_match_info *class_match; + struct bnxt_ulp_matcher_data *matcher_data; + u32 class_match_idx = 0; + u64 bits = 0; + + /* Get the matcher data for hash lookup */ + matcher_data = (struct bnxt_ulp_matcher_data *) + bnxt_ulp_cntxt_ptr2_matcher_data_get(params->ulp_ctx); + if (!matcher_data) { + netdev_dbg(params->ulp_ctx->bp->dev, + "Failed to get the ulp matcher data\n"); + return -EINVAL; + } + + bits = bnxt_ulp_cntxt_ptr2_default_class_bits_get(params->ulp_ctx); + params->hdr_bitmap.bits |= bits; + + /* search the matcher hash db for the entry */ + matcher_node = ulp_matcher_class_hash_lookup(matcher_data, params); + if (!matcher_node) { + /* find the class list entry */ + if (ulp_matcher_class_list_lookup(params, &class_match_idx)) + goto error; + + /* add it to the hash */ + matcher_node = ulp_matcher_class_hash_add(matcher_data, params, + class_match_idx); + if (!matcher_node) + goto error; + } else { + class_match_idx = matcher_node->match_info_idx; + } + + class_match = &ulp_class_match_list[matcher_node->match_info_idx]; + + /* perform the field bitmap validation */ + if (ulp_matcher_class_hdr_field_validate(params, + matcher_node->match_info_idx)) + goto error; + + /* Update the fields for further processing */ + *class_id = class_match->class_tid; + 
params->class_info_idx = matcher_node->match_info_idx; + params->flow_sig_id = + ulp_matcher_class_hdr_field_signature(params, class_match_idx); + params->flow_pattern_id = class_match->flow_pattern_id; + params->wc_field_bitmap = ulp_matcher_class_wc_fld_get(class_match_idx); + params->exclude_field_bitmap = class_match->field_exclude_bitmap; + + netdev_dbg(params->ulp_ctx->bp->dev, + "Found matching pattern template %u:%d\n", + class_match_idx, class_match->class_tid); + return BNXT_TF_RC_SUCCESS; + +error: + netdev_err(params->ulp_ctx->bp->dev, "Did not find any matching template\n"); + netdev_err(params->ulp_ctx->bp->dev, + "hid:0x%x, Hdr:0x%llx Fld:0x%llx SFld:0x%llx\n", + class_match_idx, params->hdr_bitmap.bits, + params->fld_bitmap.bits, params->fld_s_bitmap.bits); + *class_id = 0; + return BNXT_TF_RC_ERROR; +} + +static struct ulp_matcher_act_db_node * +ulp_matcher_action_hash_lookup(struct bnxt_ulp_matcher_data *mdata, + struct ulp_tc_parser_params *params) +{ + struct ulp_matcher_act_db_node *matcher_node; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + + /* populate the key for the search */ + key.hdr_bitmap = params->act_bitmap; + + matcher_node = rhashtable_lookup_fast(&mdata->act_matcher_db, &key, + mdata->act_matcher_db_ht_params); + if (!matcher_node) + return NULL; + + return matcher_node; +} + +static struct ulp_matcher_act_db_node * +ulp_matcher_action_hash_add(struct bnxt_ulp_matcher_data *matcher_data, + struct ulp_tc_parser_params *params, + int class_match_idx) +{ + struct ulp_matcher_act_db_node *matcher_node; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + int rc; + + /* populate the key for the search */ + key.hdr_bitmap = params->act_bitmap; + + matcher_node = kzalloc(sizeof(*matcher_node), GFP_KERNEL); + if (!matcher_node) + return NULL; + + matcher_node->key = key; + matcher_node->match_info_idx = class_match_idx; + rc = rhashtable_insert_fast(&matcher_data->act_matcher_db, &matcher_node->node, + 
matcher_data->act_matcher_db_ht_params); + if (rc) { + netdev_dbg(params->ulp_ctx->bp->dev, + "unable add the entry to matcher hash: %d\n", + class_match_idx); + kfree_rcu(matcher_node, rcu); + return NULL; + } + + netdev_dbg(params->ulp_ctx->bp->dev, + "Added entry: %d to action hash\n", + class_match_idx); + return matcher_node; +} + +/* Function to handle the matching of TC Flows and validating + * the action against the flow templates. + */ +int +ulp_matcher_action_match(struct ulp_tc_parser_params *params, + u32 *act_id) +{ + struct ulp_matcher_act_db_node *matcher_node; + struct bnxt_ulp_act_match_info *action_match; + struct bnxt_ulp_matcher_data *matcher_data; + u32 act_match_idx = 0; + u64 bits = 0; + + /* Get the matcher data for hash lookup */ + matcher_data = (struct bnxt_ulp_matcher_data *) + bnxt_ulp_cntxt_ptr2_matcher_data_get(params->ulp_ctx); + if (!matcher_data) { + netdev_dbg(params->ulp_ctx->bp->dev, "Failed to get the ulp matcher data\n"); + return -EINVAL; + } + + bits = bnxt_ulp_cntxt_ptr2_default_act_bits_get(params->ulp_ctx); + params->act_bitmap.bits |= bits; + + /* search the matcher hash db for the entry */ + matcher_node = ulp_matcher_action_hash_lookup(matcher_data, params); + if (!matcher_node) { + /* find the action list entry */ + if (ulp_matcher_action_list_lookup(params, &act_match_idx)) + goto error; + + /* add it to the hash */ + matcher_node = ulp_matcher_action_hash_add(matcher_data, params, + act_match_idx); + if (!matcher_node) + goto error; + } else { + act_match_idx = matcher_node->match_info_idx; + } + + action_match = &ulp_act_match_list[matcher_node->match_info_idx]; + + /* Update the fields for further processing */ + *act_id = action_match->act_tid; + params->act_info_idx = matcher_node->match_info_idx; + + netdev_dbg(params->ulp_ctx->bp->dev, "Found matching action templ %u\n", act_match_idx); + *act_id = act_match_idx; + return BNXT_TF_RC_SUCCESS; + +error: + netdev_err(params->ulp_ctx->bp->dev, "Did not find any 
matching action template\n"); + netdev_err(params->ulp_ctx->bp->dev, "Hdr:%llx\n", params->act_bitmap.bits); + *act_id = 0; + return BNXT_TF_RC_ERROR; +} + +static const struct rhashtable_params ulp_matcher_class_ht_params = { + .head_offset = offsetof(struct ulp_matcher_class_db_node, node), + .key_offset = offsetof(struct ulp_matcher_class_db_node, key), + .key_len = sizeof(struct ulp_matcher_hash_db_key), + .automatic_shrinking = true +}; + +static const struct rhashtable_params ulp_matcher_act_ht_params = { + .head_offset = offsetof(struct ulp_matcher_act_db_node, node), + .key_offset = offsetof(struct ulp_matcher_act_db_node, key), + .key_len = sizeof(struct ulp_matcher_hash_db_key), + .automatic_shrinking = true +}; + +int ulp_matcher_init(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_matcher_data *data; + int rc; + + data = vzalloc(sizeof(*data)); + if (!data) + return -ENOMEM; + + if (bnxt_ulp_cntxt_ptr2_matcher_data_set(ulp_ctx, data)) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to set matcher data in context\n"); + goto free_matcher_data; + } + + data->class_matcher_db_ht_params = ulp_matcher_class_ht_params; + rc = rhashtable_init(&data->class_matcher_db, + &data->class_matcher_db_ht_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to create matcher hash table\n"); + goto clear_matcher_data; + } + + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to create class matcher hash table\n"); + goto clear_matcher_data; + } + + data->act_matcher_db_ht_params = ulp_matcher_act_ht_params; + rc = rhashtable_init(&data->act_matcher_db, + &data->act_matcher_db_ht_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to create action matcher hash table\n"); + goto clear_matcher_data; + } + + return 0; + +clear_matcher_data: + bnxt_ulp_cntxt_ptr2_matcher_data_set(ulp_ctx, NULL); +free_matcher_data: + vfree(data); + return -ENOMEM; +} + +static void ulp_matcher_class_hash_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct 
bnxt_ulp_class_match_info *class_list = ulp_class_match_list; + struct ulp_matcher_class_db_node *matcher_node = NULL; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + struct bnxt_ulp_matcher_data *mdata; + u32 idx = 0; + int rc; + + /* Get the matcher data for hash lookup */ + mdata = (struct bnxt_ulp_matcher_data *) + bnxt_ulp_cntxt_ptr2_matcher_data_get(ulp_ctx); + if (!mdata) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to get the ulp matcher data\n"); + return; + } + + while (++idx < BNXT_ULP_CLASS_MATCH_LIST_MAX_SZ) { + /* iterate the list of class matches to find header match */ + key.app_id = class_list[idx].app_id; + key.hdr_bitmap.bits = class_list[idx].hdr_bitmap.bits; + + matcher_node = rhashtable_lookup_fast(&mdata->class_matcher_db, &key, + mdata->class_matcher_db_ht_params); + if (!matcher_node) + continue; + rc = rhashtable_remove_fast(&mdata->class_matcher_db, + &matcher_node->node, + mdata->class_matcher_db_ht_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to remove: %d from class matcher hash\n", + idx); + continue; + } + netdev_dbg(ulp_ctx->bp->dev, + "Removed entry: %d from matcher hash\n", + idx); + kfree(matcher_node); + } +} + +static void ulp_matcher_act_hash_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_act_match_info *act_list = ulp_act_match_list; + struct ulp_matcher_class_db_node *matcher_node = NULL; + struct ulp_matcher_hash_db_key key = {{ 0 }}; + struct bnxt_ulp_matcher_data *mdata; + u32 idx = 0; + int rc; + + /* Get the matcher data for hash lookup */ + mdata = (struct bnxt_ulp_matcher_data *) + bnxt_ulp_cntxt_ptr2_matcher_data_get(ulp_ctx); + if (!mdata) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to get the ulp matcher data\n"); + return; + } + + while (++idx < BNXT_ULP_ACT_MATCH_LIST_MAX_SZ) { + /* iterate the list of act matches to find header match */ + key.app_id = act_list[idx].act_tid; + key.hdr_bitmap.bits = act_list[idx].act_bitmap.bits; + + matcher_node = 
rhashtable_lookup_fast(&mdata->act_matcher_db, &key, + mdata->act_matcher_db_ht_params); + if (!matcher_node) + continue; + rc = rhashtable_remove_fast(&mdata->act_matcher_db, + &matcher_node->node, + mdata->act_matcher_db_ht_params); + if (rc) { + netdev_dbg(ulp_ctx->bp->dev, + "Failed to remove: %d from action matcher hash\n", + idx); + continue; + } + netdev_dbg(ulp_ctx->bp->dev, + "Removed entry: %d from action matcher hash\n", + idx); + kfree(matcher_node); + } +} + +void ulp_matcher_deinit(struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_matcher_data *data; + + if (!ulp_ctx) + return; + + data = (struct bnxt_ulp_matcher_data *) + bnxt_ulp_cntxt_ptr2_matcher_data_get(ulp_ctx); + if (!data) + return; + + ulp_matcher_class_hash_deinit(ulp_ctx); + ulp_matcher_act_hash_deinit(ulp_ctx); + rhashtable_destroy(&data->class_matcher_db); + rhashtable_destroy(&data->act_matcher_db); + bnxt_ulp_cntxt_ptr2_matcher_data_set(ulp_ctx, NULL); + vfree(data); +} + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.h new file mode 100644 index 000000000000..24599eb401f5 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_matcher.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef ULP_MATCHER_H_ +#define ULP_MATCHER_H_ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "bnxt_tf_common.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +struct ulp_matcher_hash_db_key { + struct ulp_tc_hdr_bitmap hdr_bitmap; + u8 app_id; +}; + +struct ulp_matcher_class_db_node { + struct ulp_matcher_hash_db_key key; + struct rhash_head node; + u8 in_use; + u16 match_info_idx; + struct rcu_head rcu; +}; + +struct ulp_matcher_act_db_node { + struct ulp_matcher_hash_db_key key; + struct rhash_head node; + struct ulp_tc_hdr_bitmap act_bitmap; + u16 match_info_idx; + struct rcu_head rcu; +}; + +struct bnxt_ulp_matcher_data { + /* hash table to store matcher class info */ + struct rhashtable class_matcher_db; + struct rhashtable_params class_matcher_db_ht_params; + struct rhashtable act_matcher_db; + struct rhashtable_params act_matcher_db_ht_params; +}; + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ + +int ulp_matcher_class_info_add(struct bnxt_ulp_context *ulp_ctx, + u16 hash_idx); +int ulp_matcher_class_info_del(struct bnxt_ulp_context *ulp_ctx, + u16 hash_idx); + +/* Function to handle the matching of RTE Flows and validating + * the pattern masks against the flow templates. + */ +int +ulp_matcher_pattern_match(struct ulp_tc_parser_params *params, + u32 *class_id); + +/* Function to handle the matching of RTE Flows and validating + * the action against the flow templates. 
+ */ +int +ulp_matcher_action_match(struct ulp_tc_parser_params *params, + u32 *act_id); + +int ulp_matcher_init(struct bnxt_ulp_context *ulp_ctx); +void ulp_matcher_deinit(struct bnxt_ulp_context *ulp_ctx); + +#endif /* ULP_MATCHER_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.c new file mode 100644 index 000000000000..0c6ff6bdf491 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2024 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_udcc.h" +#include "ulp_nic_flow.h" +#include "tfc.h" +#include "tfc_util.h" +#include "ulp_generic_flow_offload.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +static int l2_filter_roce_flow_create(struct bnxt *bp, __le64 l2_filter_id, + u32 *flow_id, u64 *flow_cnt_hndl) +{ + struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv6_hdr v6_spec = { 0 }, v6_mask = { 0 }; + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc = 0; + + l2_parms.type = BNXT_ULP_GEN_L2_L2_FILTER_ID; + l2_parms.l2_filter_id = &l2_filter_id; + + /* Pack the L3 Data */ + v6_spec.proto6 = &l4_proto; + v6_mask.proto6 = &l4_proto_mask; + v6_spec.dip6 = NULL; + v6_mask.dip6 = NULL; + v6_spec.sip6 = NULL; + v6_mask.sip6 = NULL; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV6; + l3_parms.v6_spec = &v6_spec; + l3_parms.v6_mask = &v6_mask; + + /* Pack the L4 Data */ + l4_parms.type = 
BNXT_ULP_GEN_L4_BTH; + bth_spec.op_code = NULL; + bth_mask.op_code = NULL; + bth_spec.dst_qpn = NULL; + bth_mask.dst_qpn = NULL; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions - NIC template will use RoCE VNIC always by default */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + actions.dst_fid = bp->pf.fw_fid; + + parms.dir = BNXT_ULP_GEN_RX; + parms.flow_id = flow_id; + parms.counter_hndl = flow_cnt_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) + return rc; + + netdev_dbg(bp->dev, "%s: L2 filter(%llx) ROCE Add Rx flow_id: %d, ctr: 0x%llx\n", + __func__, + l2_filter_id, + *flow_id, + *flow_cnt_hndl); + return rc; +} + +static int l2_filter_roce_cnp_flow_create(struct bnxt *bp, __le64 l2_filter_id, + u32 *cnp_flow_id, u64 *cnp_flow_cnt_hndl) +{ + struct bnxt_ulp_gen_bth_hdr bth_spec = { 0 }, bth_mask = { 0 }; + struct bnxt_ulp_gen_ipv6_hdr v6_spec = { 0 }, v6_mask = { 0 }; + struct bnxt_ulp_gen_l2_hdr_parms l2_parms = { 0 }; + struct bnxt_ulp_gen_l3_hdr_parms l3_parms = { 0 }; + struct bnxt_ulp_gen_l4_hdr_parms l4_parms = { 0 }; + struct bnxt_ulp_gen_action_parms actions = { 0 }; + struct bnxt_ulp_gen_flow_parms parms = { 0 }; + u16 op_code = cpu_to_be16(0x81); /* RoCE CNP */ + u16 op_code_mask = cpu_to_be16(0xffff); + u8 l4_proto = IPPROTO_UDP; + u8 l4_proto_mask = 0xff; + int rc = 0; + + l2_parms.type = BNXT_ULP_GEN_L2_L2_FILTER_ID; + l2_parms.l2_filter_id = &l2_filter_id; + + /* Pack the L3 Data */ + v6_spec.proto6 = &l4_proto; + v6_mask.proto6 = &l4_proto_mask; + v6_spec.dip6 = NULL; + v6_mask.dip6 = NULL; + v6_spec.sip6 = NULL; + v6_mask.sip6 = NULL; + + l3_parms.type = BNXT_ULP_GEN_L3_IPV6; + l3_parms.v6_spec = &v6_spec; + l3_parms.v6_mask = &v6_mask; + + /* Pack the L4 Data */ + bth_spec.op_code = &op_code; + bth_mask.op_code = &op_code_mask; + bth_spec.dst_qpn = NULL; + 
bth_mask.dst_qpn = NULL; + l4_parms.type = BNXT_ULP_GEN_L4_BTH; + l4_parms.bth_spec = &bth_spec; + l4_parms.bth_mask = &bth_mask; + + /* Pack the actions - NIC template will use RoCE VNIC always by default */ + actions.enables = BNXT_ULP_GEN_ACTION_ENABLES_COUNT; + actions.dst_fid = bp->pf.fw_fid; + + parms.dir = BNXT_ULP_GEN_RX; + parms.flow_id = cnp_flow_id; + parms.counter_hndl = cnp_flow_cnt_hndl; + parms.l2 = &l2_parms; + parms.l3 = &l3_parms; + parms.l4 = &l4_parms; + parms.actions = &actions; + parms.priority = 1; /* must be lower priority than UDCC CNP */ + + rc = bnxt_ulp_gen_flow_create(bp, bp->pf.fw_fid, &parms); + if (rc) + return rc; + + netdev_dbg(bp->dev, "%s: ROCE CNP Add Rx flow for fid(%d) flow_id: %d, ctr: 0x%llx\n", + __func__, + bp->pf.fw_fid, + *cnp_flow_id, + *cnp_flow_cnt_hndl); + + return rc; +} + +int bnxt_ulp_nic_flows_roce_add(struct bnxt *bp, __le64 l2_filter_id, + u32 *l2_ctxt_id, u32 *prof_func, + u32 *flow_id, u64 *flow_cnt_hndl, + u32 *cnp_flow_id, u64 *cnp_flow_cnt_hndl) +{ + struct tfc_identifier_info l2_ident_info = { 0 }; + struct tfc_identifier_info prof_ident_info = { 0 }; + struct tfc *tfcp = (struct tfc *)(bp->tfp); + int rc; + + if (!tfcp) { + netdev_dbg(bp->dev, "%s TF core not initialized\n", __func__); + return -EINVAL; + } + + *prof_func = 0; + *l2_ctxt_id = 0; + *flow_id = 0; + *cnp_flow_id = 0; + *flow_cnt_hndl = 0; + *cnp_flow_cnt_hndl = 0; + + l2_ident_info.dir = (enum cfa_dir)TF_DIR_RX; + l2_ident_info.rsubtype = CFA_RSUBTYPE_IDENT_L2CTX; + + rc = tfc_identifier_alloc(tfcp, bp->pf.fw_fid, CFA_TRACK_TYPE_FID, + &l2_ident_info); + if (rc) { + netdev_dbg(bp->dev, "%s: RoCE flow ident alloc failed %d\n", + __func__, rc); + return rc; + } + *l2_ctxt_id = l2_ident_info.id; + + netdev_dbg(bp->dev, "%s: NIC Flow allocate l2 ctxt:%d\n", __func__, + *l2_ctxt_id); + + prof_ident_info.dir = (enum cfa_dir)TF_DIR_RX; + prof_ident_info.rsubtype = CFA_RSUBTYPE_IDENT_PROF_FUNC; + + rc = tfc_identifier_alloc(tfcp, bp->pf.fw_fid, 
CFA_TRACK_TYPE_FID, + &prof_ident_info); + if (rc) { + netdev_dbg(bp->dev, "%s: RoCE flow prof_func alloc failed %d\n", + __func__, rc); + goto cleanup; + } + + *prof_func = prof_ident_info.id; + + netdev_dbg(bp->dev, "%s: NIC Flow allocate prof_func:%d\n", + __func__, *prof_func); + + rc = l2_filter_roce_flow_create(bp, l2_filter_id, flow_id, + flow_cnt_hndl); + if (rc) + goto cleanup; + + rc = l2_filter_roce_cnp_flow_create(bp, l2_filter_id, cnp_flow_id, + cnp_flow_cnt_hndl); + + if (rc) + goto cleanup; + + return rc; + +cleanup: + bnxt_ulp_nic_flows_roce_del(bp, l2_filter_id, *l2_ctxt_id, *prof_func, + *flow_id, *cnp_flow_id); + return rc; +} + +int bnxt_ulp_nic_flows_roce_del(struct bnxt *bp, __le64 l2_filter_id, + u32 l2_ctxt_id, u32 prof_func, + u32 roce_flow_id, u32 roce_cnp_flow_id) +{ + struct tfc_identifier_info l2_ident_info = { 0 }; + struct tfc_identifier_info prof_ident_info = { 0 }; + struct tfc *tfcp = (struct tfc *)(bp->tfp); + int rc_save = 0, rc = 0; + + if (!tfcp) { + netdev_dbg(bp->dev, "%s TF core not initialized\n", __func__); + return -EINVAL; + } + if (l2_ctxt_id) { + l2_ident_info.dir = CFA_DIR_RX; + l2_ident_info.rsubtype = CFA_RSUBTYPE_IDENT_L2CTX; + l2_ident_info.id = l2_ctxt_id; + + rc = tfc_identifier_free(tfcp, bp->pf.fw_fid, &l2_ident_info); + if (rc) { + netdev_dbg(bp->dev, "%s: l2ctx free failed %d\n", __func__, rc); + rc_save = rc; + } + } + if (prof_func) { + prof_ident_info.dir = CFA_DIR_RX; + prof_ident_info.rsubtype = CFA_RSUBTYPE_IDENT_PROF_FUNC; + prof_ident_info.id = prof_func; + + rc = tfc_identifier_free(tfcp, bp->pf.fw_fid, &prof_ident_info); + if (rc) { + netdev_dbg(bp->dev, "%s: prof_func free failed %d\n", __func__, rc); + rc_save = rc; + } + } + if (roce_flow_id) { + rc = bnxt_ulp_gen_flow_destroy(bp, bp->pf.fw_fid, roce_flow_id); + if (rc) { + netdev_dbg(bp->dev, "%s: delete Rx RoCE flow_id: %d failed %d\n", + __func__, roce_flow_id, rc); + rc_save = rc; + } + } + if (roce_cnp_flow_id) { + rc = 
 bnxt_ulp_gen_flow_destroy(bp, bp->pf.fw_fid, roce_cnp_flow_id); + if (rc) { + netdev_dbg(bp->dev, "%s: delete Rx RoCE CNP flow_id: %d failed %d\n", + __func__, roce_cnp_flow_id, rc); + rc_save = rc; + } + } + return rc_save; +} + +#endif /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.h new file mode 100644 index 000000000000..981a9c26c23b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_nic_flow.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2024 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_NIC_FLOW_H_ +#define _ULP_NIC_FLOW_H_ + +/* Add per DMAC RoCE and RoCE CNP flows + * @l2_ctxt_id[out]: pointer to where to store the allocated l2 context ident + * @prof_func[out]: pointer to where to store the allocated profile func ident + * @roce_flow_id[out]: pointer to where to store per DMAC RoCE flow id + * @roce_cnp_flow_id[out]: pointer to where to store per DMAC RoCE CNP flow id + * return 0 on success and negative on failure + */ +int bnxt_ulp_nic_flows_roce_add(struct bnxt *bp, __u64 l2_filter_id, + u32 *l2_ctxt_id, u32 *prof_func, + u32 *flow_id, u64 *flow_cnt_hndl, + u32 *cnp_flow_id, u64 *cnp_flow_cnt_hndl); + +/* Delete per DMAC RoCE and RoCE CNP flows + * @l2_ctxt_id[in]: The l2 context identifier to free + * @prof_func[in]: The profile func identifier to free + * @roce_flow_id[in]: The per DMAC RoCE flow id to free + * @roce_cnp_flow_id[in]: The per DMAC RoCE CNP flow id to free + * return 0 on success and negative on failure + */ +int bnxt_ulp_nic_flows_roce_del(struct bnxt *bp, __u64 l2_filter_id, + u32 l2_ctxt_id, u32 prof_func, + u32 flow_id, u32 cnp_flow_id); + +#endif /* #ifndef _ULP_NIC_FLOW_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.c new file mode 100644 index
000000000000..84a25dc6ca38 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.c @@ -0,0 +1,1047 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "ulp_linux.h" +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_udcc.h" +#include "bnxt_tf_common.h" +#include "ulp_port_db.h" +#include "ulp_tf_debug.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Truflow definitions */ +void +bnxt_get_parent_mac_addr(struct bnxt *bp, u8 *mac) +{ + memcpy(mac, bp->pf.mac_addr, ETH_ALEN); +} + +u16 +bnxt_get_svif(struct bnxt *bp, bool func_svif, + enum bnxt_ulp_intf_type type) +{ + return func_svif ? bp->func_svif : bp->port_svif; +} + +void +bnxt_get_iface_mac(struct bnxt *bp, enum bnxt_ulp_intf_type type, + u8 *mac, u8 *parent_mac) +{ + if (type == BNXT_ULP_INTF_TYPE_PF) { + memcpy(mac, bp->pf.mac_addr, ETH_ALEN); + } else if (type == BNXT_ULP_INTF_TYPE_TRUSTED_VF) { + memcpy(mac, bp->vf.mac_addr, ETH_ALEN); + memcpy(parent_mac, bp->pf.mac_addr, ETH_ALEN); + } + return; +} + +u16 +bnxt_get_parent_vnic_id(struct bnxt *bp, enum bnxt_ulp_intf_type type) +{ + if (type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) + return 0; + + return bp->pf.dflt_vnic_id; +} + +enum bnxt_ulp_intf_type +bnxt_get_interface_type(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + return BNXT_ULP_INTF_TYPE_PF; + else if (BNXT_VF_IS_TRUSTED(bp)) + return BNXT_ULP_INTF_TYPE_TRUSTED_VF; + else if (BNXT_VF(bp)) + return BNXT_ULP_INTF_TYPE_VF; + + return BNXT_ULP_INTF_TYPE_INVALID; +} + +u16 +bnxt_get_vnic_id(struct bnxt *bp, enum bnxt_ulp_intf_type type) +{ +#ifdef CONFIG_VF_REPS + struct bnxt_vf_rep *vf_rep = netdev_priv(bp->dev); + + if (bnxt_dev_is_vf_rep(bp->dev)) + return vf_rep->bp->vnic_info->fw_vnic_id; +#endif + + return bp->vnic_info[0].fw_vnic_id; +} + +u16 +bnxt_vfr_get_fw_func_id(void *vf_rep) +{ +#ifdef CONFIG_VF_REPS + 
	struct bnxt_vf_rep *vfr = vf_rep;

	if (bnxt_dev_is_vf_rep(vfr->dev))
		return bnxt_vf_rep_get_fid(vfr->dev);
#endif

	return 0;
}

/* Return the firmware function id for this device: the VF-rep fid when the
 * netdev is a VF representor, otherwise the PF/VF fid of the function itself.
 * The @type argument is currently unused.
 */
u16
bnxt_get_fw_func_id(struct bnxt *bp, enum bnxt_ulp_intf_type type)
{
#ifdef CONFIG_VF_REPS
	if (bnxt_dev_is_vf_rep(bp->dev))
		return bnxt_vf_rep_get_fid(bp->dev);
#endif

	return BNXT_PF(bp) ? bp->pf.fw_fid : bp->vf.fw_fid;
}

/* Physical port id; a VF representor reports its parent PF's port id. */
u16
bnxt_get_phy_port_id(struct bnxt *bp)
{
#ifdef CONFIG_VF_REPS
	struct bnxt_vf_rep *vf_rep = netdev_priv(bp->dev);

	if (bnxt_dev_is_vf_rep(bp->dev))
		return vf_rep->bp->pf.port_id;
#endif

	return bp->pf.port_id;
}

/* PARIF is derived as (fw_fid - 1) on both the VF-rep and PF/VF paths. */
u16
bnxt_get_parif(struct bnxt *bp)
{
#ifdef CONFIG_VF_REPS
	if (bnxt_dev_is_vf_rep(bp->dev))
		return (bnxt_vf_rep_get_fid(bp->dev) - 1);
#endif

	return BNXT_PF(bp) ? bp->pf.fw_fid - 1 : bp->vf.fw_fid - 1;
}

/* Vport is a one-hot encoding of the physical port id. */
u16
bnxt_get_vport(struct bnxt *bp)
{
	return (1 << bnxt_get_phy_port_id(bp));
}

/**
 * Initialize the port database. Memory is allocated in this
 * call and assigned to the port database.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_cnt: number of physical ports to track
 *
 * Returns 0 on success or negative number on failure.
 */
int ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt, u8 port_cnt)
{
	struct bnxt_ulp_port_db *port_db;
	int rc;

	port_db = vzalloc(sizeof(*port_db));
	if (!port_db)
		return -ENOMEM;

	/* Attach the port database to the ulp context. */
	rc = bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, port_db);
	if (rc) {
		vfree(port_db);
		return rc;
	}

	/* 256 VFs + PFs etc. so making it 512 */
	port_db->ulp_intf_list_size = BNXT_PORT_DB_MAX_INTF_LIST * 2;
	/* Allocate the port tables */
	port_db->ulp_intf_list = vzalloc(port_db->ulp_intf_list_size *
					 sizeof(struct ulp_interface_info));
	if (!port_db->ulp_intf_list)
		goto error_free;

	/* Allocate the phy port list */
	port_db->phy_port_list = vzalloc(port_cnt * sizeof(struct ulp_phy_port_info));
	if (!port_db->phy_port_list)
		goto error_free;

	port_db->phy_port_cnt = port_cnt;
	return 0;

error_free:
	/* deinit detaches the db from the context and frees all tables */
	ulp_port_db_deinit(ulp_ctxt);
	return -ENOMEM;
}

/**
 * Deinitialize the port database. Memory is deallocated in
 * this call.
 *
 * @ulp_ctxt: Ptr to ulp context
 *
 * Returns 0 on success.
 */
int ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt)
{
	struct bnxt_ulp_port_db *port_db;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* Detach the flow database from the ulp context. */
	bnxt_ulp_cntxt_ptr2_port_db_set(ulp_ctxt, NULL);

	/* Free up all the memory. vfree(NULL) is a no-op, so a partially
	 * initialized database (from the init error path) is handled too.
	 */
	vfree(port_db->phy_port_list);
	vfree(port_db->ulp_intf_list);
	vfree(port_db);
	return 0;
}

/**
 * Update the port database. This api is called when the port
 * details are available during the startup.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @bp: ptr to the device function.
 * @vf_rep: VF representor (struct bnxt_vf_rep *), or NULL for the PF itself.
 *
 * Returns 0 on success or negative number on failure.
 */
#if defined(CONFIG_VF_REPS) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)

/* Find the first free slot in the interface list. Index 0 is reserved as
 * the "invalid" sentinel, so the scan starts at 1 and 0 signals "full".
 */
static u32
ulp_port_db_allocate_ifindex(struct bnxt_ulp_context *ulp_ctx,
			     struct bnxt_ulp_port_db *port_db)
{
	u32 idx = 1;

	while (idx < port_db->ulp_intf_list_size &&
	       port_db->ulp_intf_list[idx].type != BNXT_ULP_INTF_TYPE_INVALID)
		idx++;

	if (idx >= port_db->ulp_intf_list_size) {
		netdev_dbg(ulp_ctx->bp->dev, "Port DB interface list is full\n");
		return 0;
	}
	return idx;
}

int ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt,
				     struct bnxt *bp, void *vf_rep)
{
	struct ulp_phy_port_info *port_data;
	struct bnxt_ulp_port_db *port_db;
	struct bnxt_vf_rep *vfr = vf_rep;
	struct ulp_interface_info *intf;
	struct ulp_func_if_info *func;
	u32 ifindex;
	u32 port_id;
	uint8_t tsid;
	int rc;

	/* The database is keyed by fw fid: the PF's own fid, or the
	 * represented VF's fid when called for a VF representor.
	 */
#if defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD)
	port_id = bp->pf.fw_fid;
#else
	if (!vfr)
		port_id = bp->pf.fw_fid;
	else
		port_id = bp->pf.vf[vfr->vf_idx].fw_fid;
#endif

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	rc = ulp_port_db_dev_port_to_ulp_index(ulp_ctxt, port_id, &ifindex);
	if (rc == -ENOENT) {
		/* port not found, allocate one */
		ifindex = ulp_port_db_allocate_ifindex(ulp_ctxt, port_db);
		if (!ifindex)
			return -ENOMEM;
		port_db->dev_port_list[port_id] = ifindex;
	} else if (rc == -EINVAL) {
		return -EINVAL;
	}

	/* update the interface details */
	intf = &port_db->ulp_intf_list[ifindex];

	if (!vfr)
		intf->type = bnxt_get_interface_type(bp);
	else
		intf->type = BNXT_ULP_INTF_TYPE_VF_REP;
	intf->drv_func_id = bnxt_get_fw_func_id(bp,
						BNXT_ULP_INTF_TYPE_INVALID);
	intf->rdma_sriov_en = BNXT_RDMA_SRIOV_EN(bp) ? 1 : 0;

	/* Update if UDCC is enabled on the PF */
	intf->udcc_en = bnxt_udcc_get_mode(bp);

	/* Populate the driver-function entry only once; later calls for the
	 * same function reuse the cached values.
	 */
	func = &port_db->ulp_func_id_tbl[intf->drv_func_id];
	if (!func->func_valid) {
		func->func_svif = bnxt_get_svif(bp, true,
						BNXT_ULP_INTF_TYPE_INVALID);
		func->func_spif = bnxt_get_phy_port_id(bp);
		func->func_parif = bnxt_get_parif(bp);
		/* Temporary fix, more concrete fix is needed in the future.
		 * sriov_disable/set_channels threads might have freed bp->vnic_info.
		 */
		if (!bp->vnic_info) {
			netdev_dbg(bp->dev, "VNIC information is not initialized\n");
			return -EINVAL;
		}
		func->func_vnic =
			bnxt_get_vnic_id(bp, BNXT_ULP_INTF_TYPE_INVALID);
		bnxt_hwrm_get_dflt_roce_vnic(bp, -1,
					     &func->func_roce_vnic);
		func->func_roce_vnic = cpu_to_be16(func->func_roce_vnic);
		func->phy_port_id = bnxt_get_phy_port_id(bp);
		func->func_valid = true;
		func->ifindex = ifindex;
		/* Table scope is defined for all devices, ignore failures. */
		if (!bnxt_ulp_cntxt_tsid_get(ulp_ctxt, &tsid))
			func->table_scope = tsid;
	}

	/* For a VF-rep, additionally populate (and always refresh) the
	 * represented VF's function entry; note 'func' is repointed here.
	 */
	if (intf->type == BNXT_ULP_INTF_TYPE_VF_REP) {
		intf->vf_func_id =
			bnxt_vfr_get_fw_func_id(vfr);
		func = &port_db->ulp_func_id_tbl[intf->vf_func_id];
		bnxt_hwrm_get_dflt_vnic_svif(bp, intf->vf_func_id,
					     &func->func_vnic,
					     &func->func_svif);
		bnxt_hwrm_get_dflt_roce_vnic(bp, intf->vf_func_id,
					     &func->func_roce_vnic);
		func->func_roce_vnic = cpu_to_be16(func->func_roce_vnic);
		func->func_spif = bnxt_get_phy_port_id(bp);
		func->func_parif = bnxt_get_parif(bp);
		func->phy_port_id = bnxt_get_phy_port_id(bp);
		func->ifindex = ifindex;
		func->func_valid = true;
		func->vf_meta_data = cpu_to_be16(BNXT_ULP_META_VF_FLAG |
						 intf->vf_func_id);
		if (!bnxt_ulp_cntxt_tsid_get(ulp_ctxt, &tsid))
			func->table_scope = tsid;
	}

	/* When there is no match, the default action is to send the packet to
	 * the kernel. And to send it to the kernel, we need the PF's vnic id.
	 */
	func->func_parent_vnic = bnxt_get_parent_vnic_id(bp, intf->type);
	func->func_parent_vnic = cpu_to_be16(func->func_parent_vnic);
	bnxt_get_iface_mac(bp, intf->type, func->func_mac,
			   func->func_parent_mac);
	port_data = &port_db->phy_port_list[func->phy_port_id];
	if (!port_data->port_valid) {
		port_data->port_svif =
			bnxt_get_svif(bp, false,
				      BNXT_ULP_INTF_TYPE_INVALID);
		port_data->port_spif = bnxt_get_phy_port_id(bp);
		port_data->port_parif = bnxt_get_parif(bp);
		port_data->port_vport = bnxt_get_vport(bp);
		port_data->port_valid = true;
	}
	ulp_port_db_dump(ulp_ctxt, port_db, intf, port_id);
	return 0;
}

#else

/* Stub when neither VF-reps nor custom flower offload are configured. */
int ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt,
				     struct bnxt *bp, void *vf_rep)
{
	return -EINVAL;
}

#endif

/**
 * Api to get the ulp ifindex for a given device port.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @ifindex: ulp ifindex
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
				  u32 port_id,
				  u32 *ifindex)
{
	struct bnxt_ulp_port_db *port_db;

	*ifindex = 0;
	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || port_id >= TC_MAX_ETHPORTS) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	if (!port_db->dev_port_list[port_id]) {
		netdev_dbg(ulp_ctxt->bp->dev,
			   "Port: %d not present in port_db\n", port_id);
		return -ENOENT;
	}

	*ifindex = port_db->dev_port_list[port_id];
	return 0;
}

/**
 * Api to get the function id for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @fid_type: BNXT_ULP_DRV_FUNC_FID or BNXT_ULP_VF_FUNC_FID
 * @func_id: the function id of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
			    u32 ifindex,
			    u32 fid_type,
			    u16 *func_id)
{
	struct bnxt_ulp_port_db *port_db;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* Any fid_type other than DRV_FUNC_FID selects the VF fid. */
	if (fid_type == BNXT_ULP_DRV_FUNC_FID)
		*func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
	else
		*func_id = port_db->ulp_intf_list[ifindex].vf_func_id;

	return 0;
}

/**
 * Api to get the VF RoCE support for a given device port.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @vf_roce: set to the port's rdma_sriov_en flag
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_vf_roce_get(struct bnxt_ulp_context *ulp_ctxt,
			u32 port_id,
			u16 *vf_roce)
{
	struct bnxt_ulp_port_db *port_db;
	u32 ifindex;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || port_id >= TC_MAX_ETHPORTS) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	ifindex = port_db->dev_port_list[port_id];
	if (!ifindex)
		return -ENOENT;

	*vf_roce = port_db->ulp_intf_list[ifindex].rdma_sriov_en;

	return 0;
}

/**
 * Api to get the UDCC support for a given device port.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @udcc: set to the port's UDCC mode
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_udcc_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 port_id,
		     u8 *udcc)
{
	struct bnxt_ulp_port_db *port_db;
	u32 ifindex;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || port_id >= TC_MAX_ETHPORTS) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	ifindex = port_db->dev_port_list[port_id];
	if (!ifindex)
		return -ENOENT;

	*udcc = port_db->ulp_intf_list[ifindex].udcc_en;

	return 0;
}

/**
 * Api to get the svif for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @svif_type: the svif type of the given ifindex.
 * @svif: the svif of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 ifindex,
		     u32 svif_type,
		     u16 *svif)
{
	struct bnxt_ulp_port_db *port_db;
	u16 phy_port_id, func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* DRV/VF types read the per-function svif; anything else falls back
	 * to the physical port svif of the driver function's port.
	 */
	if (svif_type == BNXT_ULP_DRV_FUNC_SVIF) {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		*svif = port_db->ulp_func_id_tbl[func_id].func_svif;
	} else if (svif_type == BNXT_ULP_VF_FUNC_SVIF) {
		func_id = port_db->ulp_intf_list[ifindex].vf_func_id;
		*svif = port_db->ulp_func_id_tbl[func_id].func_svif;
	} else {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
		*svif = port_db->phy_port_list[phy_port_id].port_svif;
	}

	return 0;
}

/**
 * Api to get the spif for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @spif_type: the spif type of the given ifindex.
 * @spif: the spif of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_spif_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 ifindex,
		     u32 spif_type,
		     u16 *spif)
{
	struct bnxt_ulp_port_db *port_db;
	u16 phy_port_id, func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* DRV/VF types read the per-function spif; anything else falls back
	 * to the physical port spif of the driver function's port.
	 */
	if (spif_type == BNXT_ULP_DRV_FUNC_SPIF) {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		*spif = port_db->ulp_func_id_tbl[func_id].func_spif;
	} else if (spif_type == BNXT_ULP_VF_FUNC_SPIF) {
		func_id = port_db->ulp_intf_list[ifindex].vf_func_id;
		*spif = port_db->ulp_func_id_tbl[func_id].func_spif;
	} else {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
		*spif = port_db->phy_port_list[phy_port_id].port_spif;
	}

	return 0;
}

/**
 * Api to get the parif for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @parif_type: the parif type of the given ifindex.
 * @parif: the parif of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_parif_get(struct bnxt_ulp_context *ulp_ctxt,
		      u32 ifindex,
		      u32 parif_type,
		      u16 *parif)
{
	struct bnxt_ulp_port_db *port_db;
	u16 phy_port_id, func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	if (parif_type == BNXT_ULP_DRV_FUNC_PARIF) {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		*parif = port_db->ulp_func_id_tbl[func_id].func_parif;
	} else if (parif_type == BNXT_ULP_VF_FUNC_PARIF) {
		func_id = port_db->ulp_intf_list[ifindex].vf_func_id;
		*parif = port_db->ulp_func_id_tbl[func_id].func_parif;
	} else {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
		*parif = port_db->phy_port_list[phy_port_id].port_parif;
	}
	/* Parif needs to be reset to a free partition */
	*parif += BNXT_ULP_FREE_PARIF_BASE;

	return 0;
}

/**
 * Api to get the vnic id for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @vnic_type: BNXT_ULP_DRV_FUNC_VNIC or BNXT_ULP_VF_FUNC_VNIC
 * @vnic: the vnic of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			     u32 ifindex,
			     u32 vnic_type,
			     u16 *vnic)
{
	struct bnxt_ulp_port_db *port_db;
	u16 func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	if (vnic_type == BNXT_ULP_DRV_FUNC_VNIC) {
		func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		*vnic = port_db->ulp_func_id_tbl[func_id].func_vnic;
	} else {
		func_id = port_db->ulp_intf_list[ifindex].vf_func_id;
		*vnic = port_db->ulp_func_id_tbl[func_id].func_vnic;
	}

	return 0;
}

/**
 * Api to get the vport id for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 * @vport: the port of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_vport_get(struct bnxt_ulp_context *ulp_ctxt,
		      u32 ifindex, u16 *vport)
{
	struct bnxt_ulp_port_db *port_db;
	u16 phy_port_id, func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}

	/* Resolve ifindex -> driver function -> physical port -> vport. */
	func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
	phy_port_id = port_db->ulp_func_id_tbl[func_id].phy_port_id;
	*vport = port_db->phy_port_list[phy_port_id].port_vport;
	return 0;
}

/**
 * Api to get the vport for a given physical port.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @phy_port: physical port index
 * @out_port: the port of the given physical index
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_phy_port_vport_get(struct bnxt_ulp_context *ulp_ctxt,
			       u32 phy_port,
			       u16 *out_port)
{
	struct bnxt_ulp_port_db *port_db;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || phy_port >= port_db->phy_port_cnt) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	*out_port = port_db->phy_port_list[phy_port].port_vport;
	return 0;
}

/**
 * Api to get the svif for a given physical port.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @phy_port: physical port index
 * @svif: the svif of the given physical index
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_phy_port_svif_get(struct bnxt_ulp_context *ulp_ctxt,
			      u32 phy_port,
			      u16 *svif)
{
	struct bnxt_ulp_port_db *port_db;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || phy_port >= port_db->phy_port_cnt) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	*svif = port_db->phy_port_list[phy_port].port_svif;
	return 0;
}

/**
 * Api to get the port type for a given ulp ifindex.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @ifindex: ulp ifindex
 *
 * Returns port type, or BNXT_ULP_INTF_TYPE_INVALID on bad arguments.
 */
enum bnxt_ulp_intf_type
ulp_port_db_port_type_get(struct bnxt_ulp_context *ulp_ctxt,
			  u32 ifindex)
{
	struct bnxt_ulp_port_db *port_db;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || ifindex >= port_db->ulp_intf_list_size || !ifindex) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return BNXT_ULP_INTF_TYPE_INVALID;
	}
	return port_db->ulp_intf_list[ifindex].type;
}

/**
 * Api to get the ulp ifindex for a given function id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @func_id: device func id
 * @ifindex: ulp ifindex
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_dev_func_id_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
				     u32 func_id, u32 *ifindex)
{
	struct bnxt_ulp_port_db *port_db;

	*ifindex = 0;
	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || func_id >= BNXT_PORT_DB_MAX_FUNC) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	if (!port_db->ulp_func_id_tbl[func_id].func_valid)
		return -ENOENT;

	*ifindex = port_db->ulp_func_id_tbl[func_id].ifindex;
	return 0;
}

/**
 * Api to get the function id for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: fw fid
 * @func_id: the function id of the given ifindex.
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_port_func_id_get(struct bnxt_ulp_context *ulp_ctxt,
			     u16 port_id, u16 *func_id)
{
	struct bnxt_ulp_port_db *port_db;
	u32 ifindex;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || port_id >= TC_MAX_ETHPORTS) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	ifindex = port_db->dev_port_list[port_id];
	if (!ifindex)
		return -ENOENT;

	/* PF/trusted-VF entries use the driver fid, VF/VF-rep the VF fid. */
	switch (port_db->ulp_intf_list[ifindex].type) {
	case BNXT_ULP_INTF_TYPE_TRUSTED_VF:
	case BNXT_ULP_INTF_TYPE_PF:
		*func_id = port_db->ulp_intf_list[ifindex].drv_func_id;
		break;
	case BNXT_ULP_INTF_TYPE_VF:
	case BNXT_ULP_INTF_TYPE_VF_REP:
		*func_id = port_db->ulp_intf_list[ifindex].vf_func_id;
		break;
	default:
		*func_id = 0;
		break;
	}
	return 0;
}

/* Internal helper: map a device port id to its valid ulp_func_id_tbl entry,
 * or NULL if the port or its function entry is unknown. The func-id lookup
 * validates port_db, so port_db is non-NULL whenever it returns success.
 */
static struct ulp_func_if_info*
ulp_port_db_func_if_info_get(struct bnxt_ulp_context *ulp_ctxt,
			     u32 port_id)
{
	struct bnxt_ulp_port_db *port_db;
	u16 func_id;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (ulp_port_db_port_func_id_get(ulp_ctxt, port_id, &func_id)) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid port_id %x\n", port_id);
		return NULL;
	}

	if (!port_db->ulp_func_id_tbl[func_id].func_valid) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid func_id %x\n", func_id);
		return NULL;
	}
	return &port_db->ulp_func_id_tbl[func_id];
}

/**
 * Api to get the parent mac address for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @mac_addr: mac address
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
				u32 port_id, u8 **mac_addr)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*mac_addr = info->func_parent_mac;
		return 0;
	}
	return -EINVAL;
}

/**
 * Api to get the mac address for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @mac_addr: mac address
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_drv_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
			     u32 port_id, u8 **mac_addr)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*mac_addr = info->func_mac;
		return 0;
	}
	return -EINVAL;
}

/**
 * Api to get the parent vnic for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @vnic: parent vnic (points at a be16 stored in the port db)
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_parent_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			    u32 port_id, u8 **vnic)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*vnic = (u8 *)&info->func_parent_vnic;
		return 0;
	}
	return -EINVAL;
}

/**
 * Api to get the phy port for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @phy_port: phy_port of the dpdk port_id
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_phy_port_get(struct bnxt_ulp_context *ulp_ctxt,
			 u32 port_id, u16 *phy_port)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*phy_port = info->phy_port_id;
		return 0;
	}
	return -EINVAL;
}

/**
 * Api to get the port type for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @type: type if pf or not
 *
 * Returns 0 on success or negative number on failure.
+ */ +int +ulp_port_db_port_is_pf_get(struct bnxt_ulp_context *ulp_ctxt, + u32 port_id, u8 **type) +{ + struct ulp_func_if_info *info; + struct bnxt_ulp_port_db *port_db; + uint16_t pid; + + port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt); + info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id); + if (info) { + pid = info->ifindex; + *type = (u8 *)&port_db->ulp_intf_list[pid].type_is_pf; + return 0; + } + return -EINVAL; +} + +/** + * Api to get the meta data for a given port id. + * + * @ulp_ctxt [in] Ptr to ulp context + * @port_id [in] dpdk port id + * @meta data [out] the meta data of the given port + * + * Returns 0 on success or negative number on failure. + */ +int +ulp_port_db_port_meta_data_get(struct bnxt_ulp_context *ulp_ctxt, + u16 port_id, u8 **meta_data) +{ + struct ulp_func_if_info *info; + + info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id); + if (info) { + *meta_data = (uint8_t *)&info->vf_meta_data; + return 0; + } + return -EINVAL; +} + +/** Api to get the function id for a given port id + * + * @ulp_ctxt: Ptr to ulp context + * @port_id: dpdk port id + * @fid_data: the function id of the given port + * + * Returns 0 on success or negative number on failure. 
 */
int
ulp_port_db_port_vf_fid_get(struct bnxt_ulp_context *ulp_ctxt,
			    u16 port_id, u8 **fid_data)
{
	struct bnxt_ulp_port_db *port_db;
	u32 ifindex;

	port_db = bnxt_ulp_cntxt_ptr2_port_db_get(ulp_ctxt);
	if (!port_db || port_id >= TC_MAX_ETHPORTS) {
		netdev_dbg(ulp_ctxt->bp->dev, "Invalid Arguments\n");
		return -EINVAL;
	}
	ifindex = port_db->dev_port_list[port_id];
	if (!ifindex)
		return -ENOENT;

	/* Only VF and VF-rep entries carry a meaningful vf_func_id. */
	if (port_db->ulp_intf_list[ifindex].type != BNXT_ULP_INTF_TYPE_VF &&
	    port_db->ulp_intf_list[ifindex].type != BNXT_ULP_INTF_TYPE_VF_REP)
		return -EINVAL;

	*fid_data = (uint8_t *)&port_db->ulp_intf_list[ifindex].vf_func_id;
	return 0;
}

/* Return a pointer to the table scope id cached for the given port. */
int
ulp_port_db_port_table_scope_get(struct bnxt_ulp_context *ulp_ctxt,
				 u16 port_id, u8 **tsid)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*tsid = &info->table_scope;
		return 0;
	}
	return -EINVAL;
}

/**
 * Api to get the RoCE vnic for a given port id.
 *
 * @ulp_ctxt: Ptr to ulp context
 * @port_id: device port id
 * @roce_vnic: RoCE vnic (points at a be16 stored in the port db)
 *
 * Returns 0 on success or negative number on failure.
 */
int
ulp_port_db_drv_roce_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			      u32 port_id, u8 **roce_vnic)
{
	struct ulp_func_if_info *info;

	info = ulp_port_db_func_if_info_get(ulp_ctxt, port_id);
	if (info) {
		*roce_vnic = (uint8_t *)&info->func_roce_vnic;
		return 0;
	}
	return -EINVAL;
}

#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.h
new file mode 100644
index 000000000000..df6bb4150ae2
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_port_db.h
@@ -0,0 +1,196 @@
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2023 Broadcom
 * All rights reserved.
 */

#ifndef _ULP_PORT_DB_H_
#define _ULP_PORT_DB_H_

#include "bnxt.h"
#include "bnxt_tf_ulp.h"
#include "bnxt_tf_common.h"

/* Base size of the interface list (ulp_port_db_init doubles it). */
#define BNXT_PORT_DB_MAX_INTF_LIST 256
/* Size of the per-function table, indexed by fw fid. */
#define BNXT_PORT_DB_MAX_FUNC 2048
/* Offset added to all PARIF values returned by ulp_port_db_parif_get(). */
#define BNXT_ULP_FREE_PARIF_BASE 11
/* Flag OR-ed into the VF fid to form vf_meta_data. */
#define BNXT_ULP_META_VF_FLAG	0x1000

/* Selector for ulp_port_db_svif_get(): driver func, VF func or phy port. */
enum bnxt_ulp_svif_type {
	BNXT_ULP_DRV_FUNC_SVIF = 0,
	BNXT_ULP_VF_FUNC_SVIF,
	BNXT_ULP_PHY_PORT_SVIF
};

/* Selector for ulp_port_db_spif_get(). */
enum bnxt_ulp_spif_type {
	BNXT_ULP_DRV_FUNC_SPIF = 0,
	BNXT_ULP_VF_FUNC_SPIF,
	BNXT_ULP_PHY_PORT_SPIF
};

/* Selector for ulp_port_db_parif_get(). */
enum bnxt_ulp_parif_type {
	BNXT_ULP_DRV_FUNC_PARIF = 0,
	BNXT_ULP_VF_FUNC_PARIF,
	BNXT_ULP_PHY_PORT_PARIF
};

/* Selector for ulp_port_db_default_vnic_get(). */
enum bnxt_ulp_vnic_type {
	BNXT_ULP_DRV_FUNC_VNIC = 0,
	BNXT_ULP_VF_FUNC_VNIC
};

/* Selector for ulp_port_db_function_id_get(). */
enum bnxt_ulp_fid_type {
	BNXT_ULP_DRV_FUNC_FID,
	BNXT_ULP_VF_FUNC_FID
};

/* Per-function cached attributes, indexed by fw fid. */
struct ulp_func_if_info {
	u16 func_valid;		/* non-zero once the entry is populated */
	u16 func_svif;
	u16 func_spif;
	u16 func_parif;
	u16 func_vnic;
	u16 func_roce_vnic;	/* stored big-endian (cpu_to_be16 at update) */
	u8 func_mac[ETH_ALEN];
	u16 func_parent_vnic;	/* stored big-endian */
	u8 func_parent_mac[ETH_ALEN];
	u16 phy_port_id;
	u16 ifindex;		/* back-reference into ulp_intf_list */
	u16 vf_meta_data;	/* BNXT_ULP_META_VF_FLAG | vf fid, big-endian */
	u8 table_scope;
};

/* Structure for the Port database resource information.
 */
struct ulp_interface_info {
	enum bnxt_ulp_intf_type type;	/* PF, VF, trusted VF, VF-rep */
	u16 drv_func_id;		/* fid of the driving function */
	u16 vf_func_id;			/* fid of the represented VF (VF-rep) */
	u16 type_is_pf;
	u16 rdma_sriov_en;		/* VF RoCE support flag */
	u8 udcc_en;			/* UDCC mode of the PF */
};

/* Per physical port cached attributes. */
struct ulp_phy_port_info {
	u16 port_valid;		/* non-zero once the entry is populated */
	u16 port_svif;
	u16 port_spif;
	u16 port_parif;
	u16 port_vport;		/* one-hot encoding of the port id */
};

/* Structure for the Port database */
struct bnxt_ulp_port_db {
	struct ulp_interface_info *ulp_intf_list;
	u32 ulp_intf_list_size;

	/* uplink port list */
#define TC_MAX_ETHPORTS 1024
	u16 dev_port_list[TC_MAX_ETHPORTS];	/* fw fid -> ulp ifindex */
	struct ulp_phy_port_info *phy_port_list;
	u16 phy_port_cnt;
	struct ulp_func_if_info ulp_func_id_tbl[BNXT_PORT_DB_MAX_FUNC];
};

int ulp_port_db_init(struct bnxt_ulp_context *ulp_ctxt, u8 port_cnt);

int ulp_port_db_deinit(struct bnxt_ulp_context *ulp_ctxt);

int ulp_port_db_dev_port_intf_update(struct bnxt_ulp_context *ulp_ctxt,
				     struct bnxt *bp, void *vf_rep);
int
ulp_port_db_dev_port_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
				  u32 port_id, u32 *ifindex);

int
ulp_port_db_function_id_get(struct bnxt_ulp_context *ulp_ctxt,
			    u32 ifindex, u32 fid_type,
			    u16 *func_id);
int
ulp_port_db_vf_roce_get(struct bnxt_ulp_context *ulp_ctxt,
			u32 port_id,
			u16 *vf_roce);

int
ulp_port_db_udcc_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 port_id,
		     u8 *udcc);

int
ulp_port_db_svif_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 ifindex, u32 dir, u16 *svif);

int
ulp_port_db_spif_get(struct bnxt_ulp_context *ulp_ctxt,
		     u32 ifindex, u32 dir, u16 *spif);

int
ulp_port_db_parif_get(struct bnxt_ulp_context *ulp_ctxt,
		      u32 ifindex, u32 dir, u16 *parif);

int
ulp_port_db_default_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			     u32 ifindex, u32 vnic_type,
			     u16 *vnic);

int
ulp_port_db_vport_get(struct bnxt_ulp_context *ulp_ctxt,
		      u32 ifindex, u16 *vport);

int
ulp_port_db_phy_port_vport_get(struct bnxt_ulp_context *ulp_ctxt,
			       u32 phy_port,
			       u16 *out_port);

int
ulp_port_db_phy_port_svif_get(struct bnxt_ulp_context *ulp_ctxt,
			      u32 phy_port,
			      u16 *svif);

enum bnxt_ulp_intf_type
ulp_port_db_port_type_get(struct bnxt_ulp_context *ulp_ctxt,
			  u32 ifindex);

int
ulp_port_db_dev_func_id_to_ulp_index(struct bnxt_ulp_context *ulp_ctxt,
				     u32 func_id, u32 *ifindex);

int
ulp_port_db_port_func_id_get(struct bnxt_ulp_context *ulp_ctxt,
			     u16 port_id, u16 *func_id);

int
ulp_port_db_parent_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
				u32 port_id, u8 **mac_addr);

int
ulp_port_db_drv_mac_addr_get(struct bnxt_ulp_context *ulp_ctxt,
			     u32 port_id, u8 **mac_addr);

int
ulp_port_db_parent_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			    u32 port_id, u8 **vnic);
int
ulp_port_db_phy_port_get(struct bnxt_ulp_context *ulp_ctxt,
			 u32 port_id, u16 *phy_port);

int
ulp_port_db_port_is_pf_get(struct bnxt_ulp_context *ulp_ctxt,
			   u32 port_id, u8 **type);

int
ulp_port_db_port_meta_data_get(struct bnxt_ulp_context *ulp_ctxt,
			       u16 port_id, u8 **meta_data);

int
ulp_port_db_port_vf_fid_get(struct bnxt_ulp_context *ulp_ctxt,
			    u16 port_id, u8 **fid_data);

int
ulp_port_db_port_table_scope_get(struct bnxt_ulp_context *ulp_ctxt,
				 u16 port_id, u8 **tsid);

u16 bnxt_vfr_get_fw_func_id(void *vf_rep);

int
ulp_port_db_drv_roce_vnic_get(struct bnxt_ulp_context *ulp_ctxt,
			      u32 port_id, u8 **roce_vnic);
#endif /* _ULP_PORT_DB_H_ */
diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.c
new file mode 100644
index 000000000000..a5ac8cf47506
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.c
@@ -0,0 +1,1953 @@

// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2023-2023 Broadcom
 * All rights reserved.
 */

#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD
/* NOTE(review): the header name after this #include was lost in patch
 * extraction — confirm against the original source.
 */
#include

#include "bnxt_compat.h"
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_tf_common.h"
#include "bnxt_ulp_flow.h"
#include "ulp_tc_parser.h"
#include "ulp_matcher.h"
#include "ulp_flow_db.h"
#include "ulp_mapper.h"
#include "ulp_fc_mgr.h"
#include "ulp_port_db.h"
/* NOTE(review): duplicate include of ulp_tc_parser.h (harmless if guarded) */
#include "ulp_tc_parser.h"
#include "ulp_template_debug_proto.h"
#include "ulp_tc_custom_offload.h"
#include "ulp_tc_rte_flow.h"

/* Mark the flow ingress when it originates from the PF itself, else egress. */
static inline void
bnxt_custom_ulp_set_dir_attributes(struct bnxt *bp, struct ulp_tc_parser_params
				   *params, u16 src_fid)
{
	/* Set the flow attributes.
	 * TBD: This logic might need some port-process fixing for the
	 * vxlan-decap case.
	 */
	if (bp->pf.fw_fid == src_fid)
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_INGRESS;
	else
		params->dir_attr |= BNXT_ULP_FLOW_ATTR_EGRESS;
}

/* Populate mapper parms from parsed flow data. The pointer members alias
 * fields inside @params, so @params must outlive @mparms (or be deep-copied
 * via bnxt_custom_ulp_alloc_mapper_encap_cparams below).
 */
void
bnxt_custom_ulp_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms,
				   struct ulp_tc_parser_params *params,
				   enum bnxt_ulp_fdb_type flow_type)
{
	memset(mparms, 0, sizeof(*mparms));

	mparms->flow_type = flow_type;
	mparms->app_priority = params->priority;
	mparms->class_tid = params->class_id;
	mparms->act_tid = params->act_tmpl;
	mparms->func_id = params->func_id;
	mparms->hdr_bitmap = &params->hdr_bitmap;
	mparms->enc_hdr_bitmap = &params->enc_hdr_bitmap;
	mparms->hdr_field = params->hdr_field;
	mparms->enc_field = params->enc_field;
	mparms->comp_fld = params->comp_fld;
	mparms->act_bitmap = &params->act_bitmap;
	mparms->act_prop = &params->act_prop;
	mparms->parent_flow = params->parent_flow;
	mparms->child_flow = params->child_flow;
	mparms->fld_bitmap = &params->fld_bitmap;
	mparms->flow_pattern_id = params->flow_pattern_id;
	mparms->act_pattern_id = params->act_pattern_id;
	mparms->wc_field_bitmap = params->wc_field_bitmap;
	mparms->app_id = params->app_id;
	mparms->tun_idx = params->tun_idx;
	mparms->cf_bitmap = params->cf_bitmap;
	mparms->exclude_field_bitmap = params->exclude_field_bitmap;

	/* update the signature fields into the computed field list */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_HDR_SIG_ID,
			    params->class_info_idx);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FLOW_SIG_ID,
			    params->flow_sig_id);
}

/* Deep-copy mapper parms so they can outlive the parser context.
 * On success *mparms_dyn owns all sub-allocations; release with
 * bnxt_custom_ulp_free_mapper_encap_mparams(). Returns 0 or -ENOMEM.
 * Each error label frees the allocation made just before the failing one,
 * unwinding in reverse order.
 */
int
bnxt_custom_ulp_alloc_mapper_encap_cparams(struct bnxt_ulp_mapper_parms **mparms_dyn,
					   struct bnxt_ulp_mapper_parms *mparms)
{
	struct bnxt_ulp_mapper_parms *parms = NULL;

	parms = vzalloc(sizeof(*parms));
	if (!parms)
		goto err;
	/* shallow copy of scalars; pointer members are replaced below */
	memcpy(parms, mparms, sizeof(*parms));

	parms->hdr_bitmap = vzalloc(sizeof(*parms->hdr_bitmap));
	if (!parms->hdr_bitmap)
		goto err_cparm;

	parms->enc_hdr_bitmap = vzalloc(sizeof(*parms->enc_hdr_bitmap));
	if (!parms->enc_hdr_bitmap)
		goto err_hdr_bitmap;

	parms->hdr_field = vzalloc(sizeof(*parms->hdr_field) * BNXT_ULP_PROTO_HDR_MAX);
	if (!parms->hdr_field)
		goto err_enc_hdr_bitmap;

	parms->enc_field = vzalloc(sizeof(*parms->enc_field) * BNXT_ULP_PROTO_HDR_ENCAP_MAX);
	if (!parms->enc_field)
		goto err_hdr_field;

	parms->comp_fld = vzalloc(sizeof(*parms->comp_fld) * BNXT_ULP_CF_IDX_LAST);
	if (!parms->comp_fld)
		goto err_enc_field;

	parms->act_bitmap = vzalloc(sizeof(*parms->act_bitmap));
	if (!parms->act_bitmap)
		goto err_comp_fld;

	parms->act_prop = vzalloc(sizeof(*parms->act_prop));
	if (!parms->act_prop)
		goto err_act;

	parms->fld_bitmap = vzalloc(sizeof(*parms->fld_bitmap));
	if (!parms->fld_bitmap)
		goto err_act_prop;

	memcpy(parms->hdr_bitmap, mparms->hdr_bitmap, sizeof(*parms->hdr_bitmap));
	memcpy(parms->enc_hdr_bitmap, mparms->enc_hdr_bitmap,
	       sizeof(*parms->enc_hdr_bitmap));
	memcpy(parms->hdr_field, mparms->hdr_field,
	       sizeof(*parms->hdr_field) * BNXT_ULP_PROTO_HDR_MAX);
	memcpy(parms->enc_field, mparms->enc_field,
	       sizeof(*parms->enc_field) * BNXT_ULP_PROTO_HDR_ENCAP_MAX);
	memcpy(parms->comp_fld, mparms->comp_fld,
	       sizeof(*parms->comp_fld) * BNXT_ULP_CF_IDX_LAST);
	memcpy(parms->act_bitmap, mparms->act_bitmap, sizeof(*parms->act_bitmap));
	memcpy(parms->act_prop, mparms->act_prop, sizeof(*parms->act_prop));
	memcpy(parms->fld_bitmap, mparms->fld_bitmap, sizeof(*parms->fld_bitmap));

	*mparms_dyn = parms;
	return 0;

err_act_prop:
	vfree(parms->act_prop);
err_act:
	vfree(parms->act_bitmap);
err_comp_fld:
	vfree(parms->comp_fld);
err_enc_field:
	vfree(parms->enc_field);
err_hdr_field:
	vfree(parms->hdr_field);
err_enc_hdr_bitmap:
	vfree(parms->enc_hdr_bitmap);
err_hdr_bitmap:
	vfree(parms->hdr_bitmap);
err_cparm:
	vfree(parms);
err:
	return -ENOMEM;
}

/* Free a deep copy created by bnxt_custom_ulp_alloc_mapper_encap_cparams. */
void
bnxt_custom_ulp_free_mapper_encap_mparams(void *mapper_mparms)
{
	struct bnxt_ulp_mapper_parms *parms = mapper_mparms;

	vfree(parms->act_prop);
	vfree(parms->act_bitmap);
	vfree(parms->comp_fld);
	vfree(parms->enc_field);
	vfree(parms->hdr_field);
	vfree(parms->enc_hdr_bitmap);
	vfree(parms->hdr_bitmap);
	vfree(parms);
}

/* Reserve @size consecutive header-field slots; returns the starting index
 * in *idx, or -EINVAL when the table would overflow.
 * NOTE(review): the '>=' bound rejects the case field_idx + size == MAX even
 * though indices up to MAX-1 would still be in range — deliberate guard or
 * off-by-one? Confirm before changing.
 */
static int ulp_rte_prsr_fld_size_validate(struct ulp_tc_parser_params *params,
					  u32 *idx, u32 size)
{
	if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX)
		return -EINVAL;
	*idx = params->field_idx;
	params->field_idx += size;
	return 0;
}

/* Utility function to update the field_bitmap */
static void ulp_tc_parser_field_bitmap_update(struct ulp_tc_parser_params
					      *params, u32 idx,
					      enum bnxt_ulp_prsr_action prsr_act)
{
	struct ulp_tc_hdr_field *field;

	field = &params->hdr_field[idx];
	if (ulp_bitmap_notzero(field->mask, field->size)) {
		ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
		if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
			ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
		/* Not exact match */
		if (!ulp_bitmap_is_ones(field->mask, field->size))
			ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_WC_MATCH, 1);
	} else {
		ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
	}
}

#define ulp_deference_struct(x, y) ((x) ?
&((x)->y) : NULL) +/* Utility function to copy field spec and masks items */ +static void ulp_tc_prsr_fld_mask(struct ulp_tc_parser_params *params, + u32 *idx, u32 size, const void *spec_buff, + const void *mask_buff, + enum bnxt_ulp_prsr_action prsr_act) +{ + struct ulp_tc_hdr_field *field = ¶ms->hdr_field[*idx]; + + /* update the field size */ + field->size = size; + + /* copy the mask specifications only if mask is not null */ + if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) { + memcpy(field->mask, mask_buff, size); + ulp_tc_parser_field_bitmap_update(params, *idx, prsr_act); + } + + /* copy the protocol specifications only if mask is not null*/ + if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size)) + memcpy(field->spec, spec_buff, size); + + /* Increment the index */ + *idx = *idx + 1; +} + +/* Function to handle the update of proto header based on field values */ +static void +ulp_rte_l2_proto_type_update(struct ulp_tc_parser_params *param, + uint16_t type, uint32_t in_flag, + uint32_t has_vlan, uint32_t has_vlan_mask) +{ +#define ULP_RTE_ETHER_TYPE_ROE 0xfc3d + + if (type == cpu_to_be16(RTE_ETHER_TYPE_IPV4)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } else if (type == cpu_to_be16(RTE_ETHER_TYPE_IPV6)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } else if (type == cpu_to_be16(RTE_ETHER_TYPE_VLAN)) { + has_vlan_mask = 1; + has_vlan = 1; + } else if (type == cpu_to_be16(RTE_ETHER_TYPE_ECPRI)) { + /* Update the hdr_bitmap with eCPRI */ + 
ULP_BITMAP_SET(param->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_O_ECPRI); + } else if (type == cpu_to_be16(ULP_RTE_ETHER_TYPE_ROE)) { + /* Update the hdr_bitmap with RoE */ + ULP_BITMAP_SET(param->hdr_fp_bit.bits, BNXT_ULP_HDR_BIT_O_ROE); + } + + if (has_vlan_mask) { + if (in_flag) { + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_HAS_VTAG, + has_vlan); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE, + 1); + } else { + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_HAS_VTAG, + has_vlan); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE, + 1); + } + } +} + +/* Internal Function to identify broadcast or multicast packets */ +static int32_t +ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr) +{ + if (rte_is_multicast_ether_addr(eth_addr)) + return 0; + + if (rte_is_broadcast_ether_addr(eth_addr)) { + netdev_dbg(NULL, "No support for bcast addr offload\n"); + return 1; + } + return 0; +} + +/* Function to handle the parsing of RTE Flow item Ethernet Header. 
*/ +int32_t +ulp_rte_eth_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_eth *eth_spec = item->spec; + const struct rte_flow_item_eth *eth_mask = item->mask; + uint32_t idx = 0, dmac_idx = 0; + uint32_t size; + uint16_t eth_type = 0; + uint32_t inner_flag = 0; + uint32_t has_vlan = 0, has_vlan_mask = 0; + struct bnxt *bp = params->ulp_ctx->bp; + + /* Perform validations */ + if (eth_spec) { + /* Avoid multicast and broadcast addr */ + if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst)) + return BNXT_TF_RC_PARSE_ERR; + + if (ulp_rte_parser_is_bcmc_addr(ð_spec->src)) + return BNXT_TF_RC_PARSE_ERR; + + eth_type = eth_spec->type; + has_vlan = eth_spec->has_vlan; + } + if (eth_mask) { + eth_type &= eth_mask->type; + has_vlan_mask = eth_mask->has_vlan; + } + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_ETH_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + /* + * Copy the rte_flow_item for eth into hdr_field using ethernet + * header fields + */ + dmac_idx = idx; + size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(eth_spec, dst.addr_bytes), + ulp_deference_struct(eth_mask, dst.addr_bytes), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(eth_spec, src.addr_bytes), + ulp_deference_struct(eth_mask, src.addr_bytes), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_eth *)NULL)->type); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(eth_spec, type), + ulp_deference_struct(eth_mask, type), + ULP_PRSR_ACT_DEFAULT); + + /* Update the protocol hdr bitmap */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ETH) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV4) || + 
ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_TCP)) { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH); + inner_flag = 1; + } else { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID, + dmac_idx); + } + /* Update the field protocol hdr bitmap */ + ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, + has_vlan, has_vlan_mask); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item Vlan Header. */ +int32_t +ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_vlan *vlan_spec = item->spec; + const struct rte_flow_item_vlan *vlan_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bit; + uint32_t idx = 0; + uint16_t vlan_tag = 0, priority = 0; + uint16_t vlan_tag_mask = 0, priority_mask = 0; + uint32_t outer_vtag_num; + uint32_t inner_vtag_num; + uint16_t eth_type = 0; + uint32_t inner_flag = 0; + uint32_t size; + struct bnxt *bp = params->ulp_ctx->bp; + + if (vlan_spec) { + vlan_tag = ntohs(vlan_spec->tci); + priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT); + vlan_tag &= ULP_VLAN_TAG_MASK; + vlan_tag = htons(vlan_tag); + eth_type = vlan_spec->inner_type; + } + + if (vlan_mask) { + vlan_tag_mask = ntohs(vlan_mask->tci); + priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT); + vlan_tag_mask &= 0xfff; + + /* + * the storage for priority and vlan tag is 2 bytes + * The mask of priority which is 3 bits if it is all 1's + * then make the rest bits 13 bits as 1's + * so that it is matched as exact match. 
+ */ + if (priority_mask == ULP_VLAN_PRIORITY_MASK) + priority_mask |= ~ULP_VLAN_PRIORITY_MASK; + if (vlan_tag_mask == ULP_VLAN_TAG_MASK) + vlan_tag_mask |= ~ULP_VLAN_TAG_MASK; + vlan_tag_mask = htons(vlan_tag_mask); + } + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for vlan into hdr_field using Vlan + * header fields + */ + size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci); + /* + * The priority field is ignored since OVS is setting it as + * wild card match and it is not supported. This is a work + * around and shall be addressed in the future. + */ + ulp_tc_prsr_fld_mask(params, &idx, size, + &priority, + (vlan_mask) ? &priority_mask : NULL, + ULP_PRSR_ACT_MASK_IGNORE); + + ulp_tc_prsr_fld_mask(params, &idx, size, + &vlan_tag, + (vlan_mask) ? &vlan_tag_mask : NULL, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(vlan_spec, inner_type), + ulp_deference_struct(vlan_mask, inner_type), + ULP_PRSR_ACT_MATCH_IGNORE); + + /* Get the outer tag and inner tag counts */ + outer_vtag_num = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_O_VTAG_NUM); + inner_vtag_num = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_I_VTAG_NUM); + + /* Update the hdr_bitmap of the vlans */ + hdr_bit = ¶ms->hdr_bitmap; + if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !outer_vtag_num) { + /* Update the vlan tag num */ + outer_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_OO_VLAN); + if (vlan_mask && vlan_tag_mask) + 
ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1); + + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + outer_vtag_num == 1) { + /* update the vlan tag num */ + outer_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_OI_VLAN); + if (vlan_mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1); + + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !inner_vtag_num) { + /* update the vlan tag num */ + inner_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_IO_VLAN); + if (vlan_mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1); + inner_flag = 1; + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + inner_vtag_num == 1) { + /* update the vlan tag num */ + inner_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_II_VLAN); + if (vlan_mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1); + inner_flag = 1; + } else { + netdev_err(bp->dev, "Error Parsing:Vlan hdr found without eth\n"); + return BNXT_TF_RC_ERROR; + } + /* Update the field protocol 
hdr bitmap */ + ulp_rte_l2_proto_type_update(params, eth_type, inner_flag, 1, 1); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the update of proto header based on field values */ +static void +ulp_rte_l3_proto_type_update(struct ulp_tc_parser_params *param, + uint8_t proto, uint32_t in_flag) +{ + if (proto == IPPROTO_UDP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_TCP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_GRE) { + ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE); + } else if (proto == IPPROTO_ICMP) { + if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN)) + ULP_BITMAP_SET(param->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_ICMP); + else + ULP_BITMAP_SET(param->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ICMP); + } + + if (in_flag) { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + proto); + } else { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + proto); + } +} + +/* Function to handle the parsing of RTE Flow item IPV4 Header. 
*/ +int32_t +ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_ipv4 *ipv4_spec = item->spec; + const struct rte_flow_item_ipv4 *ipv4_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = 0, dip_idx = 0; + uint32_t size; + uint8_t proto = 0; + uint8_t proto_mask = 0; + uint32_t inner_flag = 0; + uint32_t cnt; + struct bnxt *bp = params->ulp_ctx->bp; + + /* validate there are no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_err(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV4_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.version_ihl), + ulp_deference_struct(ipv4_mask, hdr.version_ihl), + ULP_PRSR_ACT_DEFAULT); + + /* + * The tos field is ignored since OVS is setting it as wild card + * match and it is not supported. An application can enable tos support. 
+ */ + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.type_of_service), + ulp_deference_struct(ipv4_mask, hdr.type_of_service), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.total_length), + ulp_deference_struct(ipv4_mask, hdr.total_length), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.packet_id), + ulp_deference_struct(ipv4_mask, hdr.packet_id), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.fragment_offset), + ulp_deference_struct(ipv4_mask, hdr.fragment_offset), + ULP_PRSR_ACT_MASK_IGNORE); + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.time_to_live), + ulp_deference_struct(ipv4_mask, hdr.time_to_live), + ULP_PRSR_ACT_DEFAULT); + + /* Ignore proto for matching templates */ + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.next_proto_id), + ulp_deference_struct(ipv4_mask, hdr.next_proto_id), + (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ? 
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE); + + if (ipv4_spec) + proto = ipv4_spec->hdr.next_proto_id; + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.hdr_checksum), + ulp_deference_struct(ipv4_mask, hdr.hdr_checksum), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.src_addr), + ulp_deference_struct(ipv4_mask, hdr.src_addr), + ULP_PRSR_ACT_DEFAULT); + + dip_idx = idx; + size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv4_spec, hdr.dst_addr), + ulp_deference_struct(ipv4_mask, hdr.dst_addr), + ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv4 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + /* Update the tunnel offload dest ip offset */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, + dip_idx); + } + + /* Some of the PMD applications may set the protocol field + * in the IPv4 spec but don't set the mask. So, consider + * the mask in the proto value calculation. 
+ */ + if (ipv4_mask) { + proto &= ipv4_mask->hdr.next_proto_id; + proto_mask = ipv4_mask->hdr.next_proto_id; + } + + /* Update the field protocol hdr bitmap */ + if (proto_mask) + ulp_rte_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item IPV6 Header */ +int32_t +ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_ipv6 *ipv6_spec = item->spec; + const struct rte_flow_item_ipv6 *ipv6_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = 0, dip_idx = 0; + uint32_t size, vtc_flow; + uint32_t ver_spec = 0, ver_mask = 0; + uint32_t tc_spec = 0, tc_mask = 0; + uint32_t lab_spec = 0, lab_mask = 0; + uint8_t proto = 0; + uint8_t proto_mask = 0; + uint32_t inner_flag = 0; + uint32_t cnt; + struct bnxt *bp = params->ulp_ctx->bp; + + /* validate there are no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_err(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV6_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv6 into hdr_field using ipv6 + * header fields + */ + if (ipv6_spec) { + vtc_flow = ntohl(ipv6_spec->hdr.vtc_flow); + ver_spec = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow)); + tc_spec = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow)); + lab_spec = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); + proto = ipv6_spec->hdr.proto; + } + + if (ipv6_mask) { + vtc_flow = ntohl(ipv6_mask->hdr.vtc_flow); + ver_mask = htonl(BNXT_ULP_GET_IPV6_VER(vtc_flow)); + tc_mask = htonl(BNXT_ULP_GET_IPV6_TC(vtc_flow)); + lab_mask = htonl(BNXT_ULP_GET_IPV6_FLOWLABEL(vtc_flow)); + + /* Some 
of the PMD applications may set the protocol field + * in the IPv6 spec but don't set the mask. So, consider + * the mask in proto value calculation. + */ + proto &= ipv6_mask->hdr.proto; + proto_mask = ipv6_mask->hdr.proto; + } + + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow); + ulp_tc_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask, + ULP_PRSR_ACT_DEFAULT); + /* + * The TC and flow label field are ignored since OVS is + * setting it for match and it is not supported. + * This is a work around and + * shall be addressed in the future. + */ + ulp_tc_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask, + (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ? + ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MASK_IGNORE); + ulp_tc_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask, + ULP_PRSR_ACT_MASK_IGNORE); + + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv6_spec, hdr.payload_len), + ulp_deference_struct(ipv6_mask, hdr.payload_len), + ULP_PRSR_ACT_DEFAULT); + + /* Ignore proto for template matching */ + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv6_spec, hdr.proto), + ulp_deference_struct(ipv6_mask, hdr.proto), + (ULP_APP_TOS_PROTO_SUPPORT(params->ulp_ctx)) ? 
+ ULP_PRSR_ACT_DEFAULT : ULP_PRSR_ACT_MATCH_IGNORE); + + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv6_spec, hdr.hop_limits), + ulp_deference_struct(ipv6_mask, hdr.hop_limits), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv6_spec, hdr.src_addr), + ulp_deference_struct(ipv6_mask, hdr.src_addr), + ULP_PRSR_ACT_DEFAULT); + + dip_idx = idx; + size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(ipv6_spec, hdr.dst_addr), + ulp_deference_struct(ipv6_mask, hdr.dst_addr), + ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv6 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + /* Update the tunnel offload dest ip offset */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID, + dip_idx); + } + + /* Update the field protocol hdr bitmap */ + if (proto_mask) + ulp_rte_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the update of proto header based on field values */ +static void +ulp_rte_l4_proto_type_update(struct ulp_tc_parser_params *params, + uint16_t src_port, uint16_t src_mask, + uint16_t dst_port, uint16_t dst_mask, + enum bnxt_ulp_hdr_bit hdr_bit) +{ + switch (hdr_bit) { + case BNXT_ULP_HDR_BIT_I_UDP: + case 
BNXT_ULP_HDR_BIT_I_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT, + (uint64_t)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT, + (uint64_t)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK, + (uint64_t)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK, + (uint64_t)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ? + IPPROTO_UDP : IPPROTO_TCP); + break; + case BNXT_ULP_HDR_BIT_O_UDP: + case BNXT_ULP_HDR_BIT_O_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT, + (uint64_t)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT, + (uint64_t)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK, + (uint64_t)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK, + (uint64_t)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ? 
+ IPPROTO_UDP : IPPROTO_TCP); + break; + default: + break; + } + + if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port == + cpu_to_be16(ULP_UDP_PORT_VXLAN)) { + ULP_BITMAP_SET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_T_VXLAN); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL); + } +} + +/* Function to handle the parsing of RTE Flow item UDP Header. */ +int32_t +ulp_rte_udp_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_udp *udp_spec = item->spec; + const struct rte_flow_item_udp *udp_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = 0; + uint32_t size; + uint16_t dport = 0, sport = 0; + uint16_t dport_mask = 0, sport_mask = 0; + uint32_t cnt; + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP; + struct bnxt *bp = params->ulp_ctx->bp; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_err(bp->dev, "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (udp_spec) { + sport = udp_spec->hdr.src_port; + dport = udp_spec->hdr.dst_port; + } + if (udp_mask) { + sport_mask = udp_mask->hdr.src_port; + dport_mask = udp_mask->hdr.dst_port; + } + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_UDP_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for ipv4 into hdr_field using ipv4 + * header fields + */ + size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(udp_spec, hdr.src_port), + ulp_deference_struct(udp_mask, hdr.src_port), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(udp_spec, hdr.dst_port), + 
ulp_deference_struct(udp_mask, hdr.dst_port), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(udp_spec, hdr.dgram_len), + ulp_deference_struct(udp_mask, hdr.dgram_len), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(udp_spec, hdr.dgram_cksum), + ulp_deference_struct(udp_mask, hdr.dgram_cksum), + ULP_PRSR_ACT_DEFAULT); + + /* Set the udp header bitmap and computed l4 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_UDP; + + ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport, + dport_mask, out_l4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item TCP Header. 
*/
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_tc_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_tc_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = 0;
	uint16_t dport = 0, sport = 0;
	uint16_t dport_mask = 0, sport_mask = 0;
	uint32_t size;
	uint32_t cnt;
	enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
	struct bnxt *bp = params->ulp_ctx->bp;

	/* At most two L4 headers (outer + inner) are supported per flow */
	cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
	if (cnt == 2) {
		netdev_err(bp->dev, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	if (tcp_spec) {
		sport = tcp_spec->hdr.src_port;
		dport = tcp_spec->hdr.dst_port;
	}
	if (tcp_mask) {
		sport_mask = tcp_mask->hdr.src_port;
		dport_mask = tcp_mask->hdr.dst_port;
	}

	if (ulp_rte_prsr_fld_size_validate(params, &idx,
					   BNXT_ULP_PROTO_HDR_TCP_NUM)) {
		netdev_err(bp->dev, "Error parsing protocol header\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields
	 */
	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.src_port),
			     ulp_deference_struct(tcp_mask, hdr.src_port),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.dst_port),
			     ulp_deference_struct(tcp_mask, hdr.dst_port),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.sent_seq),
			     ulp_deference_struct(tcp_mask, hdr.sent_seq),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.recv_ack),
			     ulp_deference_struct(tcp_mask, hdr.recv_ack),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.data_off),
			     ulp_deference_struct(tcp_mask, hdr.data_off),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.tcp_flags),
			     ulp_deference_struct(tcp_mask, hdr.tcp_flags),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.rx_win),
			     ulp_deference_struct(tcp_mask, hdr.rx_win),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.cksum),
			     ulp_deference_struct(tcp_mask, hdr.cksum),
			     ULP_PRSR_ACT_DEFAULT);

	size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
	ulp_tc_prsr_fld_mask(params, &idx, size,
			     ulp_deference_struct(tcp_spec, hdr.tcp_urp),
			     ulp_deference_struct(tcp_mask, hdr.tcp_urp),
			     ULP_PRSR_ACT_DEFAULT);

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) ||
	    ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
		out_l4 = BNXT_ULP_HDR_BIT_I_TCP;

	ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
				     dport_mask, out_l4);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
	return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header.
*/ +int32_t +ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_vxlan *vxlan_spec = item->spec; + const struct rte_flow_item_vxlan *vxlan_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = 0; + uint16_t dport; + uint32_t size; + struct bnxt *bp = params->ulp_ctx->bp; + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_VXLAN_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* + * Copy the rte_flow_item for vxlan into hdr_field using vxlan + * header fields + */ + size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(vxlan_spec, flags), + ulp_deference_struct(vxlan_mask, flags), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(vxlan_spec, rsvd0), + ulp_deference_struct(vxlan_mask, rsvd0), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(vxlan_spec, vni), + ulp_deference_struct(vxlan_mask, vni), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(vxlan_spec, rsvd1), + ulp_deference_struct(vxlan_mask, rsvd1), + ULP_PRSR_ACT_DEFAULT); + + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL); + + dport = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT); + if (!dport) { + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT, + ULP_UDP_PORT_VXLAN); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK, + 
ULP_UDP_PORT_VXLAN_MASK); + } + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow item GRE Header. */ +int32_t +ulp_rte_gre_hdr_handler(const struct rte_flow_item *item, + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item_gre *gre_spec = item->spec; + const struct rte_flow_item_gre *gre_mask = item->mask; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + uint32_t idx = 0; + uint32_t size; + struct bnxt *bp = params->ulp_ctx->bp; + + if (ulp_rte_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_GRE_NUM)) { + netdev_err(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(gre_spec, c_rsvd0_ver), + ulp_deference_struct(gre_mask, c_rsvd0_ver), + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol); + ulp_tc_prsr_fld_mask(params, &idx, size, + ulp_deference_struct(gre_spec, protocol), + ulp_deference_struct(gre_mask, protocol), + ULP_PRSR_ACT_DEFAULT); + + /* Update the hdr_bitmap with GRE */ + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL); + return BNXT_TF_RC_SUCCESS; +} + +/* + * This table has to be indexed based on the rte_flow_item_type that is part of + * DPDK. The below array is list of parsing functions for each of the flow items + * that are supported. 
*/
struct bnxt_ulp_rte_hdr_info rte_ulp_hdr_info[] = {
	/* Item types without a handler are accepted (SUPPORTED) but parsed
	 * as no-ops; NOT_SUPPORTED entries abort the parse.
	 */
	[RTE_FLOW_ITEM_TYPE_END] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_END,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_VOID] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_INVERT] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ANY] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PF] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_VF] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PORT_ID] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_RAW] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ETH] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_eth_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_VLAN] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_vlan_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_IPV4] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_ipv4_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_IPV6] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_ipv6_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_ICMP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_UDP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_udp_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_TCP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_tcp_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_SCTP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_vxlan_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_E_TAG] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_NVGRE] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_MPLS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GRE] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_rte_gre_hdr_handler
	},
	[RTE_FLOW_ITEM_TYPE_FUZZY] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GTP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GTPC] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GTPU] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ESP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GENEVE] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_IPV6_EXT] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_IPV6_ROUTE_EXT] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_MARK] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_META] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GRE_KEY] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_GTP_PSC] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PPPOES] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PPPOED] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_NSH] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_IGMP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_AH] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_HIGIG2] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	},
	[RTE_FLOW_ITEM_TYPE_ECPRI] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = NULL
	},
};

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
+ */ +int32_t +bnxt_ulp_custom_tc_parser_hdr_parse(struct bnxt *bp, + const struct rte_flow_item pattern[], + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_item *item = pattern; + struct bnxt_ulp_rte_hdr_info *hdr_info; + + params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM; + + /* Parse all the items in the pattern */ + while (item && item->type != RTE_FLOW_ITEM_TYPE_END) { + hdr_info = &rte_ulp_hdr_info[item->type]; + if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) { + goto hdr_parser_error; + } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) { + /* call the registered callback handler */ + if (hdr_info->proto_hdr_func) { + if (hdr_info->proto_hdr_func(item, params) != + BNXT_TF_RC_SUCCESS) { + return BNXT_TF_RC_ERROR; + } + } + } + item++; + } + /* update the implied SVIF */ + return ulp_tc_parser_implicit_match_port_process(params); + +hdr_parser_error: + netdev_err(bp->dev, "Truflow parser does not support type %d\n", item->type); + return BNXT_TF_RC_PARSE_ERR; +} + +/* Function to handle the parsing of RTE Flow action queue. */ +int32_t +ulp_rte_queue_act_handler(const struct rte_flow_action *action_item, + struct ulp_tc_parser_params *param) +{ + const struct rte_flow_action_queue *q_info; + struct ulp_tc_act_prop *ap = ¶m->act_prop; + + if (!action_item || !action_item->conf) { + netdev_err(NULL, "Parse Err: invalid queue configuration\n"); + return BNXT_TF_RC_ERROR; + } + + q_info = action_item->conf; + /* Copy the queue into the specific action properties */ + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX], + &q_info->index, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX); + + /* set the queue action header bit */ + ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of RTE Flow action count. 
*/
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
			  struct ulp_tc_parser_params *params)
{
	struct ulp_tc_act_prop *act_prop = &params->act_prop;
	const struct rte_flow_action_count *act_count;

	/* Copy the counter id into the action properties when supplied */
	act_count = action_item->conf;
	if (act_count) {
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
		       &act_count->id,
		       BNXT_ULP_ACT_PROP_SZ_COUNT);
	}

	/* Update the hdr_bitmap with count */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
	return BNXT_TF_RC_SUCCESS;
}

/*
 * This structure has to be indexed based on the rte_flow_action_type that is
 * part of DPDK. The below array is list of parsing functions for each of the
 * flow actions that are supported.
 */
struct bnxt_ulp_rte_act_info rte_ulp_act_info[] = {
	/* Only QUEUE and COUNT are dispatched to handlers; every other
	 * action type aborts the parse as NOT_SUPPORTED.
	 */
	[RTE_FLOW_ACTION_TYPE_END] = {
	.act_type = BNXT_ULP_ACT_TYPE_END,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_VOID] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_PASSTHRU] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_JUMP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_MARK] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_FLAG] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_QUEUE] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_rte_queue_act_handler
	},
	[RTE_FLOW_ACTION_TYPE_DROP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_COUNT] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_rte_count_act_handler
	},
	[RTE_FLOW_ACTION_TYPE_RSS] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_PF] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_VF] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_PORT_ID] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_METER] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SECURITY] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_POP_VLAN] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_POP_MPLS] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_VXLAN_DECAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_IP_ENCAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_IP_DECAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_NVGRE_DECAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_RAW_ENCAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_RAW_DECAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_IPV4_DST] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_IPV6_DST] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_TP_SRC] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_TP_DST] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_MAC_SWAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_DEC_TTL] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_TTL] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_MAC_SRC] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SET_MAC_DST] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_INC_TCP_ACK] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_SAMPLE] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	[RTE_FLOW_ACTION_TYPE_INDIRECT] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	},
	/* Sentinel one past INDIRECT so lookups of the last+1 type stay
	 * in bounds.
	 */
	[RTE_FLOW_ACTION_TYPE_INDIRECT + 1] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	}
};

/* Redirect the flow to the PF when no explicit destination action was
 * given.  Returns a BNXT_TF_RC_* code.
 */
static int
ulp_tc_custom_parser_implicit_redirect_process(struct bnxt *bp, struct ulp_tc_parser_params *params)
{
	enum bnxt_ulp_intf_type intf_type;
	u32 ifindex;
	u16 dst_fid;

	/* No, SR-IOV. So, dst_fid will always be PF's */
	dst_fid = bp->pf.fw_fid;

	/* Get the port db ifindex */
	if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, dst_fid,
					      &ifindex)) {
		netdev_dbg(bp->dev, "Invalid port id\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Get the intf type */
	intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
	if (!intf_type) {
		netdev_dbg(bp->dev, "Invalid port type\n");
		return BNXT_TF_RC_ERROR;
	}

	/* Set the action port */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID, dst_fid);

	return ulp_tc_parser_act_port_set(params, ifindex);
}

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
+ */ +int32_t +bnxt_ulp_custom_tc_parser_act_parse(struct bnxt *bp, + const struct rte_flow_action actions[], + struct ulp_tc_parser_params *params) +{ + const struct rte_flow_action *action_item = actions; + struct bnxt_ulp_rte_act_info *hdr_info; + + /* Parse all the items in the pattern */ + while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) { + hdr_info = &rte_ulp_act_info[action_item->type]; + if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) { + goto act_parser_error; + } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) { + /* call the registered callback handler */ + if (hdr_info->proto_act_func) { + if (hdr_info->proto_act_func(action_item, + params) != + BNXT_TF_RC_SUCCESS) { + return BNXT_TF_RC_ERROR; + } + } + } + action_item++; + } + + if (!ULP_BITMAP_ISSET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE)) + ulp_tc_custom_parser_implicit_redirect_process(bp, params); + + return BNXT_TF_RC_SUCCESS; + +act_parser_error: + netdev_err(NULL, "Truflow parser does not support act %u\n", action_item->type); + return BNXT_TF_RC_ERROR; +} + +/* Function to create the ulp flow. 
*/ +int +bnxt_custom_ulp_flow_create(struct bnxt *bp, u16 src_fid, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct bnxt_ulp_flow_info *flow_info) +{ + struct bnxt_ulp_mapper_parms *mapper_encap_mparms = NULL; + struct bnxt_ulp_mapper_parms mapper_mparms = { 0 }; + struct ulp_tc_parser_params *params = NULL; + struct bnxt_ulp_context *ulp_ctx; + int rc, ret = BNXT_TF_RC_ERROR; + u16 func_id; + u32 fid; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + goto flow_error; + } + + /* Initialize the parser params */ + params = vzalloc(sizeof(*params)); + params->ulp_ctx = ulp_ctx; + + if (bnxt_ulp_cntxt_app_id_get(params->ulp_ctx, ¶ms->app_id)) { + netdev_dbg(bp->dev, "failed to get the app id\n"); + goto flow_error; + } + + /* Set the flow attributes */ + bnxt_custom_ulp_set_dir_attributes(bp, params, src_fid); + + /* copy the device port id and direction for further processing */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_INCOMING_IF, src_fid); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_PORT_ID, src_fid); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG, + BNXT_ULP_INVALID_SVIF_VAL); + + /* Get the function id */ + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "conversion of port to func id failed\n"); + goto flow_error; + } + + /* Protect flow creation */ + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + + /* Allocate a Flow ID to attach all resources for the flow. + * Once allocated, all errors have to walk the list of resources and + * free each of them. 
+ */ + rc = ulp_flow_db_fid_alloc(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + func_id, &fid); + if (rc) { + netdev_dbg(bp->dev, "Unable to allocate flow table entry\n"); + goto release_lock; + } + + /* Parse the rte flow pattern */ + ret = bnxt_ulp_custom_tc_parser_hdr_parse(bp, pattern, params); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + /* Parse the rte flow action */ + ret = bnxt_ulp_custom_tc_parser_act_parse(bp, actions, params); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + params->fid = fid; + params->func_id = func_id; + /* TODO: params->priority = tc_flow_cmd->common.prio; */ + + netdev_dbg(bp->dev, "Flow prio: %u chain: %u\n", + params->priority, params->match_chain_id); + + params->port_id = src_fid; + /* Perform the rte flow post process */ + ret = bnxt_ulp_tc_parser_post_process(params); + if (ret == BNXT_TF_RC_ERROR) + goto free_fid; + else if (ret == BNXT_TF_RC_FID) + goto return_fid; + + /* Dump the rte flow pattern */ + ulp_parser_hdr_info_dump(params); + /* Dump the rte flow action */ + ulp_parser_act_info_dump(params); + + ret = ulp_matcher_pattern_match(params, ¶ms->class_id); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + ret = ulp_matcher_action_match(params, ¶ms->act_tmpl); + if (ret != BNXT_TF_RC_SUCCESS) + goto free_fid; + + bnxt_custom_ulp_init_mapper_params(&mapper_mparms, params, BNXT_ULP_FDB_TYPE_REGULAR); + /* Call the ulp mapper to create the flow in the hardware. 
*/ + ret = ulp_mapper_flow_create(ulp_ctx, &mapper_mparms, NULL); + if (ret) + goto free_fid; + + if (params->tnl_key) { + ret = bnxt_custom_ulp_alloc_mapper_encap_cparams(&mapper_encap_mparms, + &mapper_mparms); + if (ret) + goto mapper_destroy; + } + +return_fid: + flow_info->flow_id = fid; + if (params->tnl_key) { + flow_info->mparms = mapper_encap_mparms; + ether_addr_copy(flow_info->tnl_dmac, params->tnl_dmac); + ether_addr_copy(flow_info->tnl_smac, params->tnl_smac); + flow_info->tnl_ether_type = params->tnl_ether_type; + flow_info->encap_key = params->tnl_key; + flow_info->neigh_key = params->neigh_key; + } + vfree(params); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return 0; + +mapper_destroy: + ulp_mapper_flow_destroy(ulp_ctx, mapper_mparms.flow_type, + mapper_mparms.flow_id, NULL); +free_fid: + vfree(params->tnl_key); + vfree(params->neigh_key); + ulp_flow_db_fid_free(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, fid); +release_lock: + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); +flow_error: + vfree(params); + if (ret == -ENOSPC) + return ret; + else + return (ret == BNXT_TF_RC_PARSE_ERR_NOTSUPP) ? -EOPNOTSUPP : -EIO; +} + +/* + * Function to destroy the ulp flow. + * flow_id: This value is stored in the flow_info structure. + * It's populated during bnxt_custom_ulp_flow_create. + * src_fid: This value is stored in the bp structure (bp->pf.fw_fid). 
+ */ +int +bnxt_custom_ulp_flow_destroy(struct bnxt *bp, u32 flow_id, u16 src_fid) +{ + struct bnxt_ulp_context *ulp_ctx; + u16 func_id; + int ret; + + ulp_ctx = bnxt_ulp_bp_ptr2_cntxt_get(bp); + if (!ulp_ctx) { + netdev_dbg(bp->dev, "ULP context is not initialized\n"); + return -ENOENT; + } + + if (ulp_port_db_port_func_id_get(ulp_ctx, src_fid, &func_id)) { + netdev_dbg(bp->dev, "Conversion of port to func id failed\n"); + return -EINVAL; + } + + ret = ulp_flow_db_validate_flow_func(ulp_ctx, flow_id, func_id); + if (ret) + return ret; + + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + ret = ulp_mapper_flow_destroy(ulp_ctx, BNXT_ULP_FDB_TYPE_REGULAR, + flow_id, NULL); + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + + return ret; +} + +void +bnxt_custom_ulp_flow_query_count(struct bnxt *bp, u32 flow_id, u64 *packets, + u64 *bytes, unsigned long *lastused) +{ + ulp_tf_fc_mgr_query_count_get(bp->ulp_ctx, flow_id, packets, bytes, + lastused, NULL); +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.h new file mode 100644 index 000000000000..4a5b9c3cdb98 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_custom_offload.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023-2023 Broadcom + * All rights reserved. 
*/

#ifndef _ULP_TC_CUSTOM_OFFLOAD_H_
#define _ULP_TC_CUSTOM_OFFLOAD_H_

#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD
#include "ulp_tc_rte_flow.h"

enum bnxt_rte_flow_item_type {
	BNXT_RTE_FLOW_ITEM_TYPE_END = (u32)INT_MIN,
	BNXT_RTE_FLOW_ITEM_TYPE_VXLAN_DECAP,
	BNXT_RTE_FLOW_ITEM_TYPE_LAST
};

enum bnxt_rte_flow_action_type {
	BNXT_RTE_FLOW_ACTION_TYPE_END = (u32)INT_MIN,
	BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
	BNXT_RTE_FLOW_ACTION_TYPE_LAST
};

/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT		13 /* First 3 bits */
/* NOTE(review): mask 0x700 covers bits 8-10, which looks inconsistent with
 * the shift of 13 above — confirm against the users of these macros.
 */
#define ULP_VLAN_PRIORITY_MASK		0x700
#define ULP_VLAN_TAG_MASK		0xFFF /* Last 12 bits*/
#define ULP_UDP_PORT_VXLAN		4789
#define ULP_UDP_PORT_VXLAN_MASK		0XFFFF

/* Ethernet frame types */
#define RTE_ETHER_TYPE_IPV4 0x0800 /**< IPv4 Protocol. */
#define RTE_ETHER_TYPE_IPV6 0x86DD /**< IPv6 Protocol. */
#define RTE_ETHER_TYPE_ARP 0x0806 /**< Arp Protocol. */
#define RTE_ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
#define RTE_ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
#define RTE_ETHER_TYPE_QINQ 0x88A8 /**< IEEE 802.1ad QinQ tagging. */
#define RTE_ETHER_TYPE_QINQ1 0x9100 /**< Deprecated QinQ VLAN. */
#define RTE_ETHER_TYPE_QINQ2 0x9200 /**< Deprecated QinQ VLAN. */
#define RTE_ETHER_TYPE_QINQ3 0x9300 /**< Deprecated QinQ VLAN. */
#define RTE_ETHER_TYPE_PPPOE_DISCOVERY 0x8863 /**< PPPoE Discovery Stage. */
#define RTE_ETHER_TYPE_PPPOE_SESSION 0x8864 /**< PPPoE Session Stage. */
#define RTE_ETHER_TYPE_ETAG 0x893F /**< IEEE 802.1BR E-Tag. */
#define RTE_ETHER_TYPE_1588 0x88F7
/**< IEEE 802.1AS 1588 Precise Time Protocol. */
#define RTE_ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */
#define RTE_ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */
#define RTE_ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */
#define RTE_ETHER_TYPE_MPLS 0x8847 /**< MPLS ethertype. */
#define RTE_ETHER_TYPE_MPLSM 0x8848 /**< MPLS multicast ethertype. */
#define RTE_ETHER_TYPE_ECPRI 0xAEFE /**< eCPRI ethertype (.1Q supported). */

/* Force-enable TOS/proto support for this custom build */
#ifdef ULP_APP_TOS_PROTO_SUPPORT
#undef ULP_APP_TOS_PROTO_SUPPORT
#endif
#define ULP_APP_TOS_PROTO_SUPPORT(x)	1

/* Flow Parser Header Information Structure */
struct bnxt_ulp_rte_hdr_info {
	enum bnxt_ulp_hdr_type hdr_type;
	/* Flow Parser Protocol Header Function Prototype */
	int (*proto_hdr_func)(const struct rte_flow_item *item_list,
			      struct ulp_tc_parser_params *params);
};

/* Flow Parser Action Information Structure */
struct bnxt_ulp_rte_act_info {
	enum bnxt_ulp_act_type act_type;
	/* Flow Parser Protocol Action Function Prototype */
	int (*proto_act_func)(const struct rte_flow_action *action_item,
			      struct ulp_tc_parser_params *params);
};

int
bnxt_custom_ulp_flow_create(struct bnxt *bp, u16 src_fid,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct bnxt_ulp_flow_info *flow_info);
#endif
#endif
diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_handler_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_handler_tbl.c
new file mode 100644
index 000000000000..f125a1b29989
--- /dev/null
+++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_handler_tbl.c
@@ -0,0 +1,142 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt_compat.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_tc_parser.h"

#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
/* The below array is the list of parsing functions for each of the flow
 * dissector keys that are supported.
 * NOTE: Updating this table with new keys requires that the corresponding
 * key sequence also be updated in the table ulp_hdr_parse_sequence[] in
 * ulp_tc_parser.c.
*/
struct bnxt_ulp_tc_hdr_info ulp_hdr_info[] = {
	[FLOW_DISSECTOR_KEY_CONTROL] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_control_key_handler
	},
	[FLOW_DISSECTOR_KEY_BASIC] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_basic_key_handler
	},
	[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_ipv4_addr_handler
	},
	[FLOW_DISSECTOR_KEY_IPV6_ADDRS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_ipv6_addr_handler
	},
	[FLOW_DISSECTOR_KEY_PORTS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_l4_ports_handler
	},
	[FLOW_DISSECTOR_KEY_ETH_ADDRS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_eth_addr_handler
	},
	[FLOW_DISSECTOR_KEY_VLAN] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_vlan_handler
	},
	[FLOW_DISSECTOR_KEY_TCP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tcp_ctrl_handler
	},
	[FLOW_DISSECTOR_KEY_IP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_ip_ctrl_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_KEYID] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_key_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_ipv4_addr_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_ipv6_addr_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_CONTROL] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_control_key_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_PORTS] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_l4_ports_handler
	},
	[FLOW_DISSECTOR_KEY_ENC_IP] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_SUPPORTED,
	.proto_hdr_func = ulp_tc_tnl_ip_ctrl_handler
	},
	/* Sentinel entry sizing the array to FLOW_DISSECTOR_KEY_MAX */
	[FLOW_DISSECTOR_KEY_MAX] = {
	.hdr_type = BNXT_ULP_HDR_TYPE_NOT_SUPPORTED,
	.proto_hdr_func = NULL
	}
};

/* This structure has to be indexed based on the TC flow_action_id.
 * The below array is list of parsing functions for each of the
 * flow actions that are supported.
 */
struct bnxt_ulp_tc_act_info ulp_act_info[] = {
	[FLOW_ACTION_DROP] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_drop_act_handler
	},
	[FLOW_ACTION_GOTO] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_goto_act_handler
	},
	[FLOW_ACTION_TUNNEL_ENCAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_tunnel_encap_act_handler
	},
	[FLOW_ACTION_TUNNEL_DECAP] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_tunnel_decap_act_handler
	},
	[FLOW_ACTION_REDIRECT] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_redirect_act_handler
	},
	[FLOW_ACTION_MIRRED] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_egress_mirror_act_handler
	},
#if defined(HAVE_FLOW_ACTION_MIRRED_INGRESS)
	[FLOW_ACTION_MIRRED_INGRESS] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_ingress_mirror_act_handler
	},
#endif
	[FLOW_ACTION_MANGLE] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_mangle_act_handler
	},
	[FLOW_ACTION_CSUM] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_csum_act_handler
	},
	[FLOW_ACTION_VLAN_PUSH] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_vlan_push_act_handler
	},
	[FLOW_ACTION_VLAN_POP] = {
	.act_type = BNXT_ULP_ACT_TYPE_SUPPORTED,
	.proto_act_func = ulp_tc_vlan_pop_act_handler
	},
	/* Sentinel entry sizing the array to NUM_FLOW_ACTIONS */
	[NUM_FLOW_ACTIONS] = {
	.act_type = BNXT_ULP_ACT_TYPE_NOT_SUPPORTED,
	.proto_act_func = NULL
	}
};

#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.c
b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.c new file mode 100644 index 000000000000..d269beb89cde --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.c @@ -0,0 +1,3623 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "bnxt_compat.h" +#include "ulp_tc_parser.h" +#include "ulp_linux.h" +#include "bnxt_ulp.h" +#include "bnxt_tf_common.h" +#include "ulp_matcher.h" +#include "ulp_utils.h" +#include "ulp_port_db.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_template_db_tbl.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +/* Local defines for the parsing functions */ +#define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */ +#define ULP_VLAN_PRIORITY_MASK 0x700 +#define ULP_VLAN_TAG_MASK 0xFFF /* Last 12 bits*/ +#define ULP_UDP_PORT_VXLAN 4789 + +struct ulp_parser_vxlan { + u8 flags; + u8 rsvd0[3]; + u8 vni[3]; + u8 rsvd1; +}; + +struct tc_match { + void *key; + void *mask; +}; + +/* Utility function to copy field spec items */ +static struct ulp_tc_hdr_field *ulp_tc_parser_fld_copy(struct ulp_tc_hdr_field + *field, + const void *buffer, + u32 size) +{ + field->size = size; + memcpy(field->spec, buffer, field->size); + field++; + return field; +} + +/* Utility function to update the field_bitmap */ +static void ulp_tc_parser_field_bitmap_update(struct ulp_tc_parser_params + *params, u32 idx, + enum bnxt_ulp_prsr_action + prsr_act) +{ + struct ulp_tc_hdr_field *field; + + field = ¶ms->hdr_field[idx]; + if (ulp_bitmap_notzero(field->mask, field->size)) { + ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx); + if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE)) + ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx); + /* Not exact match */ + if (!ulp_bitmap_is_ones(field->mask, field->size)) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_WC_MATCH, 1); + } else { 
+ ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx); + } +} + +/* Utility function to copy field spec and masks items */ +static void ulp_tc_prsr_fld_mask(struct ulp_tc_parser_params *params, + u32 *idx, u32 size, const void *spec_buff, + const void *mask_buff, + enum bnxt_ulp_prsr_action prsr_act) +{ + struct ulp_tc_hdr_field *field = ¶ms->hdr_field[*idx]; + + /* update the field size */ + field->size = size; + + /* copy the mask specifications only if mask is not null */ + if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) { + memcpy(field->mask, mask_buff, size); + ulp_tc_parser_field_bitmap_update(params, *idx, prsr_act); + } + + /* copy the protocol specifications only if mask is not null*/ + if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size)) + memcpy(field->spec, spec_buff, size); + + /* Increment the index */ + *idx = *idx + 1; +} + +static int ulp_tc_prsr_fld_size_validate(struct ulp_tc_parser_params *params, + u32 *idx, u32 size) +{ + if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) + return -EINVAL; + *idx = params->field_idx; + params->field_idx += size; + return 0; +} + +/* Function to handle the update of proto header based on field values */ +static void ulp_tc_l2_proto_type_update(struct ulp_tc_parser_params *param, + u16 type, u32 in_flag) +{ + if (type == cpu_to_be16(ETH_P_IP)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } else if (type == cpu_to_be16(ETH_P_IPV6)) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1); + } + } +} + +/* The ulp_hdr_info[] table is 
indexed by the dissector key_id values in + * ascending order. However parsing the headers in that sequence may not + * be desirable. For example, we might want to process the eth header + * first before parsing the IP addresses, as the parser might expect + * certain header bits to be set before processing the next layer headers. + * The below table prescribes the sequence that we want to parse the + * headers in. + */ +static int ulp_hdr_parse_sequence[] = { + FLOW_DISSECTOR_KEY_ENC_CONTROL, + FLOW_DISSECTOR_KEY_ENC_IP, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, + FLOW_DISSECTOR_KEY_ENC_PORTS, + FLOW_DISSECTOR_KEY_ENC_KEYID, + + FLOW_DISSECTOR_KEY_CONTROL, + FLOW_DISSECTOR_KEY_BASIC, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + FLOW_DISSECTOR_KEY_VLAN, + FLOW_DISSECTOR_KEY_IP, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + FLOW_DISSECTOR_KEY_PORTS, + FLOW_DISSECTOR_KEY_TCP +}; + +#define NUM_DISSECTOR_KEYS \ + (sizeof(ulp_hdr_parse_sequence) / sizeof(int)) + +static unsigned int ulp_supported_keys = + (BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IP) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_ENC_IP) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)); +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Function to handle the post processing of the computed + * fields for the interface. 
+ */ +static void bnxt_ulp_comp_fld_intf_update(struct ulp_tc_parser_params + *params) +{ + enum bnxt_ulp_direction_type dir; + u16 port_id, parif, svif, vf_roce; + u32 ifindex; + u32 mtype; + u8 udcc; + + /* get the direction details */ + dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION); + + /* read the port id details */ + port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF); + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, + &ifindex)) { + netdev_dbg(params->ulp_ctx->bp->dev, + "ParseErr:Portid is not valid\n"); + return; + } + + /* Set VF ROCE Support*/ + if (ulp_port_db_vf_roce_get(params->ulp_ctx, port_id, &vf_roce)) { + netdev_dbg(params->ulp_ctx->bp->dev, "ParseErr:port_id %d is not valid\n", + port_id); + return; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_ROCE_EN, vf_roce); + + /* Set UDCC Support*/ + if (ulp_port_db_udcc_get(params->ulp_ctx, port_id, &udcc)) { + netdev_dbg(params->ulp_ctx->bp->dev, "ParseErr:port_id %d is not valid\n", + port_id); + return; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_UDCC_EN, udcc); + + if (dir == BNXT_ULP_DIR_INGRESS) { + /* Set port PARIF */ + if (ulp_port_db_parif_get(params->ulp_ctx, ifindex, + BNXT_ULP_DRV_FUNC_PARIF, &parif)) { + netdev_dbg(params->ulp_ctx->bp->dev, + "ParseErr:ifindex is not valid\n"); + return; + } + /* Note: + * We save the drv_func_parif into CF_IDX of phy_port_parif, + * since that index is currently referenced by ingress templates + * for datapath flows. If in the future we change the parser to + * save it in the CF_IDX of drv_func_parif we also need to update + * the template. 
+ */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF, + parif); + + /* Set port SVIF */ + if (ulp_port_db_svif_get(params->ulp_ctx, ifindex, BNXT_ULP_PHY_PORT_SVIF, &svif)) { + netdev_dbg(params->ulp_ctx->bp->dev, "ParseErr:ifindex is not valid\n"); + return; + } + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_SVIF, svif); + } else { + /* Get the match port type */ + mtype = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_MATCH_PORT_TYPE); + if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) { + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP, + 1); + /* Set VF func PARIF */ + if (ulp_port_db_parif_get(params->ulp_ctx, ifindex, + BNXT_ULP_VF_FUNC_PARIF, + &parif)) { + netdev_dbg(params->ulp_ctx->bp->dev, + "ParseErr:ifindex is not valid\n"); + return; + } + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_VF_FUNC_PARIF, + parif); + + } else { + /* Set DRV func PARIF */ + if (ulp_port_db_parif_get(params->ulp_ctx, ifindex, + BNXT_ULP_DRV_FUNC_PARIF, + &parif)) { + netdev_dbg(params->ulp_ctx->bp->dev, + "ParseErr:ifindex is not valid\n"); + return; + } + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_DRV_FUNC_PARIF, + parif); + } + if (mtype == BNXT_ULP_INTF_TYPE_PF) { + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF, + 1); + } + } +} + +static int ulp_post_process_normal_flow(struct ulp_tc_parser_params *params) +{ + enum bnxt_ulp_intf_type match_port_type, act_port_type; + enum bnxt_ulp_direction_type dir; + u32 act_port_set; + + /* Get the computed details */ + dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION); + match_port_type = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_MATCH_PORT_TYPE); + act_port_type = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_ACT_PORT_TYPE); + act_port_set = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_ACT_PORT_IS_SET); + + /* set the flow direction in the proto and action header */ + if (dir == BNXT_ULP_DIR_EGRESS) { + ULP_BITMAP_SET(params->hdr_bitmap.bits, + 
BNXT_ULP_FLOW_DIR_BITMASK_EGR); + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_FLOW_DIR_BITMASK_EGR); + } else { + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_FLOW_DIR_BITMASK_ING); + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_FLOW_DIR_BITMASK_ING); + } + + /* Evaluate the VF to VF flag */ + if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP && + match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_VF_TO_VF); + + /* Update the decrement ttl computational fields */ + if (ULP_BITMAP_ISSET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_DEC_TTL)) { + /* Check that vxlan proto is included and vxlan decap + * action is not set then decrement tunnel ttl. + * Similarly add GRE and NVGRE in future. + */ + if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_T_VXLAN) && + !ULP_BITMAP_ISSET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_VXLAN_DECAP))) { + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1); + } else { + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1); + } + } + + /* Merge the hdr_fp_bit into the proto header bit */ + params->hdr_bitmap.bits |= params->hdr_fp_bit.bits; + + /* Update the comp fld fid */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid); + + /* Update the comp fld app_priority */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_APP_PRIORITY, params->priority); + + /* Update the comp fld em_for_ipv6 */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_EM_FOR_TC, + SUPPORT_CFA_EM_FOR_TC); + + /* set the L2 context usage shall change it later */ + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_L2_CNTXT_ID); + + /* Update the computed interface parameters */ + bnxt_ulp_comp_fld_intf_update(params); + + /* TBD: Handle the flow rejection scenarios */ + return 0; +} + +/* Function to handle the post processing of the parsing details */ +int bnxt_ulp_tc_parser_post_process(struct ulp_tc_parser_params *params) +{ + 
ulp_post_process_normal_flow(params); + + /* TBD: Do we need tunnel post processing in kernel mode ? */ + return BNXT_TF_RC_NORMAL; +} + +/* Function to compute the flow direction based on the match port details */ +static void bnxt_ulp_tc_parser_direction_compute(struct ulp_tc_parser_params + *params) +{ + enum bnxt_ulp_intf_type match_port_type; + + /* Get the match port type */ + match_port_type = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_MATCH_PORT_TYPE); + + /* If ingress flow and matchport is vf rep then dir is egress*/ + if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) && + match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) { + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION, + BNXT_ULP_DIR_EGRESS); + } else { + /* Assign the input direction */ + if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION, + BNXT_ULP_DIR_INGRESS); + else + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION, + BNXT_ULP_DIR_EGRESS); + } +} + +static int ulp_tc_parser_svif_set(struct ulp_tc_parser_params *params, + u32 ifindex, u16 mask) +{ + struct ulp_tc_hdr_field *hdr_field; + enum bnxt_ulp_svif_type svif_type; + enum bnxt_ulp_intf_type port_type; + enum bnxt_ulp_direction_type dir; + u16 svif; + + /* SVIF already set, multiple source not supported */ + if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) != + BNXT_ULP_INVALID_SVIF_VAL) + return BNXT_TF_RC_ERROR; + + /* Get port type details */ + port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); + if (port_type == BNXT_ULP_INTF_TYPE_INVALID) + return BNXT_TF_RC_ERROR; + + /* Update the match port type */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type); + + /* compute the direction */ + bnxt_ulp_tc_parser_direction_compute(params); + + /* Get the computed direction */ + dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION); + if (dir == BNXT_ULP_DIR_INGRESS) { + svif_type = BNXT_ULP_PHY_PORT_SVIF; + } else { + if 
(port_type == BNXT_ULP_INTF_TYPE_VF_REP) + svif_type = BNXT_ULP_VF_FUNC_SVIF; + else + svif_type = BNXT_ULP_DRV_FUNC_SVIF; + } + ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type, + &svif); + svif = cpu_to_be16(svif); + hdr_field = ¶ms->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX]; + memcpy(hdr_field->spec, &svif, sizeof(svif)); + memcpy(hdr_field->mask, &mask, sizeof(mask)); + hdr_field->size = sizeof(svif); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG, + be16_to_cpu(svif)); + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_parser_implicit_match_port_process(struct ulp_tc_parser_params + *params) +{ + int rc = BNXT_TF_RC_ERROR; + u16 svif_mask = 0xFFFF; + u16 port_id = 0; + u32 ifindex; + + if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) != + BNXT_ULP_INVALID_SVIF_VAL) + return BNXT_TF_RC_SUCCESS; + + /* SVIF not set. So get the port id */ + port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF); + + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id, + &ifindex)) + return rc; + + /* Update the SVIF details */ + rc = ulp_tc_parser_svif_set(params, ifindex, svif_mask); + + /* If no ETH header match added for some chain filters, + * add the SVIF as the only match header bit. 
+ */ + if (!(ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) && + !(ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_L2_FILTER))) + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF); + + return rc; +} +#endif + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +static void bnxt_ulp_flow_rule_match(struct flow_rule *rule, unsigned int key, + void *match) +{ + switch (key) { + case FLOW_DISSECTOR_KEY_CONTROL: + flow_rule_match_control(rule, match); + break; + case FLOW_DISSECTOR_KEY_BASIC: + flow_rule_match_basic(rule, match); + break; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + flow_rule_match_ipv4_addrs(rule, match); + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + flow_rule_match_ipv6_addrs(rule, match); + break; + case FLOW_DISSECTOR_KEY_PORTS: + flow_rule_match_ports(rule, match); + break; + case FLOW_DISSECTOR_KEY_ETH_ADDRS: + flow_rule_match_eth_addrs(rule, match); + break; + case FLOW_DISSECTOR_KEY_VLAN: + flow_rule_match_vlan(rule, match); + break; + case FLOW_DISSECTOR_KEY_IP: + flow_rule_match_ip(rule, match); + break; + case FLOW_DISSECTOR_KEY_TCP: + flow_rule_match_tcp(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_KEYID: + flow_rule_match_enc_keyid(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS: + flow_rule_match_enc_ipv4_addrs(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS: + flow_rule_match_enc_ipv6_addrs(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_CONTROL: + flow_rule_match_enc_control(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_PORTS: + flow_rule_match_enc_ports(rule, match); + break; + case FLOW_DISSECTOR_KEY_ENC_IP: + flow_rule_match_enc_ip(rule, match); + break; + } +} + +static struct flow_dissector_key_eth_addrs eth_addr_null = { + { 0, 0, 0, 0, 0, 0 }, /* dst */ + { 0, 0, 0, 0, 0, 0 } /* src */ +}; + +/* Return true if eth addrs should be added implicitly. + * Otherwise, return false. 
+ */ +static bool bnxt_ulp_tc_is_implicit_eth_addrs(struct ulp_tc_parser_params + *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + /* ETH_ADDRS key is present in used_keys ? or have we + * already added eth addrs implicitly ? + */ + if ((used_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) || + params->implicit_eth_parsed) + return false; + + switch (key) { + case FLOW_DISSECTOR_KEY_VLAN: + case FLOW_DISSECTOR_KEY_IP: + return true; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + if (params->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + return true; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + if (params->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + return true; + break; + case FLOW_DISSECTOR_KEY_BASIC: + if (!params->addr_type && + (params->n_proto == cpu_to_be16(ETH_P_IP) || + params->n_proto == cpu_to_be16(ETH_P_IPV6))) + return true; + break; + default: + break; + } + return false; +} + +static int bnxt_ulp_tc_parse_implicit_eth_addrs(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct tc_match match; + int rc; + + hdr_info = &ulp_hdr_info[FLOW_DISSECTOR_KEY_ETH_ADDRS]; + match.key = ð_addr_null; + match.mask = ð_addr_null; + + rc = hdr_info->proto_hdr_func(bp, params, &match); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + params->implicit_eth_parsed = true; + return rc; +} + +static struct flow_dissector_key_ipv4_addrs ipv4_addr_null = { 0, 0 }; + +static struct flow_dissector_key_ipv6_addrs ipv6_addr_null = { + {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}}, /* src */ + {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} /* dst */ +}; + +static struct flow_dissector_key_ip ip_ctrl_null = { 0, 0 }; + +static bool bnxt_ulp_tc_is_implicit_ip_ctrl(struct ulp_tc_parser_params + *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + if (((key == FLOW_DISSECTOR_KEY_IPV4_ADDRS && + params->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) || + (key == 
FLOW_DISSECTOR_KEY_IPV6_ADDRS && + params->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)) && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IP)) == 0) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_tnl_ip_ctrl(struct ulp_tc_parser_params + *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + if (((key == FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS && + params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) || + (key == FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS && + params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)) && + (used_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) == 0) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_ipv4_addrs(enum flow_dissector_key_id key, + unsigned int used_keys, + u16 n_proto) +{ + if (key == FLOW_DISSECTOR_KEY_IP && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) == 0 && + n_proto == cpu_to_be16(ETH_P_IP)) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_ipv6_addrs(enum flow_dissector_key_id key, + unsigned int used_keys, + u16 n_proto) +{ + if (key == FLOW_DISSECTOR_KEY_IP && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) == 0 && + n_proto == cpu_to_be16(ETH_P_IPV6)) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_tnl_ipv4_addrs(enum + flow_dissector_key_id key, + unsigned int used_keys, + u16 n_proto) +{ + if (key == FLOW_DISSECTOR_KEY_ENC_IP && + (used_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) == 0 && + n_proto == cpu_to_be16(ETH_P_IP)) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_ipv4(struct ulp_tc_parser_params *params, + unsigned int used_keys) +{ + if (!params->implicit_ipv4_parsed && + params->n_proto == cpu_to_be16(ETH_P_IP) && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IP)) == 0 && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) == 0) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_ipv6(struct ulp_tc_parser_params *params, + unsigned int used_keys) +{ + if 
(!params->implicit_ipv6_parsed && + params->n_proto == cpu_to_be16(ETH_P_IPV6) && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IP)) == 0 && + (used_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) == 0) + return true; + + return false; +} + +static int bnxt_ulp_add_implicit_ip_ctrl(struct bnxt *bp, + struct ulp_tc_parser_params *params, + enum flow_dissector_key_id key) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct tc_match match; + + hdr_info = &ulp_hdr_info[key]; + match.key = &ip_ctrl_null; + match.mask = &ip_ctrl_null; + + return hdr_info->proto_hdr_func(bp, params, &match); +} + +static int bnxt_ulp_tc_parse_implicit_ip_ctrl(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + return bnxt_ulp_add_implicit_ip_ctrl(bp, params, + FLOW_DISSECTOR_KEY_IP); +} + +static int bnxt_ulp_tc_parse_implicit_tnl_ip_ctrl(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + return bnxt_ulp_add_implicit_ip_ctrl(bp, params, + FLOW_DISSECTOR_KEY_ENC_IP); +} + +static int bnxt_ulp_tc_parse_implicit_ipv4_addrs(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct tc_match match; + + hdr_info = &ulp_hdr_info[FLOW_DISSECTOR_KEY_IPV4_ADDRS]; + match.key = &ipv4_addr_null; + match.mask = &ipv4_addr_null; + + /* addr_type is implicit in this case; i.e, set to zero + * in KEY_CONTROL; so set it before invoking the handler. + */ + params->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + + return hdr_info->proto_hdr_func(bp, params, &match); +} + +static int bnxt_ulp_tc_parse_implicit_ipv6_addrs(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct tc_match match; + + hdr_info = &ulp_hdr_info[FLOW_DISSECTOR_KEY_IPV6_ADDRS]; + match.key = &ipv6_addr_null; + match.mask = &ipv6_addr_null; + + /* addr_type is implicit in this case; i.e, set to zero + * in KEY_CONTROL; so set it before invoking the handler. 
+ */ + params->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + + return hdr_info->proto_hdr_func(bp, params, &match); +} + +static int bnxt_ulp_tc_parse_implicit_ipv4(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + int rc; + + rc = bnxt_ulp_tc_parse_implicit_ip_ctrl(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + rc = bnxt_ulp_tc_parse_implicit_ipv4_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + params->implicit_ipv4_parsed = true; + return rc; +} + +static int bnxt_ulp_tc_parse_implicit_ipv6(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + int rc; + + rc = bnxt_ulp_tc_parse_implicit_ip_ctrl(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + rc = bnxt_ulp_tc_parse_implicit_ipv6_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + params->implicit_ipv6_parsed = true; + return rc; +} + +static struct flow_dissector_key_ports tcp_ports_null = {{ 0 }}; +static struct flow_dissector_key_tcp tcp_ctrl_null = { 0 }; + +static bool bnxt_ulp_tc_is_implicit_tcp_ctrl(enum flow_dissector_key_id key, + unsigned int used_keys) +{ + if (key == FLOW_DISSECTOR_KEY_PORTS && + (used_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) == 0) + return true; + + return false; +} + +static bool bnxt_ulp_tc_is_implicit_tcp_ports(enum flow_dissector_key_id key, + unsigned int used_keys) +{ + if (key == FLOW_DISSECTOR_KEY_TCP && + (used_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) == 0) + return true; + + return false; +} + +static int bnxt_ulp_tc_parse_implicit_tcp_ctrl(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct tc_match match; + + hdr_info = &ulp_hdr_info[FLOW_DISSECTOR_KEY_TCP]; + match.key = &tcp_ctrl_null; + match.mask = &tcp_ctrl_null; + + return hdr_info->proto_hdr_func(bp, params, &match); +} + +static int bnxt_ulp_tc_parse_implicit_tcp_ports(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info; + struct 
tc_match match; + + hdr_info = &ulp_hdr_info[FLOW_DISSECTOR_KEY_PORTS]; + match.key = &tcp_ports_null; + match.mask = &tcp_ports_null; + + return hdr_info->proto_hdr_func(bp, params, &match); +} + +static int bnxt_ulp_tc_resolve_tnl_ipv4(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct flow_rule *rule) +{ + struct flow_match_ipv4_addrs match = { 0 }; + struct bnxt_tc_l2_key l2_info = { 0 }; + struct ip_tunnel_key tun_key = { 0 }; + int rc; + + flow_rule_match_enc_ipv4_addrs(rule, &match); + + /* If we are not matching on tnl_sip, use PF's mac as tnl_dmac */ + if (!match.mask->src) { + ether_addr_copy(params->tnl_dmac, bp->pf.mac_addr); + eth_zero_addr(params->tnl_smac); + return BNXT_TF_RC_SUCCESS; + } + + /* Resolve tnl hdrs only if we are matching on tnl_sip */ + tun_key.u.ipv4.dst = match.key->src; + tun_key.tp_dst = 4789; + + rc = bnxt_tc_resolve_ipv4_tunnel_hdrs(bp, NULL, &tun_key, &l2_info, + NULL); + if (rc != 0) + return BNXT_TF_RC_ERROR; + + ether_addr_copy(params->tnl_dmac, l2_info.smac); + ether_addr_copy(params->tnl_smac, l2_info.dmac); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_tc_resolve_tnl_ipv6(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct flow_rule *rule) +{ + struct flow_match_ipv6_addrs match = { 0 }; + struct bnxt_tc_l2_key l2_info = { 0 }; + struct ip_tunnel_key tun_key = { 0 }; + int rc; + + flow_rule_match_enc_ipv6_addrs(rule, &match); + + /* If we are not matching on tnl_sip, use PF's mac as tnl_dmac */ + if (!match.mask->src.s6_addr32[0] && !match.mask->src.s6_addr32[1] && + !match.mask->src.s6_addr32[2] && !match.mask->src.s6_addr32[3]) { + ether_addr_copy(params->tnl_dmac, bp->pf.mac_addr); + eth_zero_addr(params->tnl_smac); + return BNXT_TF_RC_SUCCESS; + } + + /* Resolve tnl hdrs only if we are matching on tnl_sip */ + tun_key.u.ipv6.dst = match.key->src; + tun_key.tp_dst = 4789; + + rc = bnxt_tc_resolve_ipv6_tunnel_hdrs(bp, NULL, &tun_key, &l2_info, + NULL); + if (rc) + return 
BNXT_TF_RC_ERROR; + + ether_addr_copy(params->tnl_dmac, l2_info.smac); + ether_addr_copy(params->tnl_smac, l2_info.dmac); + + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_ulp_tc_resolve_tnl_hdrs(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct flow_rule *rule) +{ + if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + return bnxt_ulp_tc_resolve_tnl_ipv4(bp, params, rule); + else if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + return bnxt_ulp_tc_resolve_tnl_ipv6(bp, params, rule); + else + return BNXT_TF_RC_ERROR; +} + +static bool bnxt_ulp_tc_is_l4_key(enum flow_dissector_key_id key) +{ + if (key == FLOW_DISSECTOR_KEY_PORTS || key == FLOW_DISSECTOR_KEY_TCP) + return true; + + return false; +} + +static int bnxt_ulp_tc_parse_pre_process(struct bnxt *bp, + struct ulp_tc_parser_params *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + int rc = BNXT_TF_RC_SUCCESS; + + if (bnxt_ulp_tc_is_implicit_eth_addrs(params, key, used_keys)) { + rc = bnxt_ulp_tc_parse_implicit_eth_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_tnl_ip_ctrl(params, key, used_keys)) { + rc = bnxt_ulp_tc_parse_implicit_tnl_ip_ctrl(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_ip_ctrl(params, key, used_keys)) { + rc = bnxt_ulp_tc_parse_implicit_ip_ctrl(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_l4_key(key)) { + if (bnxt_ulp_tc_is_implicit_eth_addrs(params, + FLOW_DISSECTOR_KEY_BASIC, + used_keys)) + bnxt_ulp_tc_parse_implicit_eth_addrs(bp, params); + if (bnxt_ulp_tc_is_implicit_ipv4(params, used_keys)) + bnxt_ulp_tc_parse_implicit_ipv4(bp, params); + else if (bnxt_ulp_tc_is_implicit_ipv6(params, used_keys)) + bnxt_ulp_tc_parse_implicit_ipv6(bp, params); + } + + if (params->ip_proto == IPPROTO_TCP && + bnxt_ulp_tc_is_implicit_tcp_ports(key, used_keys)) { + rc = 
bnxt_ulp_tc_parse_implicit_tcp_ports(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + return rc; +} + +#ifdef HAVE_FLOW_DISSECTOR_KEY_VLAN_TPID +static int bnxt_ulp_tc_parse_vlan_tpid(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct flow_rule *rule) +{ + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + params->vlan_tpid = match.key->vlan_tpid; + params->vlan_tpid_mask = match.mask->vlan_tpid; + + return BNXT_TF_RC_SUCCESS; +} +#else /* HAVE_FLOW_DISSECTOR_KEY_VLAN_TPID */ +static int bnxt_ulp_tc_parse_vlan_tpid(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct flow_rule *rule) +{ + return BNXT_TF_RC_ERROR; +} +#endif /* HAVE_FLOW_DISSECTOR_KEY_VLAN_TPID */ + +static int bnxt_ulp_tc_parse_post_process(struct bnxt *bp, + struct flow_rule *rule, + struct ulp_tc_parser_params *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + int rc = BNXT_TF_RC_SUCCESS; + + /* Resolve tnl L2 headers before parsing other tnl keys */ + if (key == FLOW_DISSECTOR_KEY_ENC_CONTROL) { + rc = bnxt_ulp_tc_resolve_tnl_hdrs(bp, params, rule); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + /* Pre process the tpid so eth handler can set it */ + if (key == FLOW_DISSECTOR_KEY_BASIC && + (used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN))) { + rc = bnxt_ulp_tc_parse_vlan_tpid(bp, params, rule); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_tnl_ipv4_addrs(key, used_keys, + params->n_proto)) { + rc = bnxt_ulp_tc_parse_implicit_ipv4_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_ipv4_addrs(key, used_keys, + params->n_proto)) { + rc = bnxt_ulp_tc_parse_implicit_ipv4_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_ipv6_addrs(key, used_keys, + params->n_proto)) { + rc = bnxt_ulp_tc_parse_implicit_ipv6_addrs(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if 
(params->ip_proto == IPPROTO_TCP && + bnxt_ulp_tc_is_implicit_tcp_ctrl(key, used_keys)) { + rc = bnxt_ulp_tc_parse_implicit_tcp_ctrl(bp, params); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + return rc; +} + +static int bnxt_ulp_tc_parse_hdr_key(struct bnxt *bp, struct flow_rule *rule, + struct ulp_tc_parser_params *params, + enum flow_dissector_key_id key, + unsigned int used_keys) +{ + struct bnxt_ulp_tc_hdr_info *hdr_info = &ulp_hdr_info[key]; + int rc = BNXT_TF_RC_PARSE_ERR; + struct tc_match match; + + if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) { + netdev_dbg(bp->dev, + "Truflow parser does not support type %d\n", key); + return rc; + } + + rc = bnxt_ulp_tc_parse_pre_process(bp, params, key, used_keys); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + bnxt_ulp_flow_rule_match(rule, key, &match); + + /* call the registered callback handler */ + rc = hdr_info->proto_hdr_func(bp, params, &match); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + rc = bnxt_ulp_tc_parse_post_process(bp, rule, params, key, used_keys); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + + return rc; +} + +static int bnxt_ulp_tc_validate_keys(struct bnxt *bp, unsigned int used_keys) +{ + unsigned int keys; + + /* KEY_CONTROL and KEY_BASIC are mandatory to form a meaningful key */ + if ((used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_dbg(bp->dev, "%s: Invalid keys: 0x%x\n", + __func__, used_keys); + return -EINVAL; + } + + keys = used_keys & ~(ulp_supported_keys); + if (keys) { + netdev_dbg(bp->dev, "%s: Unsupported keys: 0x%x\n", + __func__, keys); + return -EOPNOTSUPP; + } + + return 0; +} + +/* Function to handle the parsing of TC Flows and placing + * the TC flow match fields into the ulp structures. 
+ */ +int bnxt_ulp_tc_parser_hdr_parse(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct ulp_tc_parser_params *params) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd); + struct flow_dissector *dissector = rule->match.dissector; + unsigned int used_keys = dissector->used_keys; + enum flow_dissector_key_id key; + int rc; + int i; + + rc = bnxt_ulp_tc_validate_keys(bp, used_keys); + if (rc) + return rc; + + netdev_dbg(bp->dev, "%s: Used keys:0x%x\n", __func__, used_keys); + params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM; + + /* Parse all the keys in the rule */ + for (i = 0; i < NUM_DISSECTOR_KEYS; i++) { + key = ulp_hdr_parse_sequence[i]; + + /* Key not present in the rule ? */ + if (!flow_rule_match_key(rule, key)) + continue; + + rc = bnxt_ulp_tc_parse_hdr_key(bp, rule, params, key, + used_keys); + if (rc != BNXT_TF_RC_SUCCESS) + return rc; + } + + if (bnxt_ulp_tc_is_implicit_eth_addrs(params, FLOW_DISSECTOR_KEY_BASIC, + used_keys)) + bnxt_ulp_tc_parse_implicit_eth_addrs(bp, params); + if (bnxt_ulp_tc_is_implicit_ipv4(params, used_keys)) + bnxt_ulp_tc_parse_implicit_ipv4(bp, params); + else if (bnxt_ulp_tc_is_implicit_ipv6(params, used_keys)) + bnxt_ulp_tc_parse_implicit_ipv6(bp, params); + + /* update the implied SVIF */ + return ulp_tc_parser_implicit_match_port_process(params); +} + +/* Function to handle the implicit action port id */ +int ulp_tc_parser_implicit_act_port_process(struct bnxt *bp, + struct ulp_tc_parser_params + *params) +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry implicit_port_act; +#else + struct tcf_mirred implicit_port_act; +#endif + /* Read the action port set bit */ + if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) { + /* Already set, so just exit */ + return BNXT_TF_RC_SUCCESS; + } + +#ifdef HAVE_FLOW_OFFLOAD_H + implicit_port_act.dev = bp->dev; +#else + implicit_port_act.tcfm_dev = bp->dev; +#endif + return ulp_tc_redirect_act_handler(bp, params, &implicit_port_act); 
+} + +#ifdef HAVE_FLOW_OFFLOAD_H +static int ulp_tc_parser_process_classid(struct bnxt *bp, + struct ulp_tc_parser_params *params, + u32 classid) +{ + struct ulp_tc_act_prop *act = ¶ms->act_prop; + u16 queue_id = TC_H_MIN(classid); + u32 mtype; + + mtype = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE); + if (mtype != BNXT_ULP_INTF_TYPE_PF) { + netdev_dbg(bp->dev, "Queue action on invalid port type: %d\n", + mtype); + return BNXT_TF_RC_PARSE_ERR_NOTSUPP; + } + + netdev_dbg(bp->dev, "%s: classid: 0x%x queue_id: %d\n", + __func__, classid, queue_id); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX], + &queue_id, BNXT_ULP_ACT_PROP_SZ_QUEUE_INDEX); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of TC Flows and placing + * the TC flow actions into the ulp structures. + */ +int bnxt_ulp_tc_parser_act_parse(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct ulp_tc_parser_params *params) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd); + struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; + struct flow_action *flow_action = &rule->action; + struct bnxt_ulp_tc_act_info *act_info; + struct flow_action_entry *act; + int rc = BNXT_TF_RC_ERROR; + int i; + + if (!flow_action_has_entries(flow_action) && !tc_flow_cmd->classid) { + netdev_dbg(bp->dev, "no actions\n"); + return rc; + } + + if (!flow_action_basic_hw_stats_check(flow_action, extack)) + return rc; + + if (tc_flow_cmd->classid) { + rc = ulp_tc_parser_process_classid(bp, params, + tc_flow_cmd->classid); + if (rc == BNXT_TF_RC_SUCCESS) + goto done; + return rc; + } + + /* Parse all the actions in the rule */ + flow_action_for_each(i, act, flow_action) { + act_info = &ulp_act_info[act->id]; + + if (act_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) { + netdev_dbg(bp->dev, + "Truflow parser does not support act %d\n", + act->id); + return rc; + } + + if 
(act_info->proto_act_func) { + if (act_info->proto_act_func(bp, params, act) != + BNXT_TF_RC_SUCCESS) { + return rc; + } + } + } + +done: + /* Set count action in the action bitmap */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT); + + /* update the implied port details */ + if (!ULP_BITMAP_ISSET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_QUEUE)) + ulp_tc_parser_implicit_act_port_process(bp, params); + + return BNXT_TF_RC_SUCCESS; +} + +#else /* HAVE_FLOW_OFFLOAD_H */ + +static enum flow_action_id tcf_exts_to_act_id(const struct tc_action *tc_act) +{ + enum flow_action_id act_id; + + if (is_tcf_gact_shot(tc_act)) { /* Drop */ + act_id = FLOW_ACTION_DROP; + } else if (is_tcf_mirred_egress_redirect(tc_act)) { /* Redirect */ + act_id = FLOW_ACTION_REDIRECT; + } else if (is_tcf_tunnel_set(tc_act)) { /* Tnl encap */ + act_id = FLOW_ACTION_TUNNEL_ENCAP; + } else if (is_tcf_tunnel_release(tc_act)) { /* Tnl decap */ + act_id = FLOW_ACTION_TUNNEL_DECAP; + } else if (is_tcf_pedit(tc_act)) { /* Pkt edit */ + act_id = FLOW_ACTION_MANGLE; + } else if (is_tcf_csum(tc_act)) { /* Checksum */ + act_id = FLOW_ACTION_CSUM; + } else if (is_tcf_vlan(tc_act)) { + switch (tcf_vlan_action(tc_act)) { + case TCA_VLAN_ACT_PUSH: /* VLAN Push */ + act_id = FLOW_ACTION_VLAN_PUSH; + break; + case TCA_VLAN_ACT_POP: /* VLAN Pop */ + act_id = FLOW_ACTION_VLAN_POP; + break; + default: + act_id = FLOW_ACTION_INVALID; + break; + } + } else if (is_tcf_gact_goto_chain(tc_act)) { + act_id = FLOW_ACTION_GOTO; + } else { + act_id = FLOW_ACTION_INVALID; + } + + return act_id; +} + +/* Function to handle the parsing of TC Flows and placing + * the TC flow actions into the ulp structures. 
+ */ +int bnxt_ulp_tc_parser_act_parse(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct ulp_tc_parser_params *params) +{ + struct tcf_exts *tc_exts = tc_flow_cmd->exts; + struct bnxt_ulp_tc_act_info *act_info; + enum flow_action_id act_id; + struct tc_action *tc_act; +#ifndef HAVE_TC_EXTS_FOR_ACTION + LIST_HEAD(tc_actions); +#else + int i; +#endif + + if (!tcf_exts_has_actions(tc_exts)) { + netdev_info(bp->dev, "no actions"); + return -EINVAL; + } + +#ifndef HAVE_TC_EXTS_FOR_ACTION + tcf_exts_to_list(tc_exts, &tc_actions); + list_for_each_entry(tc_act, &tc_actions, list) { +#else + tcf_exts_for_each_action(i, tc_act, tc_exts) { +#endif + act_id = tcf_exts_to_act_id(tc_act); + act_info = &ulp_act_info[act_id]; + + if (act_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) { + netdev_dbg(bp->dev, + "Truflow parser does not support act %d\n", + act_id); + return BNXT_TF_RC_ERROR; + } + + if (act_info->proto_act_func) { + if (act_info->proto_act_func(bp, params, tc_act) != + BNXT_TF_RC_SUCCESS) + return BNXT_TF_RC_ERROR; + } + } + + /* Set count action in the action bitmap */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT); + + /* update the implied port details */ + ulp_tc_parser_implicit_act_port_process(bp, params); + + return BNXT_TF_RC_SUCCESS; +} +#endif /* HAVE_FLOW_OFFLOAD_H */ + +int ulp_tc_control_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_control *match = match_arg; + + params->addr_type = match->key->addr_type; + netdev_dbg(bp->dev, "Control key: addr_type: %d\n", params->addr_type); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_tnl_control_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_control *match = match_arg; + + params->tnl_addr_type = match->key->addr_type; + netdev_dbg(bp->dev, "Tunnel Control key: addr_type: %d\n", + params->tnl_addr_type); + + return BNXT_TF_RC_SUCCESS; +} + +#define 
BNXT_ULP_IS_ETH_TYPE_ARP(params) \ + (cpu_to_be16((params)->n_proto) == ETH_P_ARP) +int ulp_tc_basic_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_basic *match = match_arg; + + params->n_proto = match->key->n_proto; + if (BNXT_ULP_IS_ETH_TYPE_ARP(params)) { + netdev_dbg(bp->dev, "ARP flow offload not supported\n"); + return BNXT_TF_RC_PARSE_ERR_NOTSUPP; + } + params->n_proto_mask = match->mask->n_proto; + params->ip_proto = match->key->ip_proto; + params->ip_proto_mask = match->mask->ip_proto; + netdev_dbg(bp->dev, "Basic key: n_proto: 0x%x ip_proto: %d\n", + cpu_to_be16(params->n_proto), params->ip_proto); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_eth_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_eth_addrs *match = match_arg; + u32 inner_flag = 0; + bool allow_bc_mc; + u32 idx = 0; + u32 size; + + allow_bc_mc = bnxt_ulp_validate_bcast_mcast(bp); + + if (!allow_bc_mc && (is_multicast_ether_addr(match->key->dst) || + is_broadcast_ether_addr(match->key->dst))) { + netdev_dbg(bp->dev, + "Broadcast/Multicast flow offload unsupported\n"); + return BNXT_TF_RC_PARSE_ERR_NOTSUPP; + } + + if (!allow_bc_mc && (is_multicast_ether_addr(match->key->src) || + is_broadcast_ether_addr(match->key->src))) { + netdev_dbg(bp->dev, + "Broadcast/Multicast flow offload unsupported\n"); + return BNXT_TF_RC_PARSE_ERR_NOTSUPP; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_ETH_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* Copy the key item for eth into hdr_field using ethernet + * header fields + */ + size = sizeof(match->key->dst); + ulp_tc_prsr_fld_mask(params, &idx, size, match->key->dst, + match->mask->dst, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(match->key->src); + ulp_tc_prsr_fld_mask(params, &idx, size, match->key->src, + match->mask->src, ULP_PRSR_ACT_DEFAULT); + 
+ size = sizeof(params->n_proto); + ulp_tc_prsr_fld_mask(params, &idx, size, ¶ms->n_proto, + ¶ms->n_proto_mask, ULP_PRSR_ACT_MATCH_IGNORE); + + /* Parser expects the ethernet and vlan headers in wire format. + * So, when the vlan header is present, we set the tpid here + * and the vlan hdr parser sets the eth_type. Otherwise, we set + * the eth_type. + */ + if (params->vlan_tpid) { + ulp_tc_prsr_fld_mask(params, &idx, size, ¶ms->vlan_tpid, + ¶ms->vlan_tpid_mask, + ULP_PRSR_ACT_MATCH_IGNORE); + } else { + ulp_tc_prsr_fld_mask(params, &idx, size, ¶ms->n_proto, + ¶ms->n_proto_mask, + ULP_PRSR_ACT_MATCH_IGNORE); + } + + /* Update the protocol hdr bitmap */ + if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ETH) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_TCP)) { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH); + inner_flag = 1; + } else { + ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); + } + + /* Update the field protocol hdr bitmap */ + if (!params->vlan_tpid) { + ulp_tc_l2_proto_type_update(params, params->n_proto, + inner_flag); + } + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_vlan_handler(struct bnxt *bp, struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_vlan *match = match_arg; + u16 vlan_tag_mask = 0, priority_mask = 0; + struct ulp_tc_hdr_bitmap *hdr_bit; + u16 vlan_tag = 0, priority = 0; + u32 outer_vtag_num; + u32 inner_vtag_num; + u32 inner_flag = 0; + u32 idx = 0; + u32 size; + + if (match->key) { + priority = htons(match->key->vlan_priority); + vlan_tag = htons(match->key->vlan_id); + } + + if (match->mask) { + priority_mask = htons(match->mask->vlan_priority); + vlan_tag_mask = match->mask->vlan_id; + vlan_tag_mask &= 0xfff; + + 
/* The storage for priority and vlan tag is 2 bytes. + * The mask of priority which is 3 bits, if it is all 1's + * then set the remaining 13 bits to 1's as well so that it is + * matched as an exact match. + */ + if (priority_mask == ULP_VLAN_PRIORITY_MASK) + priority_mask |= ~ULP_VLAN_PRIORITY_MASK; + if (vlan_tag_mask == ULP_VLAN_TAG_MASK) + vlan_tag_mask |= ~ULP_VLAN_TAG_MASK; + vlan_tag_mask = htons(vlan_tag_mask); + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(vlan_tag); + /* The priority field is ignored since OVS sets it as a + * wild card match and it is not supported. This is a + * workaround and shall be addressed in the future. + */ + ulp_tc_prsr_fld_mask(params, &idx, size, &priority, &priority_mask, + ULP_PRSR_ACT_MASK_IGNORE); + + ulp_tc_prsr_fld_mask(params, &idx, size, &vlan_tag, &vlan_tag_mask, + ULP_PRSR_ACT_DEFAULT); + + /* Parser expects the ethernet and vlan headers in wire format. + * So, when the vlan header is present, we set the eth_type here + * and the eth hdr parser would have set the tpid. 
+ */ + size = sizeof(params->n_proto); + ulp_tc_prsr_fld_mask(params, &idx, size, ¶ms->n_proto, + ¶ms->n_proto_mask, ULP_PRSR_ACT_MATCH_IGNORE); + + /* Get the outer tag and inner tag counts */ + outer_vtag_num = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_O_VTAG_NUM); + inner_vtag_num = ULP_COMP_FLD_IDX_RD(params, + BNXT_ULP_CF_IDX_I_VTAG_NUM); + + /* Update the hdr_bitmap of the vlans */ + hdr_bit = ¶ms->hdr_bitmap; + if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !outer_vtag_num) { + /* Update the vlan tag num */ + outer_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_HAS_VTAG, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_OO_VLAN); + if (match->mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + outer_vtag_num == 1) { + /* update the vlan tag num */ + outer_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM, + outer_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_OI_VLAN); + if (match->mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1); + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + !inner_vtag_num) { + /* update the vlan tag num */ + inner_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_HAS_VTAG, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1); + 
ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_IO_VLAN); + if (match->mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1); + inner_flag = 1; + } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) && + ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) && + inner_vtag_num == 1) { + /* update the vlan tag num */ + inner_vtag_num++; + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM, + inner_vtag_num); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0); + ULP_BITMAP_SET(params->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_II_VLAN); + if (match->mask && vlan_tag_mask) + ULP_COMP_FLD_IDX_WR(params, + BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1); + inner_flag = 1; + } else { + netdev_dbg(bp->dev, "%s: VLAN hdr found without eth\n", + __func__); + return BNXT_TF_RC_ERROR; + } + + ulp_tc_l2_proto_type_update(params, params->n_proto, inner_flag); + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the update of proto header based on field values */ +static void ulp_tc_l3_proto_type_update(struct ulp_tc_parser_params *param, + u8 proto, u32 in_flag) +{ + if (proto == IPPROTO_UDP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_UDP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_TCP) { + if (in_flag) { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_I_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1); + } else { + ULP_BITMAP_SET(param->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_O_TCP); + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1); + } + } else if (proto == IPPROTO_GRE) { + ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE); + } else if (proto == IPPROTO_ICMP) { + if (ULP_COMP_FLD_IDX_RD(param, 
BNXT_ULP_CF_IDX_L3_TUN)) + ULP_BITMAP_SET(param->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_I_ICMP); + else + ULP_BITMAP_SET(param->hdr_bitmap.bits, + BNXT_ULP_HDR_BIT_O_ICMP); + } + if (proto) { + if (in_flag) { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + proto); + } else { + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(param, + BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + proto); + } + } +} + +static int ulp_tc_ipv4_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_ip *match = match_arg; + u16 val16 = 0; + u8 val8 = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV4_NUM - 2)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* version_ihl */ + size = sizeof(val8); + ulp_tc_prsr_fld_mask(params, &idx, size, &val8, &val8, + ULP_PRSR_ACT_DEFAULT); + + /* tos: Ignore for matching templates with tunnel flows */ + size = sizeof(match->key->tos); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->tos, + &match->mask->tos, + params->tnl_addr_type ? 
ULP_PRSR_ACT_MATCH_IGNORE : + ULP_PRSR_ACT_DEFAULT); + + /* total_length */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* packet_id */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* fragment_offset */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* ttl */ + size = sizeof(match->key->ttl); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->ttl, + &match->mask->ttl, ULP_PRSR_ACT_DEFAULT); + + /* next_proto_id: Ignore proto for matching templates */ + size = sizeof(params->ip_proto); + ulp_tc_prsr_fld_mask(params, &idx, size, &params->ip_proto, + &params->ip_proto_mask, + ULP_PRSR_ACT_MATCH_IGNORE); + + /* hdr_checksum */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + return BNXT_TF_RC_SUCCESS; +} + +static int ulp_tc_ipv6_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_ip *match = match_arg; + u32 val32 = 0; + u16 val16 = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV6_NUM - 2)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* version */ + size = sizeof(val32); + ulp_tc_prsr_fld_mask(params, &idx, size, &val32, &val32, + ULP_PRSR_ACT_DEFAULT); + + /* traffic class: Ignore for matching templates with tunnel flows */ + size = sizeof(match->key->tos); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->tos, + &match->mask->tos, + params->tnl_addr_type ? 
ULP_PRSR_ACT_MATCH_IGNORE : + ULP_PRSR_ACT_DEFAULT); + + /* flow label: Ignore for matching templates */ + size = sizeof(val32); + ulp_tc_prsr_fld_mask(params, &idx, size, &val32, &val32, + ULP_PRSR_ACT_MASK_IGNORE); + + /* payload length */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* next_proto_id: Ignore proto for matching templates */ + size = sizeof(params->ip_proto); + ulp_tc_prsr_fld_mask(params, &idx, size, ¶ms->ip_proto, + ¶ms->ip_proto_mask, + ULP_PRSR_ACT_MATCH_IGNORE); + /* hop limit (ttl) */ + size = sizeof(match->key->ttl); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->ttl, + &match->mask->ttl, ULP_PRSR_ACT_DEFAULT); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_ip_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + if (params->n_proto == cpu_to_be16(ETH_P_IP)) + return ulp_tc_ipv4_ctrl_handler(bp, params, match_arg); + if (params->n_proto == cpu_to_be16(ETH_P_IPV6)) + return ulp_tc_ipv6_ctrl_handler(bp, params, match_arg); + return BNXT_TF_RC_ERROR; +} + +/* Function to handle the parsing of IPV4 Header. 
*/ +static int ulp_tc_parse_ipv4_addr(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + struct flow_match_ipv4_addrs *match = match_arg; + u32 inner_flag = 0; + u8 proto = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV4_NUM - 8)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(match->key->src); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->src, + &match->mask->src, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(match->key->dst); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->dst, + &match->mask->dst, ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv4 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + } + + /* Some of the applications may set the protocol field + * in the IPv4 match but don't set the mask. So, consider + * the mask in the proto value calculation. 
+ */ + proto = params->ip_proto & params->ip_proto_mask; + + /* Update the field protocol hdr bitmap */ + ulp_tc_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + netdev_dbg(bp->dev, "%s: l3-hdr-cnt: %d\n", __func__, cnt); + + return BNXT_TF_RC_SUCCESS; +} + +/* Function to handle the parsing of IPV6 Header. */ +static int ulp_tc_parse_ipv6_addr(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + struct flow_match_ipv6_addrs *match = match_arg; + u32 inner_flag = 0; + u8 proto = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + /* validate there is no 3rd L3 header */ + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, "Parse Err:Third L3 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_IPV6_NUM - 6)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(match->key->src); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->src, + &match->mask->src, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(match->key->dst); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->dst, + &match->mask->dst, ULP_PRSR_ACT_DEFAULT); + + /* Set the ipv6 header bitmap and computed l3 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1); + inner_flag = 1; + } else { + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1); + } + + /* Some of the applications may set the protocol field + * in the IPv6 match but don't set the mask. 
So, consider + * the mask in the proto value calculation. + */ + proto = params->ip_proto & params->ip_proto_mask; + + /* Update the field protocol hdr bitmap */ + ulp_tc_l3_proto_type_update(params, proto, inner_flag); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt); + netdev_dbg(bp->dev, "%s: l3-hdr-cnt: %d\n", __func__, cnt); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_ipv4_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + /* Dissector keys are set for both IPV4 and IPV6. Check addr_type + * (from KEY_CONTROL which is already processed) to resolve this. + */ + if (params->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + return ulp_tc_parse_ipv4_addr(bp, params, match_arg); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_ipv6_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + /* Dissector keys are set for both IPV4 and IPV6. Check addr_type + * (from KEY_CONTROL which is already processed) to resolve this. 
+ */ + if (params->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + return ulp_tc_parse_ipv6_addr(bp, params, match_arg); + + return BNXT_TF_RC_SUCCESS; +} + +static void ulp_tc_l4_proto_type_update(struct ulp_tc_parser_params *params, + u16 src_port, u16 src_mask, + u16 dst_port, u16 dst_mask, + enum bnxt_ulp_hdr_bit hdr_bit) +{ + switch (hdr_bit) { + case BNXT_ULP_HDR_BIT_I_UDP: + case BNXT_ULP_HDR_BIT_I_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT, + (u64)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT, + (u64)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK, + (u64)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK, + (u64)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ? 
+ IPPROTO_UDP : IPPROTO_TCP); + break; + case BNXT_ULP_HDR_BIT_O_UDP: + case BNXT_ULP_HDR_BIT_O_TCP: + ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT, + (u64)be16_to_cpu(src_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT, + (u64)be16_to_cpu(dst_port)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK, + (u64)be16_to_cpu(src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK, + (u64)be16_to_cpu(dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID, + 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT, + !!(src_port & src_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT, + !!(dst_port & dst_mask)); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID, + (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ? + IPPROTO_UDP : IPPROTO_TCP); + break; + default: + break; + } + + if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port == + cpu_to_be16(ULP_UDP_PORT_VXLAN)) { + ULP_BITMAP_SET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_T_VXLAN); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL); + } +} + +static int ulp_tc_udp_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP; + struct flow_match_ports *match = match_arg; + u16 dport_mask = 0, sport_mask = 0; + u16 dport = 0, sport = 0; + u16 dgram_cksum = 0; + u16 dgram_len = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (match->key) { + sport = match->key->src; + dport = match->key->dst; + } + if 
(match->mask) { + sport_mask = match->mask->src; + dport_mask = match->mask->dst; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_UDP_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(match->key->src); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->src, + &match->mask->src, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(match->key->dst); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->dst, + &match->mask->dst, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(dgram_len); + ulp_tc_prsr_fld_mask(params, &idx, size, &dgram_len, &dgram_len, + ULP_PRSR_ACT_DEFAULT); + + size = sizeof(dgram_cksum); + ulp_tc_prsr_fld_mask(params, &idx, size, &dgram_cksum, &dgram_cksum, + ULP_PRSR_ACT_DEFAULT); + + /* Set the udp header bitmap and computed l4 header bitmaps */ + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_UDP; + + ulp_tc_l4_proto_type_update(params, sport, sport_mask, dport, + dport_mask, out_l4); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_tcp_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_match_tcp *match = match_arg; + u32 val32 = 0; + u16 val16 = 0; + u8 val8 = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_TCP_NUM - 2)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + /* seq num */ + size = sizeof(val32); + ulp_tc_prsr_fld_mask(params, &idx, size, &val32, &val32, + ULP_PRSR_ACT_DEFAULT); 
+ + /* ack num */ + size = sizeof(val32); + ulp_tc_prsr_fld_mask(params, &idx, size, &val32, &val32, + ULP_PRSR_ACT_DEFAULT); + + /* data offset */ + size = sizeof(val8); + ulp_tc_prsr_fld_mask(params, &idx, size, &val8, &val8, + ULP_PRSR_ACT_DEFAULT); + + /* flags */ + size = sizeof(match->key->flags); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->flags, + &match->mask->flags, ULP_PRSR_ACT_DEFAULT); + + /* rx window */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* cksum */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + /* urg ptr */ + size = sizeof(val16); + ulp_tc_prsr_fld_mask(params, &idx, size, &val16, &val16, + ULP_PRSR_ACT_DEFAULT); + + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt); + return BNXT_TF_RC_SUCCESS; +} + +static int ulp_tc_tcp_ports_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP; + struct flow_match_ports *match = match_arg; + u16 dport_mask = 0, sport_mask = 0; + u16 dport = 0, sport = 0; + u32 idx = 0; + u32 size; + u32 cnt; + + cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT); + if (cnt == 2) { + netdev_dbg(bp->dev, + "Parse Err:Third L4 header not supported\n"); + return BNXT_TF_RC_ERROR; + } + + if (match->key) { + sport = match->key->src; + dport = match->key->dst; + } + if (match->mask) { + sport_mask = match->mask->src; + dport_mask = match->mask->dst; + } + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_TCP_NUM - 7)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + size = sizeof(match->key->src); + ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->src, + &match->mask->src, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(match->key->dst); + 
ulp_tc_prsr_fld_mask(params, &idx, size, &match->key->dst, + &match->mask->dst, ULP_PRSR_ACT_DEFAULT); + + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP) || + ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN)) + out_l4 = BNXT_ULP_HDR_BIT_I_TCP; + + ulp_tc_l4_proto_type_update(params, sport, sport_mask, dport, + dport_mask, out_l4); + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_l4_ports_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + int rc = BNXT_TF_RC_ERROR; + + if (params->ip_proto != IPPROTO_TCP && params->ip_proto != IPPROTO_UDP) + return rc; + + if (params->ip_proto == IPPROTO_UDP) + rc = ulp_tc_udp_handler(bp, params, match_arg); + else if (params->ip_proto == IPPROTO_TCP) + rc = ulp_tc_tcp_ports_handler(bp, params, match_arg); + + return rc; +} + +int ulp_tc_tnl_ip_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct flow_dissector_key_eth_addrs key; + struct flow_dissector_key_eth_addrs mask; + struct tc_match match; + + ether_addr_copy(key.dst, params->tnl_dmac); + eth_broadcast_addr(mask.dst); + + ether_addr_copy(key.src, params->tnl_smac); + eth_broadcast_addr(mask.src); + + match.key = &key; + match.mask = &mask; + + /* This will be overwritten when basic key is parsed later. + * Setting here so eth_addr_handler() can use it to build + * tnl eth hdr match. 
+ */ + if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + params->n_proto = cpu_to_be16(ETH_P_IP); + else if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + params->n_proto = cpu_to_be16(ETH_P_IPV6); + else + return BNXT_TF_RC_ERROR; + + params->n_proto_mask = 0xffff; + ulp_tc_eth_addr_handler(bp, params, &match); + + return ulp_tc_ip_ctrl_handler(bp, params, match_arg); +} + +int ulp_tc_tnl_ipv4_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + /* Dissector keys are set for both IPV4 and IPV6. Check tnl_addr_type + * (from KEY_CONTROL which is already processed) to resolve this. + */ + if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) + return ulp_tc_parse_ipv4_addr(bp, params, match_arg); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_tnl_ipv6_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + /* Dissector keys are set for both IPV4 and IPV6. Check tnl_addr_type + * (from KEY_CONTROL which is already processed) to resolve this. 
+ */ + if (params->tnl_addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) + return ulp_tc_parse_ipv6_addr(bp, params, match_arg); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_tnl_l4_ports_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + return ulp_tc_udp_handler(bp, params, match_arg); +} + +static int ulp_tc_vxlan_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg) +{ + struct ulp_parser_vxlan vxlan_mask = { 0x00, { 0x00, 0x00, 0x00 }, + { 0xff, 0xff, 0xff }, 0x00 }; + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + struct flow_match_enc_keyid *match = match_arg; + struct ulp_parser_vxlan vxlan_key = { 0 }; + u32 vni_mask; + u32 idx = 0; + u32 size; + u32 vni; + + if (ulp_tc_prsr_fld_size_validate(params, &idx, + BNXT_ULP_PROTO_HDR_VXLAN_NUM)) { + netdev_dbg(bp->dev, "Error parsing protocol header\n"); + return BNXT_TF_RC_ERROR; + } + + vni = match->key->keyid; + vni = be32_to_cpu(vni); + vni_mask = match->mask->keyid; + + netdev_dbg(bp->dev, "%s: vni: 0x%x mask: 0x%x\n", __func__, + vni, vni_mask); + + vxlan_key.vni[0] = (vni >> 16) & 0xff; + vxlan_key.vni[1] = (vni >> 8) & 0xff; + vxlan_key.vni[2] = vni & 0xff; + vxlan_key.flags = 0x08; + + size = sizeof(vxlan_key.flags); + ulp_tc_prsr_fld_mask(params, &idx, size, &vxlan_key.flags, + &vxlan_mask.flags, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(vxlan_key.rsvd0); + ulp_tc_prsr_fld_mask(params, &idx, size, &vxlan_key.rsvd0, + &vxlan_mask.rsvd0, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(vxlan_key.vni); + ulp_tc_prsr_fld_mask(params, &idx, size, &vxlan_key.vni, + &vxlan_mask.vni, ULP_PRSR_ACT_DEFAULT); + + size = sizeof(vxlan_key.rsvd1); + ulp_tc_prsr_fld_mask(params, &idx, size, &vxlan_key.rsvd1, + &vxlan_mask.rsvd1, ULP_PRSR_ACT_DEFAULT); + + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN); + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_tnl_key_handler(struct bnxt *bp, + struct 
ulp_tc_parser_params *params, + void *match_arg) +{ + /* Check the tunnel type as seen in UDP dport. + * We only support VXLAN tunnel for now. + */ + if (!ULP_BITMAP_ISSET(params->hdr_fp_bit.bits, + BNXT_ULP_HDR_BIT_T_VXLAN)) + return BNXT_TF_RC_ERROR; + + return ulp_tc_vxlan_handler(bp, params, match_arg); +} +#endif + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* Function to handle the parsing of action ports. */ +int ulp_tc_parser_act_port_set(struct ulp_tc_parser_params *param, u32 ifindex) +{ + struct ulp_tc_act_prop *act = ¶m->act_prop; + enum bnxt_ulp_intf_type port_type; + enum bnxt_ulp_direction_type dir; + u32 vnic_type; + u16 pid_s; + u32 pid; + + /* Get the direction */ + dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION); + port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE); + if (dir == BNXT_ULP_DIR_EGRESS) { + /* For egress direction, fill vport */ + if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s)) + return BNXT_TF_RC_ERROR; + + pid = pid_s; + pid = cpu_to_be32(pid); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT], + &pid, BNXT_ULP_ACT_PROP_SZ_VPORT); + if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) { + if (ulp_port_db_default_vnic_get(param->ulp_ctx, + ifindex, + BNXT_ULP_VF_FUNC_VNIC, + &pid_s)) + return BNXT_TF_RC_ERROR; + pid = pid_s; + + /* Allows use of func_opcode with VNIC */ + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_VNIC, pid); + } + } else { + /* For ingress direction, fill vnic */ + if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) + vnic_type = BNXT_ULP_VF_FUNC_VNIC; + else + vnic_type = BNXT_ULP_DRV_FUNC_VNIC; + + if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex, + vnic_type, &pid_s)) + return BNXT_TF_RC_ERROR; + + pid = pid_s; + pid = cpu_to_be32(pid); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC], + &pid, BNXT_ULP_ACT_PROP_SZ_VNIC); + } + + /* Update the action port set bit */ + ULP_COMP_FLD_IDX_WR(param, 
BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1); + return BNXT_TF_RC_SUCCESS; +} +#endif + +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +static int ulp_tc_parser_mirr_act_port_set(struct ulp_tc_parser_params *param, + u32 ifindex) +{ + struct ulp_tc_act_prop *act = ¶m->act_prop; + enum bnxt_ulp_intf_type port_type; + enum bnxt_ulp_direction_type dir; + u32 vnic_type; + u16 pid_s; + u32 pid; + + /* Get the direction */ + dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION); + if (dir == BNXT_ULP_DIR_EGRESS) { + /* For egress direction, fill vport */ + if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s)) + return BNXT_TF_RC_ERROR; + + pid = pid_s; + pid = cpu_to_be32(pid); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_MIRR_VPORT], + &pid, BNXT_ULP_ACT_PROP_SZ_MIRR_VPORT); + } else { + /* For ingress direction, fill vnic */ + port_type = ULP_COMP_FLD_IDX_RD(param, + BNXT_ULP_CF_IDX_ACT_MIRR_PORT_TYPE); + if (port_type == BNXT_ULP_INTF_TYPE_VF_REP) + vnic_type = BNXT_ULP_VF_FUNC_VNIC; + else + vnic_type = BNXT_ULP_DRV_FUNC_VNIC; + + if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex, + vnic_type, &pid_s)) + return BNXT_TF_RC_ERROR; + + pid = pid_s; + pid = cpu_to_be32(pid); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_MIRR_VNIC], + &pid, BNXT_ULP_ACT_PROP_SZ_MIRR_VNIC); + } + + /* Update the action port set bit */ + ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_MIRR_PORT_IS_SET, 1); + return BNXT_TF_RC_SUCCESS; +} + +#ifndef HAVE_FLOW_OFFLOAD_H +static struct net_device *tcf_redir_dev(struct bnxt *bp, + struct tc_action *tc_act) +{ +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(tc_act); +#else + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); +#endif + return dev; +} +#endif /* !HAVE_FLOW_OFFLOAD_H */ + +static struct net_device *ulp_tc_get_redir_dev(struct bnxt *bp, + void *action_arg) +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry *action = action_arg; 
+ + return action->dev; +#else + struct tc_action *action = action_arg; + + return tcf_redir_dev(bp, action); +#endif +} + +int ulp_tc_redirect_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + struct ulp_tc_hdr_bitmap *act = ¶ms->act_bitmap; + enum bnxt_ulp_intf_type intf_type; + struct net_device *redir_dev; + u32 ifindex; + u16 dst_fid; + + redir_dev = ulp_tc_get_redir_dev(bp, action_arg); + if (!redir_dev) { + netdev_dbg(bp->dev, "no dev in mirred action\n"); + return BNXT_TF_RC_ERROR; + } + + if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP)) + dst_fid = bp->pf.fw_fid; + else + dst_fid = bnxt_flow_get_dst_fid(bp, redir_dev); + + /* Get the port db ifindex */ + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, dst_fid, + &ifindex)) { + netdev_dbg(bp->dev, "Invalid port id\n"); + return BNXT_TF_RC_ERROR; + } + + /* Get the intf type */ + intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); + if (!intf_type) { + netdev_dbg(bp->dev, "Invalid port type\n"); + return BNXT_TF_RC_ERROR; + } + + /* Set the action port */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID, dst_fid); + + return ulp_tc_parser_act_port_set(params, ifindex); +} + +#ifndef HAVE_FLOW_OFFLOAD_H +static struct net_device *tcf_mirror_dev(struct bnxt *bp, + struct tc_action *tc_act) +{ +#ifdef HAVE_TCF_MIRRED_DEV + struct net_device *dev = tcf_mirred_dev(tc_act); +#else + int ifindex = tcf_mirred_ifindex(tc_act); + struct net_device *dev; + + dev = __dev_get_by_index(dev_net(bp->dev), ifindex); +#endif + return dev; +} +#endif /* !HAVE_FLOW_OFFLOAD_H */ + +static struct net_device *ulp_tc_get_mirror_dev(struct bnxt *bp, + void *action_arg) +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry *action = action_arg; + + return action->dev; +#else + struct tc_action *action = action_arg; + + return tcf_mirror_dev(bp, action); +#endif +} + 
+static int ulp_tc_mirror_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + struct ulp_tc_hdr_bitmap *act = ¶ms->act_bitmap; + enum bnxt_ulp_intf_type intf_type; + struct net_device *mirred_dev; + u32 ifindex; + u16 dst_fid; + + mirred_dev = ulp_tc_get_mirror_dev(bp, action_arg); + if (!mirred_dev) { + netdev_err(bp->dev, "no dev in mirred action\n"); + return BNXT_TF_RC_ERROR; + } + + if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP)) + dst_fid = bp->pf.fw_fid; + else + dst_fid = bnxt_flow_get_dst_fid(bp, mirred_dev); + + /* Get the port db ifindex */ + if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, dst_fid, + &ifindex)) { + netdev_dbg(bp->dev, "Invalid port id\n"); + return BNXT_TF_RC_ERROR; + } + + /* Get the intf type */ + intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex); + if (!intf_type) { + netdev_dbg(bp->dev, "Invalid port type\n"); + return BNXT_TF_RC_ERROR; + } + + if (!ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACT_BIT_SHARED_SAMPLE)) { + netdev_dbg(bp->dev, "%s: mirror ifindex[%u], intf_type[%u], dst_fid[%u]\n", + __func__, ifindex, intf_type, dst_fid); + + /* Set the mirror action port */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_MIRR_PORT_TYPE, intf_type); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID, dst_fid); + + /* Set the shared_sample bit */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SHARED_SAMPLE); + + return ulp_tc_parser_mirr_act_port_set(params, ifindex); + } + + netdev_dbg(bp->dev, + "%s: mirror->redirect ifindex[%u], intf_type[%u], dst_fid[%u]\n", + __func__, ifindex, intf_type, dst_fid); + + /* Override the action port, as this is a 2nd mirror destination */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID, dst_fid); + + return ulp_tc_parser_act_port_set(params, ifindex); +} + +int ulp_tc_ingress_mirror_act_handler(struct bnxt *bp, + 
struct ulp_tc_parser_params *params, + void *action_arg) +{ + netdev_dbg(bp->dev, "mirred action: ingress mirror\n"); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR, 0); + + return ulp_tc_mirror_act_handler(bp, params, action_arg); +} + +int ulp_tc_egress_mirror_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + netdev_dbg(bp->dev, "mirred action: egress mirror\n"); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR, 1); + + return ulp_tc_mirror_act_handler(bp, params, action_arg); +} + +static void ulp_encap_copy_eth(struct ulp_tc_parser_params *params, + struct bnxt_tc_l2_key *l2_info, + u16 eth_type) +{ + struct ulp_tc_hdr_field *field; + u32 size; + + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC]; + size = sizeof(l2_info->dmac); + + field = ulp_tc_parser_fld_copy(field, l2_info->dmac, size); + field = ulp_tc_parser_fld_copy(field, l2_info->smac, size); + + size = sizeof(eth_type); + field = ulp_tc_parser_fld_copy(field, ð_type, size); + + ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH); +} + +static void ulp_encap_copy_ipv4(struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct ulp_tc_act_prop *ap = ¶ms->act_prop; + struct ulp_tc_hdr_field *field; + u32 ip_size, ip_type; + u16 val16; + u32 size; + u8 val8; + + ip_size = cpu_to_be32(BNXT_ULP_ENCAP_IPV4_SIZE); + ip_type = cpu_to_be32(BNXT_ULP_ETH_IPV4); + + /* Update the ip size details */ + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ], + &ip_size, sizeof(u32)); + + /* update the ip type */ + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE], + &ip_type, sizeof(u32)); + + /* update the computed field to notify it is ipv4 header */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG, 1); + + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL]; + + /* version_ihl */ + val8 = 0x45; + size = sizeof(val8); + field = ulp_tc_parser_fld_copy(field, 
&val8, size); + + /* tos */ + size = sizeof(tun_key->tos); + field = ulp_tc_parser_fld_copy(field, &tun_key->tos, size); + + /* packet_id */ + val16 = 0; + size = sizeof(val16); + field = ulp_tc_parser_fld_copy(field, &val16, size); + + /* fragment_offset */ + size = sizeof(val16); + field = ulp_tc_parser_fld_copy(field, &val16, size); + + /* ttl */ + size = sizeof(tun_key->ttl); + if (!tun_key->ttl) + val8 = BNXT_ULP_DEFAULT_TTL; + else + val8 = tun_key->ttl; + field = ulp_tc_parser_fld_copy(field, &val8, size); + + /* next_proto_id */ + val8 = 0; + size = sizeof(val8); + field = ulp_tc_parser_fld_copy(field, &val8, size); + + size = sizeof(tun_key->u.ipv4.src); + field = ulp_tc_parser_fld_copy(field, &tun_key->u.ipv4.src, size); + + size = sizeof(tun_key->u.ipv4.dst); + field = ulp_tc_parser_fld_copy(field, &tun_key->u.ipv4.dst, size); + + ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4); +} + +static void ulp_encap_copy_ipv6(struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct ulp_tc_act_prop *ap = ¶ms->act_prop; + u32 ip_size, ip_type, val32, size; + struct ulp_tc_hdr_field *field; + u8 val8; + + ip_size = cpu_to_be32(BNXT_ULP_ENCAP_IPV6_SIZE); + ip_type = cpu_to_be32(BNXT_ULP_ETH_IPV6); + + /* Update the ip size details */ + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ], + &ip_size, sizeof(u32)); + + /* update the ip type */ + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE], + &ip_type, sizeof(u32)); + + /* update the computed field to notify it is ipv4 header */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG, 1); + + /* Version (4b), Traffic Class (8b), Flow Label (20b) */ + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW]; + val32 = cpu_to_be32((tun_key->tos << 4) | 6); + val32 |= tun_key->label; + size = sizeof(val32); + field = ulp_tc_parser_fld_copy(field, &val32, size); + + /* next_proto_id */ + val8 = 0; + size = sizeof(val8); + field = 
ulp_tc_parser_fld_copy(field, &val8, size); + + /* hop limit */ + size = sizeof(tun_key->ttl); + val8 = tun_key->ttl ? tun_key->ttl : BNXT_ULP_DEFAULT_TTL; + field = ulp_tc_parser_fld_copy(field, &val8, size); + + size = sizeof(tun_key->u.ipv6.src); + field = ulp_tc_parser_fld_copy(field, &tun_key->u.ipv6.src, size); + + size = sizeof(tun_key->u.ipv6.dst); + field = ulp_tc_parser_fld_copy(field, &tun_key->u.ipv6.dst, size); + + ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6); +} + +static void ulp_encap_copy_udp(struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct ulp_tc_hdr_field *field; + u8 type = IPPROTO_UDP; + u32 size; + + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT]; + size = sizeof(tun_key->tp_src); + field = ulp_tc_parser_fld_copy(field, &tun_key->tp_src, size); + + /* update the computational field */ + if (tun_key->tp_src) + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_SPORT, 1); + + size = sizeof(tun_key->tp_dst); + field = ulp_tc_parser_fld_copy(field, &tun_key->tp_dst, size); + + ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP); + + /* Update the ip header protocol */ + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO]; + ulp_tc_parser_fld_copy(field, &type, sizeof(type)); + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO]; + ulp_tc_parser_fld_copy(field, &type, sizeof(type)); +} + +static void ulp_encap_copy_vxlan(struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct ulp_tc_hdr_bitmap *act = ¶ms->act_bitmap; + struct ulp_tc_act_prop *ap = ¶ms->act_prop; + struct ulp_parser_vxlan ulp_vxlan = { 0 }; + struct ulp_tc_hdr_field *field; + u32 vxlan_size; + u32 size; + u32 vni; + + vni = tunnel_id_to_key32(tun_key->tun_id); + vni = be32_to_cpu(vni); + + netdev_dbg(params->ulp_ctx->bp->dev, "%s: vni: 0x%x\n", __func__, vni); + + ulp_vxlan.vni[0] = (vni >> 16) & 0xff; + ulp_vxlan.vni[1] = (vni >> 8) & 0xff; + ulp_vxlan.vni[2] = vni & 0xff; + 
ulp_vxlan.flags = 0x08; + + vxlan_size = sizeof(ulp_vxlan); + vxlan_size = cpu_to_be32(vxlan_size); + memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ], + &vxlan_size, sizeof(u32)); + + /* update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP); + + field = ¶ms->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS]; + size = sizeof(ulp_vxlan.flags); + field = ulp_tc_parser_fld_copy(field, &ulp_vxlan.flags, size); + + size = sizeof(ulp_vxlan.rsvd0); + field = ulp_tc_parser_fld_copy(field, &ulp_vxlan.rsvd0, size); + + size = sizeof(ulp_vxlan.vni); + field = ulp_tc_parser_fld_copy(field, &ulp_vxlan.vni, size); + + size = sizeof(ulp_vxlan.rsvd1); + field = ulp_tc_parser_fld_copy(field, &ulp_vxlan.rsvd1, size); + + ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN); +} + +/* Save encap action details in parser params, so it can be returned to + * the caller of bnxt_ulp_flow_create() for neighbor update processing. + * This memory will be owned and released by the caller. 
+ */ +static int ulp_tc_save_encap_info(struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_neigh_key *neigh_key, + struct bnxt_tc_l2_key *l2_info) +{ + params->tnl_key = vzalloc(sizeof(*tun_key)); + if (!params->tnl_key) + return -ENOMEM; + + params->neigh_key = vzalloc(sizeof(*neigh_key)); + if (!params->neigh_key) { + vfree(params->tnl_key); + return -ENOMEM; + } + + *((struct ip_tunnel_key *)params->tnl_key) = *tun_key; + *((struct bnxt_tc_neigh_key *)params->neigh_key) = *neigh_key; + + ether_addr_copy(params->tnl_dmac, l2_info->dmac); + ether_addr_copy(params->tnl_smac, l2_info->smac); + params->tnl_ether_type = l2_info->ether_type; + + return 0; +} + +static int ulp_tc_tunnel_encap_ipv4(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct bnxt_tc_neigh_key neigh_key = { 0 }; + struct bnxt_tc_l2_key l2_info = { 0 }; + int rc; + + rc = bnxt_tc_resolve_ipv4_tunnel_hdrs(bp, NULL, tun_key, &l2_info, + &neigh_key); + if (rc != 0) + return BNXT_TF_RC_ERROR; + + ulp_encap_copy_eth(params, &l2_info, cpu_to_be16(ETH_P_IP)); + ulp_encap_copy_ipv4(params, tun_key); + ulp_encap_copy_udp(params, tun_key); + ulp_encap_copy_vxlan(params, tun_key); + + l2_info.ether_type = ETH_P_IP; + ulp_tc_save_encap_info(params, tun_key, &neigh_key, &l2_info); + return BNXT_TF_RC_SUCCESS; +} + +static int ulp_tc_tunnel_encap_ipv6(struct bnxt *bp, + struct ulp_tc_parser_params *params, + struct ip_tunnel_key *tun_key) +{ + struct bnxt_tc_neigh_key neigh_key = { 0 }; + struct bnxt_tc_l2_key l2_info = { 0 }; + int rc; + + rc = bnxt_tc_resolve_ipv6_tunnel_hdrs(bp, NULL, tun_key, &l2_info, + &neigh_key); + if (rc) + return BNXT_TF_RC_ERROR; + + ulp_encap_copy_eth(params, &l2_info, cpu_to_be16(ETH_P_IPV6)); + ulp_encap_copy_ipv6(params, tun_key); + ulp_encap_copy_udp(params, tun_key); + ulp_encap_copy_vxlan(params, tun_key); + + l2_info.ether_type = ETH_P_IPV6; + ulp_tc_save_encap_info(params, tun_key, 
&neigh_key, &l2_info); + + return BNXT_TF_RC_SUCCESS; +} + +static struct ip_tunnel_info *ulp_tc_get_tun_info(void *action_arg) + +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry *action = action_arg; + + return (struct ip_tunnel_info *)action->tunnel; +#else + struct tc_action *action = action_arg; + + return tcf_tunnel_info(action); +#endif +} + +int ulp_tc_tunnel_encap_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + struct ip_tunnel_info *tun_info = ulp_tc_get_tun_info(action_arg); + struct ip_tunnel_key encap_key = tun_info->key; + int rc = BNXT_TF_RC_ERROR; + + switch (ip_tunnel_info_af(tun_info)) { + case AF_INET: + rc = ulp_tc_tunnel_encap_ipv4(bp, params, &encap_key); + break; + case AF_INET6: + rc = ulp_tc_tunnel_encap_ipv6(bp, params, &encap_key); + break; + default: + break; + } + + return rc; +} + +int ulp_tc_tunnel_decap_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + /* Update the hdr_bitmap with vxlan */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_VXLAN_DECAP); + + /* Update computational fields with tunnel decap info */ + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1); + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1); + ULP_BITMAP_SET(params->cf_bitmap, BNXT_ULP_CF_BIT_IS_TUNNEL); + + return BNXT_TF_RC_SUCCESS; +} + +static void ulp_tc_get_vlan_info(void *action_arg, __be16 *proto, u16 *vid, + u8 *prio) +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry *action = action_arg; + + *proto = action->vlan.proto; + *vid = action->vlan.vid; + *prio = action->vlan.prio; +#else + struct tc_action *action = action_arg; + + *proto = tcf_vlan_push_proto(action); + *vid = tcf_vlan_push_vid(action); + *prio = tcf_vlan_push_prio(action); +#endif +} + +int ulp_tc_vlan_push_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + struct ulp_tc_act_prop *act = ¶ms->act_prop; + __be16 proto; 
+ u16 vid; + u8 prio; + + ulp_tc_get_vlan_info(action_arg, &proto, &vid, &prio); + netdev_dbg(bp->dev, "%s: tpid: 0x%x vid: 0x%x pcp: 0x%x\n", __func__, + proto, vid, prio); + + /* set tpid */ + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN], + &proto, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_PUSH_VLAN); + + /* set vid */ + vid = cpu_to_be16(vid); + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID], + &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_VLAN_VID); + + /* set pcp */ + memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP], + &prio, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP); + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_SET_VLAN_PCP); + + return BNXT_TF_RC_SUCCESS; +} + +int ulp_tc_vlan_pop_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + /* Update the act_bitmap with pop */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN); + return BNXT_TF_RC_SUCCESS; +} + +static u32 ulp_tc_get_chain_index(void *action_arg) +{ +#ifdef HAVE_FLOW_OFFLOAD_H + struct flow_action_entry *action = action_arg; + + return action->chain_index; +#else + struct tc_action *action = action_arg; + + return tcf_gact_goto_chain_index(action); +#endif +} + +int ulp_tc_goto_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg) +{ + u32 chain_id = ulp_tc_get_chain_index(action_arg); + struct ulp_tc_act_prop *act_prop = ¶ms->act_prop; + + netdev_dbg(bp->dev, "%s: goto chain: %u\n", __func__, chain_id); + + /* Set goto action in the action bitmap */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_GOTO_CHAIN); + chain_id = cpu_to_be32(chain_id); + memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_GOTO_CHAIN], + &chain_id, BNXT_ULP_ACT_PROP_SZ_GOTO_CHAIN); + return BNXT_TF_RC_SUCCESS; +} + +static int bnxt_tc_set_l3_v4_action_params(struct 
bnxt *bp, struct ulp_tc_parser_params *params, + u32 offset, u32 val) +{ + if (offset == offsetof(struct iphdr, saddr)) { + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC], + &val, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC); + /* Update the hdr_bitmap with set ipv4 src */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_IPV4_SRC); + } else if (offset == offsetof(struct iphdr, daddr)) { + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST], + &val, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST); + /* Update the hdr_bitmap with set ipv4 dst */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_IPV4_DST); + } else { + netdev_dbg(bp->dev, + "%s: IPv4_hdr: Invalid pedit field\n", + __func__); + return -EINVAL; + } + + netdev_dbg(bp->dev, "Actions NAT src IP: %pI4 dst ip : %pI4\n", + ¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC], + ¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST]); + + return 0; +} + +#define BNXT_TC_FIRST_WORD_SRC_IPV6 0x8 +#define BNXT_TC_SECOND_WORD_SRC_IPV6 0xC +#define BNXT_TC_THIRD_WORD_SRC_IPV6 0x10 +#define BNXT_TC_FOURTH_WORD_SRC_IPV6 0x14 +#define BNXT_TC_FIRST_WORD_DST_IPV6 0x18 +#define BNXT_TC_SECOND_WORD_DST_IPV6 0x1C +#define BNXT_TC_THIRD_WORD_DST_IPV6 0x20 +#define BNXT_TC_FOURTH_WORD_DST_IPV6 0x24 +#define BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION 4 +#define BNXT_TC_WORD_DSCP_IPV6 0x0 +#define BNXT_TC_MASK_DSCP_IPV6 0x0FC00000 + +#define BNXT_TC_DEFAULT_METER_PROFILE_ID 10 +#define BNXT_TC_DEFAULT_METER_ID 20 + +/* Destroy the implicit meter and meter-profile */ +static void bnxt_tc_destroy_implicit_meter(struct bnxt *bp, u32 dir) +{ + bnxt_flow_meter_destroy(bp, BNXT_TC_DEFAULT_METER_ID, dir); + bnxt_flow_meter_profile_delete(bp, BNXT_TC_DEFAULT_METER_PROFILE_ID, + dir); +} + +/* First time init; create an implicit meter profile and meter */ +static int bnxt_tc_create_implicit_meter(struct bnxt *bp, u32 dir) +{ + u32 meter_profile_id = BNXT_TC_DEFAULT_METER_PROFILE_ID; 
+ u32 meter_id = BNXT_TC_DEFAULT_METER_ID; + int rc; + + rc = bnxt_flow_meter_profile_add(bp, meter_profile_id, + dir); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to create meter profile, id: 0x%x\n", + __func__, meter_profile_id); + return rc; + } + + rc = bnxt_flow_meter_create(bp, meter_profile_id, meter_id, dir); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to create meter id: 0x%x\n", + __func__, meter_id); + bnxt_flow_meter_profile_delete(bp, meter_profile_id, dir); + return rc; + } + + return 0; +} + +#define CFA_ACT_DSCP_RMP_NUM_WORDS 64 +static int bnxt_tc_dscp_global_cfg_update(struct bnxt *bp, enum tf_dir dir, + enum tf_global_config_type type, + u32 offset, u32 value, u32 set_flag) +{ + struct tf_global_cfg_parms parms = { 0 }; + u32 dscp_val = 0; + u32 dscp_rmp_val; + u32 *global_cfg; + u32 size; + int rc; + int i; + + size = sizeof(u32) * 64; + global_cfg = vzalloc(size); + if (!global_cfg) + return -ENOMEM; + + parms.dir = dir, + parms.type = type, + parms.offset = offset, + parms.config = (u8 *)global_cfg, + parms.config_sz_in_bytes = size; + + if (set_flag) { + dscp_val = cpu_to_be32(value); + dscp_val >>= 20; + } + + /* Setup each row to be written; it consists of 3 fields, + * each 8-bits. The upper 6-bits of each field contains + * the DSCP value for each color. + * + * 31:24 - Unused + * 24:16 - Red DSCP + * 15:8 - Yellow DSCP + * 7:0 - Green DSCP + * + * The current implementation sets the same value for all + * 3 colors and across all 64 rows. But the API supports + * setting unique value for each row and color. 
+ */ + dscp_rmp_val = ((dscp_val << 16) | (dscp_val << 8) | dscp_val); + for (i = 0; i < CFA_ACT_DSCP_RMP_NUM_WORDS; i++) + global_cfg[i] = dscp_rmp_val; + + netdev_dbg(bp->dev, "%s: Setting dscp: 0x%x dscp_rmp: 0x%x\n", + __func__, dscp_val, dscp_rmp_val); + + rc = tf_set_global_cfg(bp->tfp, &parms); + if (rc) + netdev_dbg(bp->dev, "Failed to set global cfg 0x%x rc:%d\n", + type, rc); + + vfree(global_cfg); + return rc; +} + +int bnxt_tc_clear_dscp_ipv6(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx) +{ + struct bnxt_ulp_data *ulp_data = ulp_ctx->cfg_data; + int rc; + + if (!ulp_data->dscp_remap_initialized) + return -EINVAL; + + /* Clear dscp in meter table using global config */ + rc = bnxt_tc_dscp_global_cfg_update(bp, TF_DIR_TX, TF_DSCP_RMP_CFG, 0, 0, 1); + if (rc) + return -EIO; + + bnxt_tc_destroy_implicit_meter(bp, BNXT_ULP_FLOW_ATTR_EGRESS); + ulp_data->dscp_remap_val = 0; + ulp_data->dscp_remap_initialized = false; + + netdev_dbg(bp->dev, "%s: dscp_remap_initialized: %d\n", + __func__, ulp_data->dscp_remap_initialized); + return 0; +} + +static void bnxt_tc_param_set_act_meter(struct ulp_tc_parser_params *params, u32 meter_id) +{ + u32 tmp_meter_id; + + tmp_meter_id = cpu_to_be32(meter_id); + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_METER], + &tmp_meter_id, BNXT_ULP_ACT_PROP_SZ_METER); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_METER); + + ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DSCP_REMAP, 1); +} + +static int bnxt_tc_set_dscp_ipv6(struct bnxt *bp, struct ulp_tc_parser_params *params, + u32 offset, u32 val, u32 mask) +{ + struct bnxt_ulp_data *ulp_data = params->ulp_ctx->cfg_data; + u32 meter_id = BNXT_TC_DEFAULT_METER_ID; + u32 dir = 0; + int rc; + + /* Only DSCP (6-bit) supported; ECN (2-bit) must be masked */ + if (cpu_to_be32(mask) != BNXT_TC_MASK_DSCP_IPV6) { + netdev_dbg(bp->dev, "%s: Invalid mask: 0x%x\n", __func__, mask); + return -EINVAL; + } + + /* Only TX supported for now */ + dir = (params->dir_attr & 
BNXT_ULP_FLOW_ATTR_INGRESS) ? + BNXT_ULP_FLOW_ATTR_INGRESS : BNXT_ULP_FLOW_ATTR_EGRESS; + if (dir != BNXT_ULP_FLOW_ATTR_EGRESS) { + netdev_dbg(bp->dev, "%s: Invalid dir: 0x%x\n", __func__, dir); + return -EINVAL; + } + + netdev_dbg(bp->dev, "%s: Set DSCP: val: 0x%x mask: 0x%x\n", + __func__, cpu_to_be32(val), cpu_to_be32(mask)); + + if (ulp_data->dscp_remap_initialized) { + bnxt_tc_param_set_act_meter(params, meter_id); + + /* Setting a new dscp val; reconfig global dscp */ + if (cpu_to_be32(val) != ulp_data->dscp_remap_val) + goto dscp_glb_cfg; + + /* Setting same dscp val; just return success */ + return 0; + } + + rc = bnxt_tc_create_implicit_meter(bp, dir); + if (rc) + return rc; + + bnxt_tc_param_set_act_meter(params, meter_id); + +dscp_glb_cfg: + /* Set dscp in meter table using global config */ + rc = bnxt_tc_dscp_global_cfg_update(bp, TF_DIR_TX, TF_DSCP_RMP_CFG, 0, val, 1); + if (rc) { + bnxt_tc_destroy_implicit_meter(bp, dir); + return rc; + } + + ulp_data->dscp_remap_val = cpu_to_be32(val); + ulp_data->dscp_remap_initialized = true; + + netdev_dbg(bp->dev, "%s: dscp_remap_initialized: %d\n", + __func__, ulp_data->dscp_remap_initialized); + return 0; +} + +static int bnxt_tc_set_l3_v6_action_params(struct bnxt *bp, struct ulp_tc_parser_params *params, + u32 offset, u32 val, u32 mask) +{ + int rc = 0; + + /* The number of bytes getting copied must be BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION + * i.e., 4 bytes only even though this is IPv6 address. Because the IPv6 address + * comes from the stack in 4 iterations with each iteration carrying 4 bytes. 
+ */ + + switch (offset) { + case BNXT_TC_FIRST_WORD_SRC_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_SECOND_WORD_SRC_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC + 4], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_THIRD_WORD_SRC_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC + 8], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_FOURTH_WORD_SRC_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC + 12], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_IPV6_SRC); + netdev_dbg(bp->dev, "Actions NAT src IPv6 addr: %pI6\n", + ¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC]); + break; + case BNXT_TC_FIRST_WORD_DST_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_SECOND_WORD_DST_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST + 4], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_THIRD_WORD_DST_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST + 8], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + break; + case BNXT_TC_FOURTH_WORD_DST_IPV6: + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST + 12], + &val, BNXT_TC_IPV6_SIZE_IN_EACH_ITERATION); + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_IPV6_DST); + netdev_dbg(bp->dev, "Actions NAT dst IPv6 addr: %pI6\n", + ¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST]); + break; + case BNXT_TC_WORD_DSCP_IPV6: + rc = bnxt_tc_set_dscp_ipv6(bp, params, offset, val, mask); + break; + default: + return -EINVAL; + } + + return rc; +} + +#define BNXT_TC_L4_PORT_TYPE_SRC 1 +#define BNXT_TC_L4_PORT_TYPE_DST 2 +static int 
bnxt_tc_set_l4_action_params(struct bnxt *bp, struct ulp_tc_parser_params *params, + u32 mask, u32 val, u8 port_type) +{ + /* val is a u32 that can carry either src port or dst port value which are u16 each. + * If src port extract the value correctly. + */ + if (~mask & 0xffff) + val = val >> 16; + + if (port_type == BNXT_TC_L4_PORT_TYPE_SRC) { + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC], + &val, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC); + /* Update the hdr_bitmap with set tp src */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_TP_SRC); + netdev_dbg(bp->dev, "Actions NAT sport = %d\n", htons(val)); + } else if (port_type == BNXT_TC_L4_PORT_TYPE_DST) { + memcpy(¶ms->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST], + &val, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST); + /* Update the hdr_bitmap with set tp dst */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SET_TP_DST); + netdev_dbg(bp->dev, "Actions NAT dport = %d\n", htons(val)); + } else { + return -EINVAL; + } + + return 0; +} + +/* The stack provides the smac/dmac action values to be set, using key and + * mask in multiple iterations of 4-bytes(u32). This routine consolidates + * such multiple values into 6-byte smac and dmac values. 
+ * + * For example: + * Mask/Key Offset Iteration + * ========== ====== ========= + * src mac 0xffff0000/0x02010000 4 1 + * src mac 0xffffffff/0x06050403 8 2 + * dst mac 0xffffffff/0x0a090807 0 3 + * dst mac 0x0000ffff/0x00000c0b 4 4 + * + * The above combination coming from the stack will be consolidated as + * ============== + * src mac: 0x010203040506 + * dst mac: 0x0708090a0b0c + */ +static int bnxt_tc_set_l2_action_params(struct bnxt *bp, struct ulp_tc_parser_params *params, + u32 mask, u32 val, u32 offset) +{ + u32 act_offset, size; + u8 *act_ptr; + + netdev_dbg(bp->dev, "%s: mask: 0x%x val: 0x%x offset: %d\n", + __func__, mask, val, offset); + + switch (offset) { + case 0: /* dmac: higher 4 bytes */ + act_offset = BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST + offset; + size = sizeof(val); + break; + + case 4: + if (mask == 0xffff) { /* dmac: lower 2 bytes */ + act_offset = BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST + offset; + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_SET_MAC_DST); + } else { /* smac: higher 2 bytes */ + act_offset = BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC; + val >>= 16; + } + size = 2; + break; + + case 8: /* smac: lower 4 bytes */ + act_offset = BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC + 2; + size = sizeof(val); + ULP_BITMAP_SET(params->act_bitmap.bits, + BNXT_ULP_ACT_BIT_SET_MAC_SRC); + break; + + default: + return -EINVAL; + } + + act_ptr = ¶ms->act_prop.act_details[act_offset]; + memcpy(act_ptr, &val, size); + + return 0; +} + +#ifdef HAVE_FLOW_OFFLOAD_H + +static int bnxt_tc_parse_pedit(struct bnxt *bp, struct ulp_tc_parser_params *params, + void *action) +{ + struct flow_action_entry *act = action; + u32 mask, val, offset; + u8 htype; + int rc; + + offset = act->mangle.offset; + htype = act->mangle.htype; + mask = ~act->mangle.mask; + val = act->mangle.val; + + switch (htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + rc = bnxt_tc_set_l2_action_params(bp, params, mask, val, + offset); + if (rc) + return rc; + break; + case 
FLOW_ACT_MANGLE_HDR_TYPE_IP4: + rc = bnxt_tc_set_l3_v4_action_params(bp, params, offset, val); + if (rc) + return rc; + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + rc = bnxt_tc_set_l3_v6_action_params(bp, params, offset, val, mask); + if (rc) + return rc; + break; + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + /* offset == 0 means TCP/UDP SPORT/DPORT. + * PEDIT on rest of the TCP/UDP headers is not supported. + */ + if (offset) + return -EOPNOTSUPP; + if (mask & 0xffff) { + rc = bnxt_tc_set_l4_action_params(bp, params, mask, val, + BNXT_TC_L4_PORT_TYPE_SRC); + if (rc) + return rc; + } else { + rc = bnxt_tc_set_l4_action_params(bp, params, mask, val, + BNXT_TC_L4_PORT_TYPE_DST); + if (rc) + return rc; + } + break; + default: + netdev_dbg(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EOPNOTSUPP; + } + + return 0; +} + +#else /* HAVE_FLOW_OFFLOAD_H */ + +static int bnxt_tc_parse_pedit(struct bnxt *bp, struct ulp_tc_parser_params *params, + void *action) +{ + struct tc_action *tc_act = action; + u32 mask, val, offset; + int nkeys, j, rc; + u8 cmd, htype; + + nkeys = tcf_pedit_nkeys(tc_act); + for (j = 0 ; j < nkeys; j++) { + cmd = tcf_pedit_cmd(tc_act, j); + /* L2 rewrite comes as TCA_PEDIT_KEY_EX_CMD_SET type from TC. + * Return error, if the TC pedit cmd is not of this type. 
+ */ + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { + netdev_err(bp->dev, "%s: pedit cmd not supported\n", + __func__); + return -EINVAL; + } + + offset = tcf_pedit_offset(tc_act, j); + htype = tcf_pedit_htype(tc_act, j); + mask = ~tcf_pedit_mask(tc_act, j); + val = tcf_pedit_val(tc_act, j); + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + rc = bnxt_tc_set_l2_action_params(bp, params, mask, val, offset); + if (rc) + return rc; + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + rc = bnxt_tc_set_l3_v4_action_params(bp, params, offset, val); + if (rc) + return rc; + break; + + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + rc = bnxt_tc_set_l3_v6_action_params(bp, params, offset, val, mask); + if (rc) + return rc; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + /* offset == 0 means TCP/UDP SPORT/DPORT. + * PEDIT on rest of the TCP/UDP headers is not supported. + */ + if (offset) + return -EOPNOTSUPP; + if (mask & 0xffff) { + rc = bnxt_tc_set_l4_action_params(bp, params, mask, val, + BNXT_TC_L4_PORT_TYPE_SRC); + if (rc) + return rc; + } else { + rc = bnxt_tc_set_l4_action_params(bp, params, mask, val, + BNXT_TC_L4_PORT_TYPE_DST); + if (rc) + return rc; + } + break; + default: + netdev_dbg(bp->dev, "%s: Unsupported pedit hdr type\n", + __func__); + return -EOPNOTSUPP; + } + } + + return 0; +} + +#endif /* HAVE_FLOW_OFFLOAD_H */ + +int ulp_tc_mangle_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act) +{ + int rc; + + rc = bnxt_tc_parse_pedit(bp, params, act); + if (rc) + netdev_dbg(bp->dev, "%s failed, rc: %d\n", __func__, rc); + + return rc; +} + +int ulp_tc_csum_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act) +{ + return 0; +} + +int ulp_tc_drop_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act) +{ + /* Set drop action in the action bitmap */ + ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP); + + return 0; +} +#endif + +#endif /* 
CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.h new file mode 100644 index 000000000000..0079d87f4250 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_parser.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_TC_PARSER_H_ +#define _ULP_TC_PARSER_H_ + +#include +#include +#include +#include +#if defined(HAVE_TC_FLOW_CLS_OFFLOAD) || defined(HAVE_TC_CLS_FLOWER_OFFLOAD) +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAVE_TCF_TUNNEL +#include +#endif +#include +#endif /* HAVE_TC_FLOW_CLS_OFFLOAD || HAVE_TC_CLS_FLOWER_OFFLOAD */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_sriov.h" +#include "bnxt_tc_compat.h" +#include "bnxt_tc.h" +#include "bnxt_vfr.h" +#include "tf_core.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_mapper.h" +#include "bnxt_tf_common.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/* defines to be used in the tunnel header parsing */ +#define BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS 2 +#define BNXT_ULP_ENCAP_IPV4_ID_PROTO 6 +#define BNXT_ULP_ENCAP_IPV4_DEST_IP 4 +#define BNXT_ULP_ENCAP_IPV4_SIZE 12 +#define BNXT_ULP_ENCAP_IPV6_VTC_FLOW 4 +#define BNXT_ULP_ENCAP_IPV6_PROTO_TTL 2 +#define BNXT_ULP_ENCAP_IPV6_DO 2 +#define BNXT_ULP_ENCAP_IPV6_SIZE 24 +#define BNXT_ULP_ENCAP_UDP_SIZE 4 +#define BNXT_ULP_INVALID_SVIF_VAL -1U + +#define BNXT_ULP_GET_IPV6_VER(vtcf) \ + (((vtcf) & BNXT_ULP_PARSER_IPV6_VER_MASK) >> 28) +#define BNXT_ULP_GET_IPV6_TC(vtcf) \ + (((vtcf) & BNXT_ULP_PARSER_IPV6_TC) >> 20) +#define BNXT_ULP_GET_IPV6_FLOWLABEL(vtcf) \ + ((vtcf) & BNXT_ULP_PARSER_IPV6_FLOW_LABEL) +#define BNXT_ULP_PARSER_IPV6_VER_MASK 0xf0000000 
+#define BNXT_ULP_IPV6_DFLT_VER 0x60000000 +#define BNXT_ULP_PARSER_IPV6_TC 0x0ff00000 +#define BNXT_ULP_PARSER_IPV6_FLOW_LABEL 0x000fffff +#define BNXT_ULP_DEFAULT_TTL 64 + +enum bnxt_ulp_prsr_action { + ULP_PRSR_ACT_DEFAULT = 0, + ULP_PRSR_ACT_MATCH_IGNORE = 1, + ULP_PRSR_ACT_MASK_IGNORE = 2, + ULP_PRSR_ACT_SPEC_IGNORE = 4 +}; + +void +bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_parms *mparms, + struct ulp_tc_parser_params *params, + enum bnxt_ulp_fdb_type flow_type); + +/* Function to handle the parsing of the RTE port id. */ +int +ulp_tc_parser_implicit_match_port_process(struct ulp_tc_parser_params *param); + +/* Function to handle the implicit action port id */ +int ulp_tc_parser_implicit_act_port_process(struct bnxt *bp, + struct ulp_tc_parser_params *params); + +/* Functions to handle the parsing of TC Flows and placing + * the TC flow match fields into the ulp structures. + */ +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +int bnxt_ulp_tc_parser_hdr_parse(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct ulp_tc_parser_params *params); +#endif +int ulp_tc_control_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_basic_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_eth_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_ip_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_ipv4_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_ipv6_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_l4_ports_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tcp_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int bnxt_ulp_tc_parser_post_process(struct ulp_tc_parser_params *params); 
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +int bnxt_ulp_tc_parser_act_parse(struct bnxt *bp, + struct flow_cls_offload *tc_flow_cmd, + struct ulp_tc_parser_params *params); +#endif +int ulp_tc_redirect_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +int ulp_tc_ingress_mirror_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +int ulp_tc_egress_mirror_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +int ulp_tc_tunnel_encap_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +int ulp_tc_mangle_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act); +int ulp_tc_csum_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act); +int ulp_tc_drop_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act); +int ulp_tc_goto_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *act); +int ulp_tc_tnl_control_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tnl_ip_ctrl_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tnl_ipv4_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tnl_ipv6_addr_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tnl_l4_ports_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tnl_key_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_tunnel_decap_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +int ulp_tc_vlan_handler(struct bnxt *bp, struct ulp_tc_parser_params *params, + void *match_arg); +int ulp_tc_vlan_push_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void 
*action_arg); +int ulp_tc_vlan_pop_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); + +int +ulp_tc_set_mac_src_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params); + +int +ulp_tc_set_mac_dst_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params); + +int +ulp_tc_meter_act_handler(struct bnxt *bp, + struct ulp_tc_parser_params *params); + +int ulp_tc_parser_implicit_match_port_process(struct ulp_tc_parser_params *params); +int ulp_tc_parser_implicit_act_port_process(struct bnxt *bp, + struct ulp_tc_parser_params *params); +int ulp_tc_parser_act_port_set(struct ulp_tc_parser_params *param, u32 ifindex); +#ifdef CONFIG_BNXT_FLOWER_OFFLOAD +int bnxt_flow_meter_profile_add(struct bnxt *bp, u32 meter_profile_id, u32 dir); +int bnxt_flow_meter_profile_delete(struct bnxt *bp, u32 meter_profile_id, u32 dir); +int bnxt_flow_meter_create(struct bnxt *bp, u32 meter_profile_id, u32 meter_id, u32 dir); +int bnxt_flow_meter_destroy(struct bnxt *bp, u32 meter_id, u32 dir); +int bnxt_tc_clear_dscp_ipv6(struct bnxt *bp, struct bnxt_ulp_context *ulp_ctx); +#endif + +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD || CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD */ + +#endif /* _ULP_TC_PARSER_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow.h new file mode 100644 index 000000000000..02ee3100e373 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow.h @@ -0,0 +1,1608 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_TC_RTE_FLOW_GEN_H_ +#define _ULP_TC_RTE_FLOW_GEN_H_ + +#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD +/* + * The following types should be used when handling values according to a + * specific byte ordering, which may differ from that of the host CPU. 
+ * + * Libraries, public APIs and applications are encouraged to use them for + * documentation purposes. + */ +typedef uint16_t rte_be16_t; /**< 16-bit big-endian value. */ +typedef uint32_t rte_be32_t; /**< 32-bit big-endian value. */ +typedef uint64_t rte_be64_t; /**< 64-bit big-endian value. */ +typedef uint16_t rte_le16_t; /**< 16-bit little-endian value. */ +typedef uint32_t rte_le32_t; /**< 32-bit little-endian value. */ +typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */ + +#define RTE_ETHER_ADDR_LEN 6 + +/** + * Matching pattern item types. + * + * Pattern items fall in two categories: + * + * - Matching protocol headers and packet data, usually associated with a + * specification structure. These must be stacked in the same order as the + * protocol layers to match inside packets, starting from the lowest. + * + * - Matching meta-data or affecting pattern processing, often without a + * specification structure. Since they do not match packet contents, their + * position in the list is usually not relevant. + * + * See the description of individual types for more information. Those + * marked with [META] fall into the second category. + */ +enum rte_flow_item_type { + /** + * [META] + * + * End marker for item lists. Prevents further processing of items, + * thereby ending the pattern. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_END, + + /** + * [META] + * + * Used as a placeholder for convenience. It is ignored and simply + * discarded by PMDs. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_VOID, + + /** + * [META] + * + * Inverted matching, i.e. process packets that do not match the + * pattern. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_INVERT, + + /** + * Matches any protocol in place of the current layer, a single ANY + * may also stand for several protocol layers. + * + * See struct rte_flow_item_any. 
+ */ + RTE_FLOW_ITEM_TYPE_ANY, + + /** + * @deprecated + * @see RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT + * + * [META] + * + * Matches traffic originating from (ingress) or going to (egress) + * the physical function of the current device. + * + * No associated specification structure. + */ + RTE_FLOW_ITEM_TYPE_PF, + + /** + * @deprecated + * @see RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT + * + * [META] + * + * Matches traffic originating from (ingress) or going to (egress) a + * given virtual function of the current device. + * + * See struct rte_flow_item_vf. + */ + RTE_FLOW_ITEM_TYPE_VF, + + /** + * @deprecated + * @see RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT + * + * [META] + * + * Matches traffic originating from (ingress) or going to (egress) a + * physical port of the underlying device. + * + * See struct rte_flow_item_phy_port. + */ + RTE_FLOW_ITEM_TYPE_PHY_PORT, + + /** + * @deprecated + * @see RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT + * + * [META] + * + * Matches traffic originating from (ingress) or going to (egress) a + * given DPDK port ID. + * + * See struct rte_flow_item_port_id. + */ + RTE_FLOW_ITEM_TYPE_PORT_ID, + + /** + * Matches a byte string of a given length at a given offset. + * + * See struct rte_flow_item_raw. + */ + RTE_FLOW_ITEM_TYPE_RAW, + + /** + * Matches an Ethernet header. + * + * See struct rte_flow_item_eth. + */ + RTE_FLOW_ITEM_TYPE_ETH, + + /** + * Matches an 802.1Q/ad VLAN tag. + * + * See struct rte_flow_item_vlan. + */ + RTE_FLOW_ITEM_TYPE_VLAN, + + /** + * Matches an IPv4 header. + * + * See struct rte_flow_item_ipv4. + */ + RTE_FLOW_ITEM_TYPE_IPV4, + + /** + * Matches an IPv6 header. + * + * See struct rte_flow_item_ipv6. + */ + RTE_FLOW_ITEM_TYPE_IPV6, + + /** + * Matches an ICMP header. + * + * See struct rte_flow_item_icmp. 
+ */ + RTE_FLOW_ITEM_TYPE_ICMP, + + /** + * Matches a UDP header. + * + * See struct rte_flow_item_udp. + */ + RTE_FLOW_ITEM_TYPE_UDP, + + /** + * Matches a TCP header. + * + * See struct rte_flow_item_tcp. + */ + RTE_FLOW_ITEM_TYPE_TCP, + + /** + * Matches a SCTP header. + * + * See struct rte_flow_item_sctp. + */ + RTE_FLOW_ITEM_TYPE_SCTP, + + /** + * Matches a VXLAN header. + * + * See struct rte_flow_item_vxlan. + */ + RTE_FLOW_ITEM_TYPE_VXLAN, + + /** + * Matches a E_TAG header. + * + * See struct rte_flow_item_e_tag. + */ + RTE_FLOW_ITEM_TYPE_E_TAG, + + /** + * Matches a NVGRE header. + * + * See struct rte_flow_item_nvgre. + */ + RTE_FLOW_ITEM_TYPE_NVGRE, + + /** + * Matches a MPLS header. + * + * See struct rte_flow_item_mpls. + */ + RTE_FLOW_ITEM_TYPE_MPLS, + + /** + * Matches a GRE header. + * + * See struct rte_flow_item_gre. + */ + RTE_FLOW_ITEM_TYPE_GRE, + + /** + * [META] + * + * Fuzzy pattern match, expect faster than default. + * + * This is for device that support fuzzy matching option. + * Usually a fuzzy matching is fast but the cost is accuracy. + * + * See struct rte_flow_item_fuzzy. + */ + RTE_FLOW_ITEM_TYPE_FUZZY, + + /** + * Matches a GTP header. + * + * Configure flow for GTP packets. + * + * See struct rte_flow_item_gtp. + */ + RTE_FLOW_ITEM_TYPE_GTP, + + /** + * Matches a GTP header. + * + * Configure flow for GTP-C packets. + * + * See struct rte_flow_item_gtp. + */ + RTE_FLOW_ITEM_TYPE_GTPC, + + /** + * Matches a GTP header. + * + * Configure flow for GTP-U packets. + * + * See struct rte_flow_item_gtp. + */ + RTE_FLOW_ITEM_TYPE_GTPU, + + /** + * Matches a ESP header. + * + * See struct rte_flow_item_esp. + */ + RTE_FLOW_ITEM_TYPE_ESP, + + /** + * Matches a GENEVE header. + * + * See struct rte_flow_item_geneve. + */ + RTE_FLOW_ITEM_TYPE_GENEVE, + + /** + * Matches a VXLAN-GPE header. + * + * See struct rte_flow_item_vxlan_gpe. + */ + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + + /** + * Matches an ARP header for Ethernet/IPv4. 
+ * + * See struct rte_flow_item_arp_eth_ipv4. + */ + RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4, + + /** + * Matches the presence of any IPv6 extension header. + * + * See struct rte_flow_item_ipv6_ext. + */ + RTE_FLOW_ITEM_TYPE_IPV6_EXT, + + /** + * Matches any ICMPv6 header. + * + * See struct rte_flow_item_icmp6. + */ + RTE_FLOW_ITEM_TYPE_ICMP6, + + /** + * Matches an ICMPv6 neighbor discovery solicitation. + * + * See struct rte_flow_item_icmp6_nd_ns. + */ + RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS, + + /** + * Matches an ICMPv6 neighbor discovery advertisement. + * + * See struct rte_flow_item_icmp6_nd_na. + */ + RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA, + + /** + * Matches the presence of any ICMPv6 neighbor discovery option. + * + * See struct rte_flow_item_icmp6_nd_opt. + */ + RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT, + + /** + * Matches an ICMPv6 neighbor discovery source Ethernet link-layer + * address option. + * + * See struct rte_flow_item_icmp6_nd_opt_sla_eth. + */ + RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH, + + /** + * Matches an ICMPv6 neighbor discovery target Ethernet link-layer + * address option. + * + * See struct rte_flow_item_icmp6_nd_opt_tla_eth. + */ + RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH, + + /** + * Matches specified mark field. + * + * See struct rte_flow_item_mark. + */ + RTE_FLOW_ITEM_TYPE_MARK, + + /** + * [META] + * + * Matches a metadata value. + * + * See struct rte_flow_item_meta. + */ + RTE_FLOW_ITEM_TYPE_META, + + /** + * Matches a GRE optional key field. + * + * The value should a big-endian 32bit integer. + * + * When this item present the K bit is implicitly matched as "1" + * in the default mask. + * + * @p spec/mask type: + * @code rte_be32_t * @endcode + */ + RTE_FLOW_ITEM_TYPE_GRE_KEY, + + /** + * Matches a GTP extension header: PDU session container. + * + * Configure flow for GTP packets with extension header type 0x85. + * + * See struct rte_flow_item_gtp_psc. + */ + RTE_FLOW_ITEM_TYPE_GTP_PSC, + + /** + * Matches a PPPoE header. 
+ * + * Configure flow for PPPoE session packets. + * + * See struct rte_flow_item_pppoe. + */ + RTE_FLOW_ITEM_TYPE_PPPOES, + + /** + * Matches a PPPoE header. + * + * Configure flow for PPPoE discovery packets. + * + * See struct rte_flow_item_pppoe. + */ + RTE_FLOW_ITEM_TYPE_PPPOED, + + /** + * Matches a PPPoE optional proto_id field. + * + * It only applies to PPPoE session packets. + * + * See struct rte_flow_item_pppoe_proto_id. + */ + RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID, + + /** + * Matches Network service header (NSH). + * See struct rte_flow_item_nsh. + * + */ + RTE_FLOW_ITEM_TYPE_NSH, + + /** + * Matches Internet Group Management Protocol (IGMP). + * See struct rte_flow_item_igmp. + * + */ + RTE_FLOW_ITEM_TYPE_IGMP, + + /** + * Matches IP Authentication Header (AH). + * See struct rte_flow_item_ah. + * + */ + RTE_FLOW_ITEM_TYPE_AH, + /** + * Matches the presence of any IPv6 routing extension header. + * + * See struct rte_flow_item_ipv6_route_ext. + */ + RTE_FLOW_ITEM_TYPE_IPV6_ROUTE_EXT, + + /** + * Matches a HIGIG header. + * see struct rte_flow_item_higig2_hdr. + */ + RTE_FLOW_ITEM_TYPE_HIGIG2, + + /** + * [META] + * + * Matches a tag value. + * + * See struct rte_flow_item_tag. + */ + RTE_FLOW_ITEM_TYPE_TAG, + + /** + * Matches a L2TPv3 over IP header. + * + * Configure flow for L2TPv3 over IP packets. + * + * See struct rte_flow_item_l2tpv3oip. + */ + RTE_FLOW_ITEM_TYPE_L2TPV3OIP, + + /** + * Matches PFCP Header. + * See struct rte_flow_item_pfcp. + * + */ + RTE_FLOW_ITEM_TYPE_PFCP, + + /** + * Matches eCPRI Header. + * + * Configure flow for eCPRI over ETH or UDP packets. + * + * See struct rte_flow_item_ecpri. + */ + RTE_FLOW_ITEM_TYPE_ECPRI, + + /** + * Matches the presence of IPv6 fragment extension header. + * + * See struct rte_flow_item_ipv6_frag_ext. 
+ */ + RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT, + + /** + * Matches Geneve Variable Length Option + * + * See struct rte_flow_item_geneve_opt + */ + RTE_FLOW_ITEM_TYPE_GENEVE_OPT, + + /** + * [META] + * + * Matches on packet integrity. + * For some devices application needs to enable integration checks in HW + * before using this item. + * + * @see struct rte_flow_item_integrity. + */ + RTE_FLOW_ITEM_TYPE_INTEGRITY, + + /** + * [META] + * + * Matches conntrack state. + * + * @see struct rte_flow_item_conntrack. + */ + RTE_FLOW_ITEM_TYPE_CONNTRACK, + + /** + * [META] + * + * Matches traffic entering the embedded switch from the given ethdev. + * + * @see struct rte_flow_item_ethdev + */ + RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR, + + /** + * [META] + * + * Matches traffic entering the embedded switch from + * the entity represented by the given ethdev. + * + * @see struct rte_flow_item_ethdev + */ + RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT, + + /** + * Matches a configured set of fields at runtime calculated offsets + * over the generic network header with variable length and + * flexible pattern + * + * @see struct rte_flow_item_flex. + */ + RTE_FLOW_ITEM_TYPE_FLEX, + + /** + * Matches L2TPv2 Header. + * + * See struct rte_flow_item_l2tpv2. + */ + RTE_FLOW_ITEM_TYPE_L2TPV2, + + /** + * Matches PPP Header. + * + * See struct rte_flow_item_ppp. + */ + RTE_FLOW_ITEM_TYPE_PPP, +}; + +/** + * Matching pattern item definition. + * + * A pattern is formed by stacking items starting from the lowest protocol + * layer to match. This stacking restriction does not apply to meta items + * which can be placed anywhere in the stack without affecting the meaning + * of the resulting pattern. + * + * Patterns are terminated by END items. + * + * The spec field should be a valid pointer to a structure of the related + * item type. It may remain unspecified (NULL) in many cases to request + * broad (nonspecific) matching. In such cases, last and mask must also be + * set to NULL. 
+ * + * Optionally, last can point to a structure of the same type to define an + * inclusive range. This is mostly supported by integer and address fields, + * may cause errors otherwise. Fields that do not support ranges must be set + * to 0 or to the same value as the corresponding fields in spec. + * + * Only the fields defined to nonzero values in the default masks (see + * rte_flow_item_{name}_mask constants) are considered relevant by + * default. This can be overridden by providing a mask structure of the + * same type with applicable bits set to one. It can also be used to + * partially filter out specific fields (e.g. as an alternate mean to match + * ranges of IP addresses). + * + * Mask is a simple bit-mask applied before interpreting the contents of + * spec and last, which may yield unexpected results if not used + * carefully. For example, if for an IPv4 address field, spec provides + * 10.1.2.3, last provides 10.3.4.5 and mask provides 255.255.0.0, the + * effective range becomes 10.1.0.0 to 10.3.255.255. + */ +struct rte_flow_item { + enum rte_flow_item_type type; /**< Item type. */ + const void *spec; /**< Pointer to item specification structure. */ + const void *last; /**< Defines an inclusive range (spec to last). */ + const void *mask; /**< Bit-mask applied to spec and last. */ +}; + +/** + * Action types. + * + * Each possible action is represented by a type. + * An action can have an associated configuration object. + * Several actions combined in a list can be assigned + * to a flow rule and are performed in order. + * + * They fall in three categories: + * + * - Actions that modify the fate of matching traffic, for instance by + * dropping or assigning it a specific destination. + * + * - Actions that modify matching traffic contents or its properties. This + * includes adding/removing encapsulation, encryption, compression and + * marks. 
+ * + * - Actions related to the flow rule itself, such as updating counters or + * making it non-terminating. + * + * Flow rules being terminating by default, not specifying any action of the + * fate kind results in undefined behavior. This applies to both ingress and + * egress. + * + * PASSTHRU, when supported, makes a flow rule non-terminating. + */ +enum rte_flow_action_type { + /** + * End marker for action lists. Prevents further processing of + * actions, thereby ending the list. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_END, + + /** + * Used as a placeholder for convenience. It is ignored and simply + * discarded by PMDs. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_VOID, + + /** + * Leaves traffic up for additional processing by subsequent flow + * rules; makes a flow rule non-terminating. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_PASSTHRU, + + /** + * RTE_FLOW_ACTION_TYPE_JUMP + * + * Redirects packets to a group on the current device. + * + * See struct rte_flow_action_jump. + */ + RTE_FLOW_ACTION_TYPE_JUMP, + + /** + * Attaches an integer value to packets and sets RTE_MBUF_F_RX_FDIR and + * RTE_MBUF_F_RX_FDIR_ID mbuf flags. + * + * See struct rte_flow_action_mark. + * + * One should negotiate mark delivery from the NIC to the PMD. + * @see rte_eth_rx_metadata_negotiate() + * @see RTE_ETH_RX_METADATA_USER_MARK + */ + RTE_FLOW_ACTION_TYPE_MARK, + + /** + * Flags packets. Similar to MARK without a specific value; only + * sets the RTE_MBUF_F_RX_FDIR mbuf flag. + * + * No associated configuration structure. + * + * One should negotiate flag delivery from the NIC to the PMD. + * @see rte_eth_rx_metadata_negotiate() + * @see RTE_ETH_RX_METADATA_USER_FLAG + */ + RTE_FLOW_ACTION_TYPE_FLAG, + + /** + * Assigns packets to a given queue index. + * + * See struct rte_flow_action_queue. + */ + RTE_FLOW_ACTION_TYPE_QUEUE, + + /** + * Drops packets. 
+ * + * PASSTHRU overrides this action if both are specified. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_DROP, + + /** + * Enables counters for this flow rule. + * + * These counters can be retrieved and reset through rte_flow_query() or + * rte_flow_action_handle_query() if the action provided via handle, + * see struct rte_flow_query_count. + * + * See struct rte_flow_action_count. + */ + RTE_FLOW_ACTION_TYPE_COUNT, + + /** + * Similar to QUEUE, except RSS is additionally performed on packets + * to spread them among several queues according to the provided + * parameters. + * + * See struct rte_flow_action_rss. + */ + RTE_FLOW_ACTION_TYPE_RSS, + + /** + * @deprecated + * @see RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT + * + * Directs matching traffic to the physical function (PF) of the + * current device. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_PF, + + /** + * @deprecated + * @see RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT + * + * Directs matching traffic to a given virtual function of the + * current device. + * + * See struct rte_flow_action_vf. + */ + RTE_FLOW_ACTION_TYPE_VF, + + /** + * @deprecated + * @see RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR + * @see RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT + * + * Directs matching traffic to a given DPDK port ID. + * + * See struct rte_flow_action_port_id. + */ + RTE_FLOW_ACTION_TYPE_PORT_ID, + + /** + * Traffic metering and policing (MTR). + * + * See struct rte_flow_action_meter. + * See file rte_mtr.h for MTR object configuration. + */ + RTE_FLOW_ACTION_TYPE_METER, + + /** + * Redirects packets to security engine of current device for security + * processing as specified by security session. + * + * See struct rte_flow_action_security. + */ + RTE_FLOW_ACTION_TYPE_SECURITY, + + /** + * @warning This is a legacy action. 
+ * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Implements OFPAT_DEC_NW_TTL ("decrement IP TTL") as defined by + * the OpenFlow Switch Specification. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_OF_DEC_NW_TTL, + + /** + * Implements OFPAT_POP_VLAN ("pop the outer VLAN tag") as defined + * by the OpenFlow Switch Specification. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_OF_POP_VLAN, + + /** + * Implements OFPAT_PUSH_VLAN ("push a new VLAN tag") as defined by + * the OpenFlow Switch Specification. + * + * See struct rte_flow_action_of_push_vlan. + */ + RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, + + /** + * Implements OFPAT_SET_VLAN_VID ("set the 802.1q VLAN ID") as + * defined by the OpenFlow Switch Specification. + * + * See struct rte_flow_action_of_set_vlan_vid. + */ + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, + + /** + * Implements OFPAT_SET_LAN_PCP ("set the 802.1q priority") as + * defined by the OpenFlow Switch Specification. + * + * See struct rte_flow_action_of_set_vlan_pcp. + */ + RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, + + /** + * Implements OFPAT_POP_MPLS ("pop the outer MPLS tag") as defined + * by the OpenFlow Switch Specification. + * + * See struct rte_flow_action_of_pop_mpls. + */ + RTE_FLOW_ACTION_TYPE_OF_POP_MPLS, + + /** + * Implements OFPAT_PUSH_MPLS ("push a new MPLS tag") as defined by + * the OpenFlow Switch Specification. + * + * See struct rte_flow_action_of_push_mpls. + */ + RTE_FLOW_ACTION_TYPE_OF_PUSH_MPLS, + + /** + * Encapsulate flow in VXLAN tunnel as defined in + * rte_flow_action_vxlan_encap action structure. + * + * See struct rte_flow_action_vxlan_encap. + */ + RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, + + /** + * Decapsulate outer most VXLAN tunnel from matched flow. + * + * If flow pattern does not define a valid VXLAN tunnel (as specified by + * RFC7348) then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION + * error. 
+ */ + RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, + + /** + * Encapsulate flow in SRv6 Header as defined in + * rte_flow_action_ip_encap action structure. + * + * See struct rte_flow_action_ip_encap. + */ + RTE_FLOW_ACTION_TYPE_IP_ENCAP, + + /** + * Decapsulate outer most SRv6 header from matched flow. + */ + RTE_FLOW_ACTION_TYPE_IP_DECAP, + + /** + * Encapsulate flow in NVGRE tunnel defined in the + * rte_flow_action_nvgre_encap action structure. + * + * See struct rte_flow_action_nvgre_encap. + */ + RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP, + + /** + * Decapsulate outer most NVGRE tunnel from matched flow. + * + * If flow pattern does not define a valid NVGRE tunnel (as specified by + * RFC7637) then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION + * error. + */ + RTE_FLOW_ACTION_TYPE_NVGRE_DECAP, + + /** + * Add outer header whose template is provided in its data buffer + * + * See struct rte_flow_action_raw_encap. + */ + RTE_FLOW_ACTION_TYPE_RAW_ENCAP, + + /** + * Remove outer header whose template is provided in its data buffer. + * + * See struct rte_flow_action_raw_decap + */ + RTE_FLOW_ACTION_TYPE_RAW_DECAP, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv4 source address in the outermost IPv4 header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_ipv4. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv4 destination address in the outermost IPv4 header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_ipv4. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV4_DST, + + /** + * @warning This is a legacy action. 
+ * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv6 source address in the outermost IPv6 header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_ipv6. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv6 destination address in the outermost IPv6 header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_ipv6. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV6_DST, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify source port number in the outermost TCP/UDP header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_TCP + * or RTE_FLOW_ITEM_TYPE_UDP, then the PMD should return a + * RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_tp. + */ + RTE_FLOW_ACTION_TYPE_SET_TP_SRC, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify destination port number in the outermost TCP/UDP header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_TCP + * or RTE_FLOW_ITEM_TYPE_UDP, then the PMD should return a + * RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_tp. + */ + RTE_FLOW_ACTION_TYPE_SET_TP_DST, + + /** + * Swap the source and destination MAC addresses in the outermost + * Ethernet header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_MAC_SWAP, + + /** + * @warning This is a legacy action. 
+ * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Decrease TTL value directly + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_DEC_TTL, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Set TTL value + * + * See struct rte_flow_action_set_ttl + */ + RTE_FLOW_ACTION_TYPE_SET_TTL, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Set source MAC address from matched flow. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH, + * the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_mac. + */ + RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Set destination MAC address from matched flow. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_ETH, + * the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_mac. + */ + RTE_FLOW_ACTION_TYPE_SET_MAC_DST, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Increase sequence number in the outermost TCP header. + * + * Action configuration specifies the value to increase + * TCP sequence number as a big-endian 32 bit integer. + * + * @p conf type: + * @code rte_be32_t * @endcode + * + * Using this action on non-matching traffic will result in + * undefined behavior. + */ + RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Decrease sequence number in the outermost TCP header. + * + * Action configuration specifies the value to decrease + * TCP sequence number as a big-endian 32 bit integer. + * + * @p conf type: + * @code rte_be32_t * @endcode + * + * Using this action on non-matching traffic will result in + * undefined behavior. 
+ */ + RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Increase acknowledgment number in the outermost TCP header. + * + * Action configuration specifies the value to increase + * TCP acknowledgment number as a big-endian 32 bit integer. + * + * @p conf type: + * @code rte_be32_t * @endcode + + * Using this action on non-matching traffic will result in + * undefined behavior. + */ + RTE_FLOW_ACTION_TYPE_INC_TCP_ACK, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Decrease acknowledgment number in the outermost TCP header. + * + * Action configuration specifies the value to decrease + * TCP acknowledgment number as a big-endian 32 bit integer. + * + * @p conf type: + * @code rte_be32_t * @endcode + * + * Using this action on non-matching traffic will result in + * undefined behavior. + */ + RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Set Tag. + * + * Tag is for internal flow usage only and + * is not delivered to the application. + * + * See struct rte_flow_action_set_tag. + */ + RTE_FLOW_ACTION_TYPE_SET_TAG, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Set metadata on ingress or egress path. + * + * See struct rte_flow_action_set_meta. + */ + RTE_FLOW_ACTION_TYPE_SET_META, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv4 DSCP in the outermost IP header. + * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV4, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_dscp. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP, + + /** + * @warning This is a legacy action. + * @see RTE_FLOW_ACTION_TYPE_MODIFY_FIELD + * + * Modify IPv6 DSCP in the outermost IP header. 
+ * + * If flow pattern does not define a valid RTE_FLOW_ITEM_TYPE_IPV6, + * then the PMD should return a RTE_FLOW_ERROR_TYPE_ACTION error. + * + * See struct rte_flow_action_set_dscp. + */ + RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP, + + /** + * Report as aged flow if timeout passed without any matching on the + * flow. + * + * See struct rte_flow_action_age. + * See function rte_flow_get_q_aged_flows + * See function rte_flow_get_aged_flows + * see enum RTE_ETH_EVENT_FLOW_AGED + * See struct rte_flow_query_age + * See struct rte_flow_update_age + */ + RTE_FLOW_ACTION_TYPE_AGE, + + /** + * The matching packets will be duplicated with specified ratio and + * applied with own set of actions with a fate action. + * + * See struct rte_flow_action_sample. + */ + RTE_FLOW_ACTION_TYPE_SAMPLE, + + /** + * @deprecated + * @see RTE_FLOW_ACTION_TYPE_INDIRECT + * + * Describe action shared across multiple flow rules. + * + * Allow multiple rules reference the same action by handle (see + * struct rte_flow_shared_action). + */ + RTE_FLOW_ACTION_TYPE_SHARED, + + /** + * Modify a packet header field, tag, mark or metadata. + * + * Allow the modification of an arbitrary header field via + * set, add and sub operations or copying its content into + * tag, meta or mark for future processing. + * + * See struct rte_flow_action_modify_field. + */ + RTE_FLOW_ACTION_TYPE_MODIFY_FIELD, + + /** + * An action handle is referenced in a rule through an indirect action. + * + * The same action handle may be used in multiple rules for the same + * or different ethdev ports. + */ + RTE_FLOW_ACTION_TYPE_INDIRECT, + + /** + * [META] + * + * Enable tracking a TCP connection state. + * + * @see struct rte_flow_action_conntrack. + */ + RTE_FLOW_ACTION_TYPE_CONNTRACK, + + /** + * Color the packet to reflect the meter color result. + * Set the meter color in the mbuf to the selected color. + * + * See struct rte_flow_action_meter_color. 
+ */ + RTE_FLOW_ACTION_TYPE_METER_COLOR, + + /** + * At embedded switch level, sends matching traffic to the given ethdev. + * + * @see struct rte_flow_action_ethdev + */ + RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR, + + /** + * At embedded switch level, send matching traffic to + * the entity represented by the given ethdev. + * + * @see struct rte_flow_action_ethdev + */ + RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, + + /** + * Traffic metering and marking (MTR). + * + * @see struct rte_flow_action_meter_mark + * See file rte_mtr.h for MTR profile object configuration. + */ + RTE_FLOW_ACTION_TYPE_METER_MARK, + + /** + * Send packets to the kernel, without going to userspace at all. + * The packets will be received by the kernel driver sharing + * the same device as the DPDK port on which this action is configured. + * This action mostly suits bifurcated driver model. + * This is an ingress non-transfer action only. + * + * No associated configuration structure. + */ + RTE_FLOW_ACTION_TYPE_SEND_TO_KERNEL, +}; + +/** + * Definition of a single action. + * + * A list of actions is terminated by a END action. + * + * For simple actions without a configuration object, conf remains NULL. + */ +struct rte_flow_action { + enum rte_flow_action_type type; /**< Action type. */ + const void *conf; /**< Pointer to action configuration object. */ +}; + +/** + * RTE_FLOW_ACTION_TYPE_QUEUE + * + * Assign packets to a given queue index. + */ +struct rte_flow_action_queue { + uint16_t index; /**< Queue index to use. */ +}; + +/** + * @warning + * @b EXPERIMENTAL: this structure may change without prior notice + * + * RTE_FLOW_ACTION_TYPE_COUNT + * + * Adds a counter action to a matched flow. + * + * If more than one count action is specified in a single flow rule, then each + * action must specify a unique ID. + * + * Counters can be retrieved and reset through ``rte_flow_query()``, see + * ``struct rte_flow_query_count``. 
+ * + * For ports within the same switch domain then the counter ID namespace extends + * to all ports within that switch domain. + */ +struct rte_flow_action_count { + uint32_t id; /**< Counter ID. */ +}; + +/** + * Ethernet address: + * A universally administered address is uniquely assigned to a device by its + * manufacturer. The first three octets (in transmission order) contain the + * Organizationally Unique Identifier (OUI). The following three (MAC-48 and + * EUI-48) octets are assigned by that organization with the only constraint + * of uniqueness. + * A locally administered address is assigned to a device by a network + * administrator and does not contain OUIs. + * See http://standards.ieee.org/regauth/groupmac/tutorial.html + */ +struct rte_ether_addr { + uint8_t addr_bytes[RTE_ETHER_ADDR_LEN]; /**< Addr bytes in tx order */ +/* TBD: } __rte_aligned(2); */ +}; + +/** + * Ethernet header: Contains the destination address, source address + * and frame type. + */ +struct rte_ether_hdr { + struct rte_ether_addr dst_addr; /**< Destination address. */ + struct rte_ether_addr src_addr; /**< Source address. */ + rte_be16_t ether_type; /**< Frame type. 
*/ +/* TBD: } __rte_aligned(2); */ +}; + +#define __rte_packed __packed +#define RTE_BIG_ENDIAN 1 +#define RTE_LITTLE_ENDIAN 2 +#define RTE_BYTE_ORDER RTE_BIG_ENDIAN +/** + * IPv4 Header + */ +struct rte_ipv4_hdr { + __extension__ + union { + uint8_t version_ihl; /**< version and header length */ + struct { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint8_t ihl:4; /**< header length */ + uint8_t version:4; /**< version */ +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint8_t version:4; /**< version */ + uint8_t ihl:4; /**< header length */ +#endif + }; + }; + uint8_t type_of_service; /**< type of service */ + rte_be16_t total_length; /**< length of packet */ + rte_be16_t packet_id; /**< packet ID */ + rte_be16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + rte_be16_t hdr_checksum; /**< header checksum */ + rte_be32_t src_addr; /**< source address */ + rte_be32_t dst_addr; /**< destination address */ +} __rte_packed; + +/** + * IPv6 Header + */ +struct rte_ipv6_hdr { + rte_be32_t vtc_flow; /**< IP version, traffic class & flow label. */ + rte_be16_t payload_len; /**< IP payload size, including ext. headers */ + uint8_t proto; /**< Protocol, next header. */ + uint8_t hop_limits; /**< Hop limits. */ + uint8_t src_addr[16]; /**< IP address of source host. */ + uint8_t dst_addr[16]; /**< IP address of destination host(s). */ +} __rte_packed; + +/** + * Ethernet VLAN Header. + * Contains the 16-bit VLAN Tag Control Identifier and the Ethernet type + * of the encapsulated frame. + */ +struct rte_vlan_hdr { + rte_be16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */ + rte_be16_t eth_proto; /**< Ethernet type of encapsulated frame. */ +} __rte_packed; + +/** + * TCP Header + */ +struct rte_tcp_hdr { + rte_be16_t src_port; /**< TCP source port. */ + rte_be16_t dst_port; /**< TCP destination port. */ + rte_be32_t sent_seq; /**< TX data sequence number. 
*/ + rte_be32_t recv_ack; /**< RX data acknowledgment sequence number. */ + uint8_t data_off; /**< Data offset. */ + uint8_t tcp_flags; /**< TCP flags */ + rte_be16_t rx_win; /**< RX flow control window. */ + rte_be16_t cksum; /**< TCP checksum. */ + rte_be16_t tcp_urp; /**< TCP urgent pointer, if any. */ +} __rte_packed; + +/** + * UDP Header + */ +struct rte_udp_hdr { + rte_be16_t src_port; /**< UDP source port. */ + rte_be16_t dst_port; /**< UDP destination port. */ + rte_be16_t dgram_len; /**< UDP datagram length */ + rte_be16_t dgram_cksum; /**< UDP datagram checksum */ +} __rte_packed; + +/** + * VXLAN protocol header. + * Contains the 8-bit flag, 24-bit VXLAN Network Identifier and + * Reserved fields (24 bits and 8 bits) + */ +struct rte_vxlan_hdr { + rte_be32_t vx_flags; /**< flag (8) + Reserved (24). */ + rte_be32_t vx_vni; /**< VNI (24) + Reserved (8). */ +} __rte_packed; + +/** + * GRE Header + */ +__extension__ +struct rte_gre_hdr { +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + uint16_t res2:4; /**< Reserved */ + uint16_t s:1; /**< Sequence Number Present bit */ + uint16_t k:1; /**< Key Present bit */ + uint16_t res1:1; /**< Reserved */ + uint16_t c:1; /**< Checksum Present bit */ + uint16_t ver:3; /**< Version Number */ + uint16_t res3:5; /**< Reserved */ +#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint16_t c:1; /**< Checksum Present bit */ + uint16_t res1:1; /**< Reserved */ + uint16_t k:1; /**< Key Present bit */ + uint16_t s:1; /**< Sequence Number Present bit */ + uint16_t res2:4; /**< Reserved */ + uint16_t res3:5; /**< Reserved */ + uint16_t ver:3; /**< Version Number */ +#endif + uint16_t proto; /**< Protocol Type */ +} __rte_packed; + +struct rte_flow_item_eth { + union { + struct { + /* + * These fields are retained for compatibility. + * Please switch to the new header field below. + */ + struct rte_ether_addr dst; /**< Destination MAC. */ + struct rte_ether_addr src; /**< Source MAC. */ + rte_be16_t type; /**< EtherType or TPID. 
*/ + }; + struct rte_ether_hdr hdr; + }; + uint32_t has_vlan:1; /**< Packet header contains at least one VLAN. */ + uint32_t reserved:31; /**< Reserved, must be zero. */ +}; + +/** + * RTE_FLOW_ITEM_TYPE_VLAN + * + * Matches an 802.1Q/ad VLAN tag. + * + * The corresponding standard outer EtherType (TPID) values are + * RTE_ETHER_TYPE_VLAN or RTE_ETHER_TYPE_QINQ. It can be overridden by + * the preceding pattern item. + * If a @p VLAN item is present in the pattern, then only tagged packets will + * match the pattern. + * The field @p has_more_vlan can be used to match any type of tagged packets, + * instead of using the @p eth_proto field of @p hdr. + * If the @p eth_proto of @p hdr and @p has_more_vlan fields are not specified, + * then any tagged packets will match the pattern. + */ +struct rte_flow_item_vlan { + union { + struct { + /* + * These fields are retained for compatibility. + * Please switch to the new header field below. + */ + rte_be16_t tci; /**< Tag control information. */ + rte_be16_t inner_type; /**< Inner EtherType or TPID. */ + }; + struct rte_vlan_hdr hdr; + }; + /** Packet header contains at least one more VLAN, after this VLAN. */ + uint32_t has_more_vlan:1; + uint32_t reserved:31; /**< Reserved, must be zero. */ +}; + +/** + * RTE_FLOW_ITEM_TYPE_IPV4 + * + * Matches an IPv4 header. + * + * Note: IPv4 options are handled by dedicated pattern items. + */ +struct rte_flow_item_ipv4 { + struct rte_ipv4_hdr hdr; /**< IPv4 header definition. */ +}; + +/** + * RTE_FLOW_ITEM_TYPE_IPV6. + * + * Matches an IPv6 header. + * + * Dedicated flags indicate if header contains specific extension headers. + */ +struct rte_flow_item_ipv6 { + struct rte_ipv6_hdr hdr; /**< IPv6 header definition. */ + /** Header contains Hop-by-Hop Options extension header. */ + uint32_t has_hop_ext:1; + /** Header contains Routing extension header. */ + uint32_t has_route_ext:1; + /** Header contains Fragment extension header. 
*/ + uint32_t has_frag_ext:1; + /** Header contains Authentication extension header. */ + uint32_t has_auth_ext:1; + /** Header contains Encapsulation Security Payload extension header. */ + uint32_t has_esp_ext:1; + /** Header contains Destination Options extension header. */ + uint32_t has_dest_ext:1; + /** Header contains Mobility extension header. */ + uint32_t has_mobil_ext:1; + /** Header contains Host Identity Protocol extension header. */ + uint32_t has_hip_ext:1; + /** Header contains Shim6 Protocol extension header. */ + uint32_t has_shim6_ext:1; + /** Reserved for future extension headers, must be zero. */ + uint32_t reserved:23; +}; + +/** + * RTE_FLOW_ITEM_TYPE_UDP. + * + * Matches a UDP header. + */ +struct rte_flow_item_udp { + struct rte_udp_hdr hdr; /**< UDP header definition. */ +}; + +/** + * RTE_FLOW_ITEM_TYPE_TCP. + * + * Matches a TCP header. + */ +struct rte_flow_item_tcp { + struct rte_tcp_hdr hdr; /**< TCP header definition. */ +}; + +/** + * RTE_FLOW_ITEM_TYPE_VXLAN. + * + * Matches a VXLAN header (RFC 7348). + */ +struct rte_flow_item_vxlan { + union { + struct { + /* + * These fields are retained for compatibility. + * Please switch to the new header field below. + */ + uint8_t flags; /**< Normally 0x08 (I flag). */ + uint8_t rsvd0[3]; /**< Reserved, normally 0x000000. */ + uint8_t vni[3]; /**< VXLAN identifier. */ + uint8_t rsvd1; /**< Reserved, normally 0x00. */ + }; + struct rte_vxlan_hdr hdr; + }; +}; + +/** + * RTE_FLOW_ITEM_TYPE_GRE. + * + * Matches a GRE header. + */ +struct rte_flow_item_gre { + /** + * Checksum (1b), reserved 0 (12b), version (3b). + * Refer to RFC 2784. + */ + rte_be16_t c_rsvd0_ver; + rte_be16_t protocol; /**< Protocol type. */ +}; + +#define RTE_ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */ +static inline int rte_is_multicast_ether_addr(const struct rte_ether_addr *ea) +{ + return ea->addr_bytes[0] & RTE_ETHER_GROUP_ADDR; +} + +/** + * Check if an Ethernet address is a broadcast address. 
+ * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a broadcast address; + * false (0) otherwise. + */ +static inline int rte_is_broadcast_ether_addr(const struct rte_ether_addr *ea) +{ + const uint16_t *w = (const uint16_t *)ea; + + return (w[0] & w[1] & w[2]) == 0xFFFF; +} + +int ulp_tc_rte_create_all_flows(struct bnxt *bp, int count); +/** Create IPv4 address */ +#define RTE_IPV4(a, b, c, d) ((uint32_t)(((a) & 0xff) << 24) | \ + (((b) & 0xff) << 16) | \ + (((c) & 0xff) << 8) | \ + ((d) & 0xff)) + +#endif +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow_gen.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow_gen.c new file mode 100644 index 000000000000..25c088315344 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tc_rte_flow_gen.c @@ -0,0 +1,1134 @@ + +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023-2023 Broadcom + * All rights reserved. 
+ */ + +#ifdef CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_tf_common.h" +#include "bnxt_ulp_flow.h" +#include "ulp_tc_parser.h" +#include "ulp_matcher.h" +#include "ulp_flow_db.h" +#include "ulp_mapper.h" +#include "ulp_fc_mgr.h" +#include "ulp_port_db.h" +#include "ulp_template_debug_proto.h" +#include "bnxt_ulp_flow.h" +#include "ulp_tc_custom_offload.h" +#include "ulp_tc_rte_flow.h" +#include "bnxt_ulp_flow.h" + +static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH, 0, 0, 0 }; +static struct rte_flow_item_eth eth_spec; +static struct rte_flow_item_eth eth_mask; +static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END, 0, 0, 0 }; + +static struct rte_flow_item_ipv4 ipv4_spec; +static struct rte_flow_item_ipv4 ipv4_mask; +static struct rte_flow_item_ipv6 ipv6_spec; +static struct rte_flow_item_ipv6 ipv6_mask; + +static struct rte_flow_item ipv4_item; +static struct rte_flow_item ipv6_item; +static struct rte_flow_item udp_item; +static struct rte_flow_item tcp_item; + +static struct rte_flow_item_tcp tcp_spec; +static struct rte_flow_item_tcp tcp_mask; +static struct rte_flow_item_udp udp_spec; +static struct rte_flow_item_udp udp_mask; + +#if 0 +static struct rte_flow_item ipv4_item_outer; +static struct rte_flow_item_ipv4 ipv4_spec_outer; +static struct rte_flow_item_ipv4 ipv4_mask_outer; + +static struct rte_flow_item_vxlan vxlan_spec; +static struct rte_flow_item_vxlan vxlan_mask; +static struct rte_flow_item vxlan_item; + +static struct rte_flow_item_gre gre_spec; +static struct rte_flow_item_gre gre_mask; +static struct rte_flow_item gre_item; +/* + * struct rte_flow_item_udp udp_spec_outer = { 0 }; + * struct rte_flow_item_udp udp_mask_outer = { 0 }; + * struct rte_flow_item udp_item_outer = { 0 }; + */ +#endif + +struct rte_flow_action_queue queue_action; +struct rte_flow_action actions[2]; + +uint8_t ipv6_src_addr[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 
9, 1}; +uint8_t ipv6_dst_addr[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 9, 9, 2}; + +static int +ulp_add_eth_dmac_rule(struct bnxt *bp, u8 *dst_addr, u16 q_index) +{ + u8 mask[8] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct bnxt_ulp_flow_info flow_info = { 0 }; + struct rte_flow_item pattern[2] = { 0 }; + + memset(ð_spec, 0, sizeof(eth_spec)); + memset(ð_mask, 0, sizeof(eth_mask)); + memcpy(eth_spec.hdr.dst_addr.addr_bytes, dst_addr, sizeof(eth_spec.hdr.dst_addr)); + memcpy(eth_mask.hdr.dst_addr.addr_bytes, mask, sizeof(eth_mask.hdr.dst_addr)); + + eth_item.type = RTE_FLOW_ITEM_TYPE_ETH; + eth_item.spec = ð_spec; + eth_item.mask = ð_mask; + + pattern[0] = eth_item; + pattern[1] = end_item; + + queue_action.index = q_index; + actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE; + actions[0].conf = &queue_action; + + actions[1].type = RTE_FLOW_ACTION_TYPE_END; + actions[1].conf = NULL; + + return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, + &flow_info); +} + +static int +ulp_add_eth_type_rule(struct bnxt *bp, uint16_t eth_type, u16 q_index) +{ + struct bnxt_ulp_flow_info flow_info = { 0 }; + struct rte_flow_item pattern[2] = { 0 }; + + memset(ð_spec, 0, sizeof(eth_spec)); + memset(ð_mask, 0, sizeof(eth_mask)); + + eth_spec.hdr.ether_type = cpu_to_be16(eth_type); + eth_mask.hdr.ether_type = cpu_to_be16(0xffff); + + eth_item.type = RTE_FLOW_ITEM_TYPE_ETH; + eth_item.spec = ð_spec; + eth_item.mask = ð_mask; + + pattern[0] = eth_item; + pattern[1] = end_item; + + queue_action.index = q_index; + actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE; + actions[0].conf = &queue_action; + + actions[1].type = RTE_FLOW_ACTION_TYPE_END; + actions[1].conf = NULL; + + return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, + &flow_info); +} + +static int +ulp_add_non_tunnel_tcp_5tuple(struct bnxt *bp, int i, u16 q_index) +{ + struct bnxt_ulp_flow_info flow_info = { 0 }; + struct rte_flow_item pattern[4]; + + /* Outer IPv4 Item */ + ipv4_item.type = 
RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = 6;	/* 6 = TCP */
	ipv4_spec.hdr.src_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 1));
	ipv4_spec.hdr.dst_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 2));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = 0xff;
	ipv4_mask.hdr.src_addr = cpu_to_be32(0xffffffff);
	ipv4_mask.hdr.dst_addr = cpu_to_be32(0xffffffff);

	/* Outer TCP Item: ports vary with @i so each call makes a unique flow */
	tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
	tcp_item.spec = &tcp_spec;
	tcp_item.mask = &tcp_mask;
	tcp_item.last = NULL;
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	tcp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	tcp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_mask.hdr.src_port = cpu_to_be16(0xffff);
	tcp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv4_item;
	pattern[2] = tcp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Install an IPv4/UDP 5-tuple match flow (9.9.9.1 -> 9.9.9.2, ports offset
 * by @i) and steer matches to RX queue @q_index.
 *
 * NOTE(review): the pattern items and spec/mask buffers are file-scope
 * globals shared by all of these builders — presumably only called from a
 * single-threaded setup path; confirm there are no concurrent callers.
 */
static int
ulp_add_non_tunnel_udp_5tuple(struct bnxt *bp, int i, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	/* Outer IPv4 Item */
	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = 17;	/* 17 = UDP */
	ipv4_spec.hdr.src_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 1));
	ipv4_spec.hdr.dst_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 2));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = 0xff;
	ipv4_mask.hdr.src_addr = cpu_to_be32(0xffffffff);
	ipv4_mask.hdr.dst_addr = cpu_to_be32(0xffffffff);

	/* Outer UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = cpu_to_be16(0xffff);
	udp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv4_item;
	pattern[2] = udp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Install an IPv6/TCP 5-tuple match flow (addresses from the file-scope
 * ipv6_src_addr/ipv6_dst_addr, ports offset by @i) steered to queue @q_index.
 */
static int
ulp_add_non_tunnel_tcp_5tuple_ipv6(struct bnxt *bp, int i, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	/* Outer IPv6 Item */
	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	ipv6_spec.hdr.proto = 6;	/* 6 = TCP */
	memcpy(ipv6_spec.hdr.src_addr, (uint8_t *)ipv6_src_addr, 16);
	memcpy(ipv6_spec.hdr.dst_addr, (uint8_t *)ipv6_dst_addr, 16);
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6_mask.hdr.proto = 0xff;
	memset(ipv6_mask.hdr.src_addr, 0xff, 16);
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);

	/* Outer TCP Item */
	tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
	tcp_item.spec = &tcp_spec;
	tcp_item.mask = &tcp_mask;
	tcp_item.last = NULL;
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	tcp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	tcp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_mask.hdr.src_port = cpu_to_be16(0xffff);
	tcp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv6_item;
	pattern[2] = tcp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Install an IPv6/UDP 5-tuple match flow, steered to queue @q_index. */
static int
ulp_add_non_tunnel_udp_5tuple_ipv6(struct bnxt *bp, int i, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	/* Outer IPv6 Item */
	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	ipv6_spec.hdr.proto = 17;	/* 17 = UDP */
	memcpy(ipv6_spec.hdr.src_addr, (uint8_t *)ipv6_src_addr, 16);
	memcpy(ipv6_spec.hdr.dst_addr, (uint8_t *)ipv6_dst_addr, 16);
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6_mask.hdr.proto = 0xff;
	memset(ipv6_mask.hdr.src_addr, 0xff, 16);
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);

	/* Outer UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = cpu_to_be16(0xffff);
	udp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv6_item;
	pattern[2] = udp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Match any IPv4 packet with protocol 0x59 (89 = OSPF) and steer it to
 * RX queue @q_index.
 */
static int
ulp_add_non_tunnel_ip4_proto(struct bnxt *bp, u16 q_index)
{
	struct bnxt_ulp_flow_info
flow_info = { 0 };
	struct rte_flow_item pattern[3];

	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = 0x59;	/* 0x59 = 89 = OSPF */
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = 0xff;

	pattern[0] = eth_item;
	pattern[1] = ipv4_item;
	pattern[2] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, &flow_info);
}

/* Match any IPv6 packet whose next header is 0x59 (89 = OSPF) and steer
 * it to RX queue @q_index.
 */
static int
ulp_add_non_tunnel_ip6_proto(struct bnxt *bp, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[3];

	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	ipv6_spec.hdr.proto = 0x59;
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6_mask.hdr.proto = 0xff;

	pattern[0] = eth_item;
	pattern[1] = ipv6_item;
	pattern[2] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, &flow_info);
}

/* Match IPv4/TCP with destination port @dport only (addresses wildcarded),
 * steered to queue @q_index.
 */
static int
ulp_add_non_tunnel_tcp_dport_v4(struct bnxt *bp, int dport, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	/* IPv4 item with all-zero spec/mask: only selects the L3 type */
	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));

	/* Outer TCP Item */
	tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
	tcp_item.spec = &tcp_spec;
	tcp_item.mask = &tcp_mask;
	tcp_item.last = NULL;
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	tcp_spec.hdr.dst_port = cpu_to_be16(dport);
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv4_item;
	pattern[2] = tcp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* IPv6 variant of ulp_add_non_tunnel_tcp_dport_v4(). */
static int
ulp_add_non_tunnel_tcp_dport_v6(struct bnxt *bp, int dport, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));

	/* Outer TCP Item */
	tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
	tcp_item.spec = &tcp_spec;
	tcp_item.mask = &tcp_mask;
	tcp_item.last = NULL;
	memset(&tcp_spec, 0, sizeof(tcp_spec));
	tcp_spec.hdr.dst_port = cpu_to_be16(dport);
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	tcp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv6_item;
	pattern[2] = tcp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Match IPv4/UDP with destination port @dport only, steered to @q_index. */
static int
ulp_add_non_tunnel_udp_dport_v4(struct bnxt *bp, int dport, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));

	/* Outer UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.dst_port = cpu_to_be16(dport);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv4_item;
	pattern[2] = udp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, &flow_info);
}

/* IPv6 variant of ulp_add_non_tunnel_udp_dport_v4(). */
static int
ulp_add_non_tunnel_udp_dport_v6(struct bnxt *bp, int dport, u16 q_index)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[4];

	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));

	/* Outer UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.dst_port = cpu_to_be16(dport);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.dst_port = cpu_to_be16(0xffff);

	pattern[0] = eth_item;
	pattern[1] = ipv6_item;
	pattern[2] = udp_item;
	pattern[3] = end_item;

	queue_action.index = q_index;
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue_action;

	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
	actions[1].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions, &flow_info);
}

static int
+ulp_add_all_ipv4_rules(struct bnxt *bp, int count) +{ + int i, rc; + + for (i = 0; i < count; i++) { + rc = ulp_add_non_tunnel_tcp_5tuple(bp, i, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IPv4 TCP 5 tuple rule\n"); + return rc; + } + } + + for (i = 0; i < count; i++) { + rc = ulp_add_non_tunnel_udp_5tuple(bp, i, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IPv4 UDP 5 tuple rule\n"); + return rc; + } + } + + rc = ulp_add_non_tunnel_ip4_proto(bp, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add OSPF IPv4 flow, Proto = 0x59\n"); + return rc; + } + + rc = ulp_add_non_tunnel_tcp_dport_v4(bp, 0xB3, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BGP flow, TCP dport = 0xB3\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x0EC8, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD flow, UDP dport = 0x0EC8\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x0EC9, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD flow, UDP dport = 0x0EC9\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x12B0, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD flow, UDP dport = 0x12B0\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x1A80, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD flow, UDP dport = 0x1A80\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x286, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add LDP flow, UDP dport = 0x286\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v4(bp, 0x7B, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add NTP flow, UDP dport = 0x7B\n"); + return rc; + } + + return 0; +} + +static int +ulp_add_all_ipv6_rules(struct bnxt *bp, int count) +{ + int i, rc; + + for (i = 0; i < count; i++) { + rc = ulp_add_non_tunnel_tcp_5tuple_ipv6(bp, i, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IPv6 TCP 5 tuple rule\n"); + return rc; + } + } + + for (i = 0; i < count; i++) { + rc = 
ulp_add_non_tunnel_udp_5tuple_ipv6(bp, i, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IPv6 UDP 5 tuple rule\n"); + return rc; + } + } + + rc = ulp_add_non_tunnel_ip6_proto(bp, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add OSPF IPv6 flow, Proto = 0x59\n"); + return rc; + } + + rc = ulp_add_non_tunnel_tcp_dport_v6(bp, 0xB3, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BGP v6 flow, TCP dport = 0xB3\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x0EC8, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD v6 flow, UDP dport = 0x0EC8\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x0EC9, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD v6 flow, UDP dport = 0x0EC9\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x12B0, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD v6 flow, UDP dport = 0x12B0\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x1A80, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add BFD v6 flow, UDP dport = 0x1A80\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x286, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add LDP v6 flow, UDP dport = 0x286\n"); + return rc; + } + + rc = ulp_add_non_tunnel_udp_dport_v6(bp, 0x7B, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add NTP v6 flow, UDP dport = 0x7B\n"); + return rc; + } + + return 0; +} + +#if 0 +static int +ulp_add_vxlan_inner_5tuple(struct bnxt *bp, int i) +{ + struct rte_flow_item pattern[7]; + struct bnxt_ulp_flow_info flow_info = { 0 }; + + /* Outer IPv4 Item */ + ipv4_item_outer.type = RTE_FLOW_ITEM_TYPE_IPV4; + ipv4_item_outer.spec = &ipv4_spec_outer; + ipv4_item_outer.mask = &ipv4_mask_outer; + ipv4_item_outer.last = NULL; + + /* Vxlan Item */ + vxlan_item.type = RTE_FLOW_ITEM_TYPE_VXLAN; + vxlan_item.spec = &vxlan_spec; + vxlan_item.mask = &vxlan_mask; + vxlan_item.last = NULL; + /* + * TBD: Outer UDP Item + * udp_item_outer.type = 
RTE_FLOW_ITEM_TYPE_UDP;
	 * udp_item_outer.spec = &udp_spec_outer;
	 * udp_item_outer.mask = &udp_mask_outer;
	 * udp_item_outer.last = NULL;
	 * memset(&udp_spec_outer, 0, sizeof(udp_spec_outer));
	 * udp_spec_outer.hdr.dst_port = 4789;
	 * memset(&udp_mask_outer, 0, sizeof(udp_mask_outer));
	 * udp_mask_outer.hdr.dst_port = 0xffff;
	 */

	/* Inner IPv4 Item */
	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = 17;
	ipv4_spec.hdr.src_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 1));
	ipv4_spec.hdr.dst_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 2));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = 0xff;
	ipv4_mask.hdr.src_addr = cpu_to_be32(0xffffffff);
	ipv4_mask.hdr.dst_addr = cpu_to_be32(0xffffffff);

	/* Inner UDP Item.
	 * NOTE(review): masks set to 0xffff without cpu_to_be16() — value is
	 * symmetric so this is harmless, but inconsistent with the live
	 * (non-#if 0) builders above.
	 */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = 0xffff;
	udp_mask.hdr.dst_port = 0xffff;

	/* pattern[] is sized 7 for the TBD inner eth item; only 6 are used */
	pattern[0] = eth_item;
	pattern[1] = ipv4_item_outer;
	pattern[2] = vxlan_item;
	/* TBD: pattern[3] = eth_item; */
	pattern[3] = ipv4_item;
	pattern[4] = udp_item;
	pattern[5] = end_item;

	actions[0].type = RTE_FLOW_ACTION_TYPE_END;
	actions[0].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Dead code (#if 0): GRE-encapsulated inner IPv4/UDP 5-tuple flow. */
static int
ulp_add_gre_inner_5tuple(struct bnxt *bp, int i)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[6];

	/* Outer IPv4 Item */
	ipv4_item_outer.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item_outer.spec = &ipv4_spec_outer;
	ipv4_item_outer.mask = &ipv4_mask_outer;
	ipv4_item_outer.last = NULL;

	/*
	 * ipv4_item_outer.type = RTE_FLOW_ITEM_TYPE_IPV4;
	 * ipv4_item_outer.spec = &ipv4_spec_outer;
	 * ipv4_item_outer.mask = &ipv4_mask_outer;
	 * ipv4_item_outer.last = NULL;
	 * memset(&ipv4_spec_outer, 0, sizeof(ipv4_spec_outer));
	 * ipv4_spec_outer.hdr.next_proto_id = 47;
	 * memset(&ipv4_mask_outer, 0, sizeof(ipv4_mask_outer));
	 * ipv4_mask_outer.hdr.next_proto_id = 0xff;
	 */

	/* GRE Item */
	gre_item.type = RTE_FLOW_ITEM_TYPE_GRE;
	gre_item.spec = &gre_spec;
	gre_item.mask = &gre_mask;
	gre_item.last = NULL;

	/* Inner IPv4 Item */
	ipv4_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item.spec = &ipv4_spec;
	ipv4_item.mask = &ipv4_mask;
	ipv4_item.last = NULL;
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = 17;
	ipv4_spec.hdr.src_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 1));
	ipv4_spec.hdr.dst_addr = cpu_to_be32(RTE_IPV4(9, 9, 9, 2));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = 0xff;
	ipv4_mask.hdr.src_addr = cpu_to_be32(0xffffffff);
	ipv4_mask.hdr.dst_addr = cpu_to_be32(0xffffffff);

	/* Inner UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xFFEE + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xEEDD + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = 0xffff;
	udp_mask.hdr.dst_port = 0xffff;

	pattern[0] = eth_item;
	pattern[1] = ipv4_item_outer;
	pattern[2] = gre_item;
	pattern[3] = ipv4_item;
	pattern[4] = udp_item;
	pattern[5] = end_item;

	actions[0].type = RTE_FLOW_ACTION_TYPE_END;
	actions[0].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Dead code (#if 0): VXLAN flow with inner IPv6/UDP 5-tuple. */
static int
ulp_add_vxlan_inner_5tuple_ipv6(struct bnxt *bp, int i)
{
	struct rte_flow_item pattern[7];
	struct bnxt_ulp_flow_info flow_info = { 0 };

	/* Outer IPv4 Item */
	ipv4_item_outer.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item_outer.spec = &ipv4_spec_outer;
	ipv4_item_outer.mask = &ipv4_mask_outer;
	ipv4_item_outer.last = NULL;

	/* Vxlan Item */
	vxlan_item.type = RTE_FLOW_ITEM_TYPE_VXLAN;
	vxlan_item.spec = &vxlan_spec;
	vxlan_item.mask = &vxlan_mask;
	vxlan_item.last = NULL;
	/*
	 * Outer UDP Item
	 * udp_item_outer.type = RTE_FLOW_ITEM_TYPE_UDP;
	 * udp_item_outer.spec = &udp_spec_outer;
	 * udp_item_outer.mask = &udp_mask_outer;
	 * udp_item_outer.last = NULL;
	 * memset(&udp_spec_outer, 0, sizeof(udp_spec_outer));
	 * udp_spec_outer.hdr.dst_port = 4789;
	 * memset(&udp_mask_outer, 0, sizeof(udp_mask_outer));
	 * udp_mask_outer.hdr.dst_port = 0xffff;
	 */

	/* Inner IPv6 Item */
	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	ipv6_spec.hdr.proto = 17;
	memcpy(ipv6_spec.hdr.src_addr, (uint8_t *)ipv6_src_addr, 16);
	memcpy(ipv6_spec.hdr.dst_addr, (uint8_t *)ipv6_dst_addr, 16);
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6_mask.hdr.proto = 0xff;
	memset(ipv6_mask.hdr.src_addr, 0xff, 16);
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);

	/* Inner UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xBBAA + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xDDCC + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = 0xffff;
	udp_mask.hdr.dst_port = 0xffff;

	pattern[0] = eth_item;
	pattern[1] = ipv4_item_outer;
	pattern[2] = vxlan_item;
	pattern[3] = ipv6_item;
	pattern[4] = udp_item;
	pattern[5] = end_item;

	actions[0].type = RTE_FLOW_ACTION_TYPE_END;
	actions[0].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* Dead code (#if 0): GRE flow with inner IPv6/UDP 5-tuple. */
static int
ulp_add_gre_inner_5tuple_ipv6(struct bnxt *bp, int i)
{
	struct bnxt_ulp_flow_info flow_info = { 0 };
	struct rte_flow_item pattern[6];

	/* Outer IPv4 Item */
	ipv4_item_outer.type = RTE_FLOW_ITEM_TYPE_IPV4;
	ipv4_item_outer.spec = &ipv4_spec_outer;
	ipv4_item_outer.mask = &ipv4_mask_outer;
	ipv4_item_outer.last = NULL;

	/*
	 * memset(&ipv4_spec_outer, 0, sizeof(ipv4_spec_outer));
	 * ipv4_spec_outer.hdr.next_proto_id = 47;
	 * memset(&ipv4_mask_outer, 0, sizeof(ipv4_mask_outer));
	 * ipv4_mask_outer.hdr.next_proto_id = 0xff;
	 */

	/* GRE Item */
	gre_item.type = RTE_FLOW_ITEM_TYPE_GRE;
	gre_item.spec = &gre_spec;
	gre_item.mask = &gre_mask;
	gre_item.last = NULL;

	/* Inner IPv6 Item */
	ipv6_item.type = RTE_FLOW_ITEM_TYPE_IPV6;
	ipv6_item.spec = &ipv6_spec;
	ipv6_item.mask = &ipv6_mask;
	ipv6_item.last = NULL;
	memset(&ipv6_spec, 0, sizeof(ipv6_spec));
	ipv6_spec.hdr.proto = 17;
	memcpy(ipv6_spec.hdr.src_addr, (uint8_t *)ipv6_src_addr, 16);
	memcpy(ipv6_spec.hdr.dst_addr, (uint8_t *)ipv6_dst_addr, 16);
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	ipv6_mask.hdr.proto = 0xff;
	memset(ipv6_mask.hdr.src_addr, 0xff, 16);
	memset(ipv6_mask.hdr.dst_addr, 0xff, 16);

	/* Inner UDP Item */
	udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
	udp_item.spec = &udp_spec;
	udp_item.mask = &udp_mask;
	udp_item.last = NULL;
	memset(&udp_spec, 0, sizeof(udp_spec));
	udp_spec.hdr.src_port = cpu_to_be16(0xFFEE + i);
	udp_spec.hdr.dst_port = cpu_to_be16(0xEEDD + i);
	memset(&udp_mask, 0, sizeof(udp_mask));
	udp_mask.hdr.src_port = 0xffff;
	udp_mask.hdr.dst_port = 0xffff;

	pattern[0] = eth_item;
	pattern[1] = ipv4_item_outer;
	pattern[2] = gre_item;
	pattern[3] = ipv6_item;
	pattern[4] = udp_item;
	pattern[5] = end_item;

	actions[0].type = RTE_FLOW_ACTION_TYPE_END;
	actions[0].conf = NULL;

	return bnxt_custom_ulp_flow_create(bp, bp->pf.fw_fid, pattern, actions,
					   &flow_info);
}

/* New requirement is to not offload the below
flows. Instead, just do the + * default behavior. Retain the code so that it can be reused if decided + * to offload in the future. + */ +static int +ulp_add_all_vxlan_rules(struct bnxt *bp, int count) +{ + int i, rc; + + for (i = 0; i < count; i++) { + rc = ulp_add_vxlan_inner_5tuple(bp, i); + if (rc) { + netdev_err(bp->dev, "Failed to add VxLAN Inner IPv4 rule\n"); + return rc; + } + } + + for (i = 0; i < count; i++) { + rc = ulp_add_vxlan_inner_5tuple_ipv6(bp, i); + if (rc) { + netdev_err(bp->dev, "Failed to add VxLAN Inner IPv6 rule\n"); + return rc; + } + } + + return 0; +} + +static int +ulp_add_all_gre_rules(struct bnxt *bp, int count) +{ + int i, rc; + + for (i = 0; i < count; i++) { + rc = ulp_add_gre_inner_5tuple(bp, i); + if (rc) { + netdev_err(bp->dev, "Failed to add GRE Inner IPv4 rule\n"); + return rc; + } + } + + for (i = 0; i < count; i++) { + rc = ulp_add_gre_inner_5tuple_ipv6(bp, i); + if (rc) { + netdev_err(bp->dev, "Failed to add GRE Inner IPv6 rule\n"); + return rc; + } + } + + return 0; +} +#endif + +static int +ulp_add_all_custom_ethtype_rules(struct bnxt *bp) +{ + int rc, i; + + /* Custom Ethtypes: START */ + for (i = 0xfffb; i <= 0xffff; i++) { + rc = ulp_add_eth_type_rule(bp, i, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add CUSTOM ETH rule: EthType = %d\n", i); + return rc; + } + } + + rc = ulp_add_eth_type_rule(bp, 0x8042, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add CUSTOM ETH rule: EthType = 0x8042\n"); + return rc; + } + + rc = ulp_add_eth_type_rule(bp, 0xF0F1, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add CUSTOM ETH rule: EthType = 0xF0F1\n"); + return rc; + } + + for (i = 0; i <= 14; i++) { + rc = ulp_add_eth_type_rule(bp, 0xAAEF + i, i + 2); + if (rc) { + netdev_err(bp->dev, + "Failed to add CUSTOM ETH rule: EthType = %d\n", 0xAAEF + i); + return rc; + } + + rc = ulp_add_eth_type_rule(bp, 0xBAEF + i, i + 2); + if (rc) { + netdev_err(bp->dev, + "Failed to add CUSTOM ETH rule: EthType = %d\n", 0xAAEF + i); + 
return rc; + } + } + + /* Custom Ethtypes: END */ + + /* Standard Ethtypes: START */ + /* LACP flow */ + rc = ulp_add_eth_type_rule(bp, 0x8809, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add CUSTOM ETH rule: EthType = 0x8042\n"); + return rc; + } + + /* LLDP flow */ + rc = ulp_add_eth_type_rule(bp, 0x88CC, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add CUSTOM ETH rule: EthType = 0x8042\n"); + return rc; + } + /* Standard Ethtypes: END */ + + return 0; +} + +static int +ulp_add_all_eth_dmac_rules(struct bnxt *bp) +{ + u8 dst_addr1[8] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x14}; + u8 dst_addr2[8] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x15}; + u8 dst_addr3[8] = {0x99, 0x00, 0x2B, 0x00, 0x00, 0x05}; + int rc; + + rc = ulp_add_eth_dmac_rule(bp, dst_addr1, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IS-IS flow, ETH DMAC = 0x0180C2000014 rule\n"); + return rc; + } + + rc = ulp_add_eth_dmac_rule(bp, dst_addr2, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IS-IS flow, ETH DMAC = 0x0180C2000015 rule\n"); + return rc; + } + + rc = ulp_add_eth_dmac_rule(bp, dst_addr3, 1); + if (rc) { + netdev_err(bp->dev, "Failed to add IS-IS flow, ETH DMAC = 0x99002B000005 rule\n"); + return rc; + } + + return 0; +} + +int +ulp_tc_rte_create_all_flows(struct bnxt *bp, int count) +{ + int rc; + + rc = ulp_add_all_eth_dmac_rules(bp); + if (rc) + return rc; + + rc = ulp_add_all_custom_ethtype_rules(bp); + if (rc) + return rc; + + rc = ulp_add_all_ipv4_rules(bp, count); + if (rc) + return rc; + + rc = ulp_add_all_ipv6_rules(bp, count); + if (rc) + return rc; + +#if 0 + /* New requirement is to not offload the below flows. Instead, just do the + * default behavior. Retain the code so that it can be reused if decided + * to offload in the future. 
+ */ + rc = ulp_add_all_vxlan_rules(bp, count); + if (rc) + return rc; + + rc = ulp_add_all_gre_rules(bp, count); + if (rc) + return rc; +#endif + + return 0; +} +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.c new file mode 100644 index 000000000000..f136c3a8177b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "ulp_utils.h" +#include "bnxt_tf_ulp.h" +#include "ulp_template_db_enum.h" +#include "ulp_template_struct.h" +#include "ulp_template_debug.h" +#include "ulp_template_debug_proto.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +#ifdef TC_BNXT_TRUFLOW_DEBUG + +const char *ulp_tc_hdr_comp_field_names[] = { + "BNXT_ULP_CF_IDX_NOT_USED", + "BNXT_ULP_CF_IDX_MPLS_TAG_NUM", + "BNXT_ULP_CF_IDX_O_VTAG_NUM", + "BNXT_ULP_CF_IDX_O_HAS_VTAG", + "BNXT_ULP_CF_IDX_O_ONE_VTAG", + "BNXT_ULP_CF_IDX_O_TWO_VTAGS", + "BNXT_ULP_CF_IDX_I_VTAG_NUM", + "BNXT_ULP_CF_IDX_I_HAS_VTAG", + "BNXT_ULP_CF_IDX_I_ONE_VTAG", + "BNXT_ULP_CF_IDX_I_TWO_VTAGS", + "BNXT_ULP_CF_IDX_INCOMING_IF", + "BNXT_ULP_CF_IDX_DIRECTION", + "BNXT_ULP_CF_IDX_SVIF_FLAG", + "BNXT_ULP_CF_IDX_O_L3", + "BNXT_ULP_CF_IDX_I_L3", + "BNXT_ULP_CF_IDX_O_L4", + "BNXT_ULP_CF_IDX_I_L4", + "BNXT_ULP_CF_IDX_O_L4_SRC_PORT", + "BNXT_ULP_CF_IDX_O_L4_DST_PORT", + "BNXT_ULP_CF_IDX_I_L4_SRC_PORT", + "BNXT_ULP_CF_IDX_I_L4_DST_PORT", + "BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK", + "BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK", + "BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK", + "BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK", + "BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT", + "BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT", + "BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT", + 
"BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT",
	"BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID",
	"BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID",
	"BNXT_ULP_CF_IDX_O_L3_PROTO_ID",
	"BNXT_ULP_CF_IDX_I_L3_PROTO_ID",
	"BNXT_ULP_CF_IDX_O_L3_TTL",
	"BNXT_ULP_CF_IDX_DEV_PORT_ID",
	"BNXT_ULP_CF_IDX_DRV_FUNC_SVIF",
	"BNXT_ULP_CF_IDX_DRV_FUNC_SPIF",
	"BNXT_ULP_CF_IDX_DRV_FUNC_PARIF",
	"BNXT_ULP_CF_IDX_DRV_FUNC_VNIC",
	"BNXT_ULP_CF_IDX_DRV_FUNC_PHY_PORT",
	"BNXT_ULP_CF_IDX_VF_FUNC_SVIF",
	"BNXT_ULP_CF_IDX_VF_FUNC_SPIF",
	"BNXT_ULP_CF_IDX_VF_FUNC_PARIF",
	"BNXT_ULP_CF_IDX_VF_FUNC_VNIC",
	"BNXT_ULP_CF_IDX_VNIC",
	"BNXT_ULP_CF_IDX_PHY_PORT_SVIF",
	"BNXT_ULP_CF_IDX_PHY_PORT_SPIF",
	"BNXT_ULP_CF_IDX_PHY_PORT_PARIF",
	"BNXT_ULP_CF_IDX_PHY_PORT_VPORT",
	"BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG",
	"BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG",
	"BNXT_ULP_CF_IDX_ACT_DEC_TTL",
	"BNXT_ULP_CF_IDX_ACT_T_DEC_TTL",
	"BNXT_ULP_CF_IDX_ACT_PORT_IS_SET",
	"BNXT_ULP_CF_IDX_ACT_PORT_TYPE",
	"BNXT_ULP_CF_IDX_ACT_MIRR_PORT_IS_SET",
	"BNXT_ULP_CF_IDX_ACT_MIRR_PORT_TYPE",
	"BNXT_ULP_CF_IDX_MATCH_PORT_TYPE",
	"BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP",
	"BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF",
	"BNXT_ULP_CF_IDX_VF_TO_VF",
	"BNXT_ULP_CF_IDX_L3_HDR_CNT",
	"BNXT_ULP_CF_IDX_L4_HDR_CNT",
	"BNXT_ULP_CF_IDX_VFR_MODE",
	"BNXT_ULP_CF_IDX_L3_TUN",
	"BNXT_ULP_CF_IDX_L3_TUN_DECAP",
	"BNXT_ULP_CF_IDX_FID",
	"BNXT_ULP_CF_IDX_HDR_SIG_ID",
	"BNXT_ULP_CF_IDX_FLOW_SIG_ID",
	"BNXT_ULP_CF_IDX_WC_MATCH",
	"BNXT_ULP_CF_IDX_WC_IS_HA_HIGH_REG",
	"BNXT_ULP_CF_IDX_TUNNEL_ID",
	"BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID",
	"BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID",
	"BNXT_ULP_CF_IDX_OO_VLAN_FB_VID",
	"BNXT_ULP_CF_IDX_OI_VLAN_FB_VID",
	"BNXT_ULP_CF_IDX_IO_VLAN_FB_VID",
	"BNXT_ULP_CF_IDX_II_VLAN_FB_VID",
	"BNXT_ULP_CF_IDX_SOCKET_DIRECT",
	"BNXT_ULP_CF_IDX_SOCKET_DIRECT_VPORT",
	"BNXT_ULP_CF_IDX_TUNNEL_SPORT",
	"BNXT_ULP_CF_IDX_VF_META_FID",
	"BNXT_ULP_CF_IDX_DEV_ACT_PORT_ID",
	"BNXT_ULP_CF_IDX_DEV_ACT_MIRR_PORT_ID",
	"BNXT_ULP_CF_IDX_O_VLAN_NO_IGNORE",
	"BNXT_ULP_CF_IDX_I_VLAN_NO_IGNORE",
	"BNXT_ULP_CF_IDX_HA_SUPPORT_DISABLED",
	"BNXT_ULP_CF_IDX_FUNCTION_ID",
	"BNXT_ULP_CF_IDX_CHAIN_ID_METADATA",
	"BNXT_ULP_CF_IDX_SRV6_UPAR_ID",
	"BNXT_ULP_CF_IDX_SRV6_T_ID",
	"BNXT_ULP_CF_IDX_GENERIC_SIZE",
	"BNXT_ULP_CF_IDX_APP_PRIORITY",
	"BNXT_ULP_CF_IDX_MIRROR_COPY_ING_OR_EGR",
	"BNXT_ULP_CF_IDX_EM_FOR_TC",
	"BNXT_ULP_CF_IDX_L2_CUSTOM_UPAR_ID",
	"BNXT_ULP_CF_IDX_CUSTOM_GRE_EN",
	"BNXT_ULP_CF_IDX_UPAR_HIGH_EN",
	"BNXT_ULP_CF_IDX_MP_NPORTS",
	"BNXT_ULP_CF_IDX_MP_PORT_A",
	"BNXT_ULP_CF_IDX_MP_VNIC_A",
	"BNXT_ULP_CF_IDX_MP_VPORT_A",
	"BNXT_ULP_CF_IDX_MP_MDATA_A",
	"BNXT_ULP_CF_IDX_MP_A_IS_VFREP",
	"BNXT_ULP_CF_IDX_MP_PORT_B",
	"BNXT_ULP_CF_IDX_MP_VNIC_B",
	"BNXT_ULP_CF_IDX_MP_VPORT_B",
	"BNXT_ULP_CF_IDX_MP_MDATA_B",
	"BNXT_ULP_CF_IDX_MP_B_IS_VFREP",
	"BNXT_ULP_CF_IDX_VXLAN_IP_UPAR_ID",
	"BNXT_ULP_CF_IDX_ACT_REJ_COND_EN",
	"BNXT_ULP_CF_IDX_HDR_BITMAP",
	"BNXT_ULP_CF_IDX_PROFILE_BITMAP",
	"BNXT_ULP_CF_IDX_VF_ROCE_EN",
	"BNXT_ULP_CF_IDX_LAST"
};

/* NOTE(review): the tables below are positional — each must stay
 * index-aligned with its corresponding enum/field layout in
 * ulp_template_db_enum.h; keep them in sync when the enums change.
 */
const char *ulp_tc_hdr_svif_names[] = {
	"Wild Card",
	"SVIF",
};

const char *ulp_tc_hdr_eth_field_names[] = {
	"Dst Mac",
	"Src Mac",
	"Ether Type",
};

const char *ulp_tc_hdr_vlan_field_names[] = {
	"Priority",
	"Vlan Id",
	"Vlan-Ether Type",
};

const char *ulp_tc_hdr_ipv4_field_names[] = {
	"Version",
	"Type of Service",
	"Length",
	"Fragment Id",
	"Fragment Offset",
	"TTL",
	"Next Proto",
	"Checksum",
	"Src Addr",
	"Dst Addr"
};

const char *ulp_tc_hdr_ipv6_field_names[] = {
	"Version",
	"Traffic Class",
	"Flow Label",
	"Length",
	"Proto",
	"Hop limits",
	"Src Addr",
	"Dst Addr"
};

const char *ulp_tc_hdr_udp_field_names[] = {
	"Src Port",
	"Dst Port",
	"Length",
	"Checksum"
};

const char *ulp_tc_hdr_vxlan_field_names[] = {
	"Vxlan Flags",
	"Reserved",
	"VNI",
	"Reserved"
};

const char *ulp_tc_hdr_tcp_field_names[] = {
	"Src Port",
	"Dst Port",
	"Sent Seq",
+ "Recv Ack", + "Data Offset", + "Tcp flags", + "Rx Window", + "Checksum", + "URP", +}; + +const char *ulp_tc_hdr_icmp_field_names[] = { + "icmp type", + "icmp code", + "icmp cksum", + "icmp ident", + "icmp seq num" +}; + +const char *ulp_tc_hdr_ecpri_field_names[] = { + "eCPRI type", + "eCPRI id" +}; + +const char *ulp_mapper_resource_func_names[] = { + [BNXT_ULP_RESOURCE_FUNC_INVALID] = "Invalid Table", + [BNXT_ULP_RESOURCE_FUNC_EM_TABLE] = "EM Table", + [BNXT_ULP_RESOURCE_FUNC_CMM_TABLE] = "CMM Table", + [BNXT_ULP_RESOURCE_FUNC_CMM_STAT] = "CMM STAT", + [BNXT_ULP_RESOURCE_FUNC_TCAM_TABLE] = "Tcam Table", + [BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE] = "Index Table", + [BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE] = "Generic Table", + [BNXT_ULP_RESOURCE_FUNC_IDENTIFIER] = "Idenitifer table", + [BNXT_ULP_RESOURCE_FUNC_IF_TABLE] = "Interface Table", + [BNXT_ULP_RESOURCE_FUNC_HW_FID] = "FID Table", + [BNXT_ULP_RESOURCE_FUNC_PARENT_FLOW] = "Parent Flow", + [BNXT_ULP_RESOURCE_FUNC_CHILD_FLOW] = "Child Flow", + [BNXT_ULP_RESOURCE_FUNC_CTRL_TABLE] = "Control Table", + [BNXT_ULP_RESOURCE_FUNC_VNIC_TABLE] = "Vnic Table", + [BNXT_ULP_RESOURCE_FUNC_GLOBAL_REGISTER_TABLE] = "Global Reg Table", + [BNXT_ULP_RESOURCE_FUNC_UDCC_V6SUBNET_TABLE] = "v6 Subnet Table", + [BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE] = "Key Recipe Table", + [BNXT_ULP_RESOURCE_FUNC_ALLOCATOR_TABLE] = "Allocator Table", +}; + +const char *ulp_mapper_res_ulp_global_names[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_VXLAN] = "Custom VxLAN", + [BNXT_ULP_RESOURCE_SUB_TYPE_GLOBAL_REGISTER_CUST_ECPRI] = "Custom eCPRI" +}; + +const char *ulp_mapper_res_key_recipe_names[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_EM] = + "EM Key Recipe", + [BNXT_ULP_RESOURCE_SUB_TYPE_KEY_RECIPE_TABLE_WM] = + "WC Key Recipe" +}; + +const char *ulp_mapper_res_index_names[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_NORMAL] = "Normal", + [BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_VFR_CFA_ACTION] = "CFA Action", + 
[BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT] = "Internal counter", + [BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_INT_COUNT_ACC] = "Agg Counter", + [BNXT_ULP_RESOURCE_SUB_TYPE_INDEX_TABLE_EXT_COUNT] = "External Counter" +}; + +const char *ulp_mapper_res_generic_names[] = { + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_CNTXT_TCAM] = "L2 Ctxt", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROFILE_TCAM] = "Prof Tcam", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_MIRROR] = "Mirror Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_MAC_ADDR_CACHE] = + "Mac Addr Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PORT_TABLE] = "Port Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TUNNEL_CACHE] = + "Tunnel Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_CACHE] = + "Source Property Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE] = + "Vxlan Encap Record Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE] = + "Socket Direct Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOURCE_PROPERTY_IPV6_CACHE] = + "v6 Source Property Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_L2_ENCAP_REC_CACHE] = + "L2 Encap Record Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SRV6_ENCAP_REC_CACHE] = + "SRV6 Encap Record Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_REC_CACHE] = + "Vxlan Encap Record Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_VXLAN_ENCAP_IPV6_REC_CACHE] = + "IPv6 Encap Record Tbl", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SOCKET_DIRECT_CACHE] = + "Socket Direct Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_METER_PROFILE_TBL_CACHE] = + "Meter Profile Tbl Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_SHARED_METER_TBL_CACHE] = + "Meter Tbl Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GLOBAL_REGISTER_TBL] = + "Global Register Table", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_TABLE_SCOPE_CACHE] = + "Table Scope Cache", + 
[BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_GENEVE_ENCAP_REC_CACHE] = + "Geneve Encap Record Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_PROTO_HEADER] = + "Protocol Header Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_EM_FLOW_CONFLICT] = + "EM Flow Conflict", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_HDR_OVERLAP] = + "Hdr Bitmap Overlap Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_CACHE] = + "Flow Chain Cache", + [BNXT_ULP_RESOURCE_SUB_TYPE_GENERIC_TABLE_FLOW_CHAIN_L2_CNTXT] = + "Flow Chain L2 context", +}; + +/* Utility Function to dump a simple buffer of a given length. */ +static void dump_hex(struct bnxt_ulp_context *ulp_ctx, + u8 *ptr, u32 size) +{ + u8 *lbuffer_ptr; + u8 *lbuffer; + int ret; + u32 i; + + lbuffer = vzalloc(1024); + if (!lbuffer) + return; + + lbuffer_ptr = lbuffer; + + ret = sprintf((char *)lbuffer_ptr, "\t\t\t"); + lbuffer_ptr += ret; + for (i = 0; i < size; i++, ptr++) { + if (i && !(i % 16)) { + ret = sprintf((char *)lbuffer_ptr, "\t\t\t\t"); + lbuffer_ptr += ret; + } + ret = sprintf((char *)lbuffer_ptr, "0x%02x ", *ptr); + lbuffer_ptr += ret; + if ((i & 0x0F) == 0x0F) { + ret = sprintf((char *)lbuffer_ptr, "\n"); + lbuffer_ptr += ret; + } + } + if (size & 0x0F) + sprintf((char *)lbuffer_ptr, "\n"); + netdev_info(ulp_ctx->bp->dev, "%s", lbuffer); + + vfree(lbuffer); +} + +/* Utility Function to dump the computed field properties */ +static void ulp_parser_comp_field_dump(struct ulp_tc_parser_params *params, + const char *field_names[], + u32 count_list) +{ + u32 idx = 0; + + netdev_info(params->ulp_ctx->bp->dev, "Default computed fields\n"); + for (idx = 0; idx < count_list; idx++) { + netdev_info(params->ulp_ctx->bp->dev, "\t%s =\n", + field_names[idx]); + dump_hex(params->ulp_ctx, (u8 *)¶ms->comp_fld[idx], + sizeof(u64)); + } +} + +/* Utility Function to dump the field properties.*/ +static void ulp_parser_field_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_tc_hdr_field *hdr_field, + const char 
*field_names[], + u32 start_idx, u32 count_list) +{ + u32 f_idx = 0, idx = 0; + + for (f_idx = start_idx; f_idx < (start_idx + count_list); f_idx++) { + if (hdr_field[f_idx].size) { + netdev_info(ulp_ctx->bp->dev, "\t%s = %d\n", + field_names[idx], f_idx); + dump_hex(ulp_ctx, hdr_field[f_idx].spec, + hdr_field[f_idx].size); + dump_hex(ulp_ctx, hdr_field[f_idx].mask, + hdr_field[f_idx].size); + } + idx++; + } +} + +/* Utility Function to dump the field properties.*/ +static inline void ulp_parser_vlan_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_tc_hdr_field *hdr_field, + u32 f_idx) +{ + ulp_parser_field_dump(ulp_ctx, hdr_field, ulp_tc_hdr_vlan_field_names, + f_idx, BNXT_ULP_PROTO_HDR_S_VLAN_NUM); +} + +/* Function to dump the Pattern header bitmaps and fields. */ +void ulp_parser_hdr_info_dump(struct ulp_tc_parser_params *params) +{ + struct ulp_tc_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap; + struct ulp_tc_hdr_field *hdr_field = params->hdr_field; + struct bnxt_ulp_context *ulp_ctx = params->ulp_ctx; + u32 idx = 0, f_idx = 0; + u32 num_idx; + u64 hdr_bit; + + netdev_info(ulp_ctx->bp->dev, + "Configured Header Protocols for matching\n"); + if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_FLOW_DIR_BITMASK_EGR)) + netdev_info(ulp_ctx->bp->dev, "It is a Egress Flow - %x\n", + params->dir_attr); + else + netdev_info(ulp_ctx->bp->dev, "It is a Ingress Flow - %x\n", + params->dir_attr); + ulp_parser_comp_field_dump(params, ulp_tc_hdr_comp_field_names, + BNXT_ULP_CF_IDX_LAST); + + num_idx = sizeof(bnxt_ulp_hdr_bit_names) / + sizeof(bnxt_ulp_hdr_bit_names[0]); + + /* Print the svif details, there is no bitmap for this field */ + ulp_parser_field_dump(params->ulp_ctx, + hdr_field, ulp_tc_hdr_svif_names, f_idx, + BNXT_ULP_PROTO_HDR_SVIF_NUM); + f_idx += BNXT_ULP_PROTO_HDR_SVIF_NUM; + + for (idx = 0; idx < num_idx; idx++) { + hdr_bit = 1UL << idx; + if (!ULP_BITMAP_ISSET(hdr_bitmap->bits, hdr_bit)) + continue; + + netdev_info(params->ulp_ctx->bp->dev, "%s\n", + 
bnxt_ulp_hdr_bit_names[idx]); + if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_ETH)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_eth_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_ETH_NUM); + f_idx += BNXT_ULP_PROTO_HDR_ETH_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_ETH)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_eth_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_ETH_NUM); + f_idx += BNXT_ULP_PROTO_HDR_ETH_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, + BNXT_ULP_HDR_BIT_OO_VLAN)) { + ulp_parser_vlan_dump(ulp_ctx, hdr_field, f_idx); + f_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, + BNXT_ULP_HDR_BIT_OI_VLAN)) { + ulp_parser_vlan_dump(ulp_ctx, hdr_field, f_idx); + f_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, + BNXT_ULP_HDR_BIT_IO_VLAN)) { + ulp_parser_vlan_dump(ulp_ctx, hdr_field, f_idx); + f_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, + BNXT_ULP_HDR_BIT_II_VLAN)) { + ulp_parser_vlan_dump(ulp_ctx, hdr_field, f_idx); + f_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_IPV4) || + ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_IPV4)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_ipv4_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_IPV4_NUM); + f_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_IPV6) || + ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_IPV6)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_ipv6_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_IPV6_NUM); + f_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_UDP) || + ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_UDP)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_udp_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_UDP_NUM); + f_idx += BNXT_ULP_PROTO_HDR_UDP_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, 
BNXT_ULP_HDR_BIT_O_TCP) || + ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_TCP)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_tcp_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_TCP_NUM); + f_idx += BNXT_ULP_PROTO_HDR_TCP_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, + BNXT_ULP_HDR_BIT_T_VXLAN)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_vxlan_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_VXLAN_NUM); + f_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_ICMP) || + ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_I_ICMP)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_icmp_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_ICMP_NUM); + f_idx += BNXT_ULP_PROTO_HDR_ICMP_NUM; + } else if (ULP_BITMAP_ISSET(hdr_bit, BNXT_ULP_HDR_BIT_O_ECPRI)) { + ulp_parser_field_dump(ulp_ctx, hdr_field, + ulp_tc_hdr_ecpri_field_names, + f_idx, + BNXT_ULP_PROTO_HDR_ECPRI_NUM); + f_idx += BNXT_ULP_PROTO_HDR_ECPRI_NUM; + } + } + netdev_info(ulp_ctx->bp->dev, "*************************************\n"); +} + +static void ulp_parser_action_prop_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_tc_act_prop *act_prop, + u32 start_idx, u32 dump_size) +{ + netdev_info(ulp_ctx->bp->dev, "\t%s =\n", + bnxt_ulp_tc_parser_action_prop_names[start_idx]); + dump_hex(ulp_ctx, &act_prop->act_details[start_idx], dump_size); +} + +/* Function to dump the Action header bitmaps and properties. 
*/ +void ulp_parser_act_info_dump(struct ulp_tc_parser_params *params) +{ + struct ulp_tc_hdr_bitmap *act_bitmap = ¶ms->act_bitmap; + struct ulp_tc_act_prop *act_prop = ¶ms->act_prop; + u32 num_idx = 0; + u32 idx = 0; + u64 act_bit; + + netdev_info(params->ulp_ctx->bp->dev, + "Configured actions for matching\n"); + netdev_info(params->ulp_ctx->bp->dev, "Default computed fields\n"); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ, + BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN_SZ); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ, + BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SZ); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_SZ); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_TYPE); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM, + BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG_NUM); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE, + BNXT_ULP_ACT_PROP_SZ_ENCAP_L3_TYPE); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_VNIC, + BNXT_ULP_ACT_PROP_SZ_VNIC); + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_VPORT, + BNXT_ULP_ACT_PROP_SZ_VPORT); + + num_idx = sizeof(bnxt_ulp_action_bit_names) / + sizeof(bnxt_ulp_action_bit_names[0]); + + for (idx = 0; idx < num_idx; idx++) { + enum bnxt_ulp_act_prop_idx tmp_act_p; + enum bnxt_ulp_act_prop_sz tmp_act_sz; + + act_bit = 1UL << idx; + if (!ULP_BITMAP_ISSET(act_bitmap->bits, act_bit)) + continue; + + netdev_info(params->ulp_ctx->bp->dev, "%s\n", + bnxt_ulp_action_bit_names[idx]); + if (ULP_BITMAP_ISSET(act_bit, BNXT_ULP_ACT_BIT_MARK)) { + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_MARK, + BNXT_ULP_ACT_PROP_SZ_MARK); + } 
else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_VXLAN_ENCAP)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC; + tmp_act_sz = BNXT_ULP_ACT_PROP_IDX_LAST - + BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC; + netdev_info(params->ulp_ctx->bp->dev, + "size %d and %d\n", tmp_act_p, tmp_act_sz); + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_VTAG; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_IP; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_IP; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_UDP; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_ENCAP_TUN; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_COUNT)) { + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + BNXT_ULP_ACT_PROP_IDX_COUNT, + BNXT_ULP_ACT_PROP_SZ_COUNT); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_PUSH_VLAN)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + 
BNXT_ULP_ACT_BIT_SET_IPV4_SRC)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_SET_IPV4_DST)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_SET_TP_SRC)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_SET_TP_DST)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_SET_TP_DST; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_SET_TP_DST; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_METER_PROFILE)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_CIR; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_EIR; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBS; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBS; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + 
tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_RFC2698; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_PM; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBND; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBND; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_EBSM; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_CBSM; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_CF; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + } else if (ULP_BITMAP_ISSET(act_bit, + BNXT_ULP_ACT_BIT_SHARED_METER)) { + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_PROF_ID; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_INST_ID; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_INST_ID; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_INST_MTR_VAL; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + 
tmp_act_p = BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN; + tmp_act_sz = BNXT_ULP_ACT_PROP_SZ_METER_INST_ECN_RMP_EN; + ulp_parser_action_prop_dump(params->ulp_ctx, act_prop, + tmp_act_p, + tmp_act_sz); + + } + } + netdev_info(params->ulp_ctx->bp->dev, "******************************************\n"); +} + +/* Function to dump the error field during matching. */ +void ulp_matcher_act_field_dump(struct bnxt_ulp_context *ulp_ctx, u32 idx, + u32 jdx, u32 mask_id) +{ + netdev_info(ulp_ctx->bp->dev, "Match failed template=%d,field=%s,mask=%s\n", + idx, + bnxt_ulp_tc_template_field_names[(jdx + + (idx + 1) * 1)], + bnxt_ulp_flow_matcher_field_mask_opcode_names[mask_id]); +} + +/* Function to dump the blob during the mapper processing. */ +void ulp_mapper_field_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_field_info *fld, + struct ulp_blob *blob, u16 write_idx, u8 *val, + u32 field_size) +{ + u32 len = 0, slen = 0; + u32 ret = 0, idx = 0; + u8 *lbuffer_ptr; + u8 lbuffer[64]; + + lbuffer_ptr = lbuffer; + + if (!val || !blob) + return; + + slen = field_size; + if (slen % 8) + len = (slen / 8) + 1; + else + len = (slen / 8); + + memset(lbuffer, 0, sizeof(lbuffer)); + while (len > 0 && idx < 32) { + ret = sprintf((char *)lbuffer_ptr, "%02x", val[idx]); + lbuffer_ptr += ret; + len--; + idx++; + } + + netdev_info(ulp_ctx->bp->dev, + "%-16s %-20s, bits = %-3d and pos = %-3d val = 0x%s\n", + name, fld->description, slen, write_idx, lbuffer); +#ifdef TC_BNXT_TRUFLOW_DEBUG_DETAIL + dump_hex(ulp_ctx, (u8 *)blob->data, (write_idx + slen + 7) / 8); +#endif +} + +void ulp_mapper_ident_field_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_ident_info *ident, + struct bnxt_ulp_mapper_tbl_info *tbl, + int id) +{ + netdev_info(ulp_ctx->bp->dev, "%-16s alloc %-16s, dir= %s, id = 0x%x\n", + name, ident->description, + (tbl->direction == TF_DIR_RX) ? 
"RX" : "TX", id); +} + +void ulp_mapper_tcam_entry_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, u32 idx, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *key, struct ulp_blob *mask, + struct ulp_blob *result) +{ + netdev_info(ulp_ctx->bp->dev, "%-16s [%s][0x%0x],keysz=%-3d resultsz=%-3d\n", + name, + (tbl->direction == TF_DIR_RX) ? "RX" : "TX", + idx, key->write_idx, result->write_idx); + dump_hex(ulp_ctx, (u8 *)key->data, (key->bitlen + 7) / 8); + dump_hex(ulp_ctx, (u8 *)mask->data, (key->bitlen + 7) / 8); + dump_hex(ulp_ctx, (u8 *)result->data, (key->bitlen + 7) / 8); +} + +void ulp_mapper_result_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *result) +{ + netdev_info(ulp_ctx->bp->dev, "%-16s [%s], bitlen=%-3d\n", + name, + (tbl->direction == TF_DIR_RX) ? "RX" : "TX", + result->write_idx); + dump_hex(ulp_ctx, (u8 *)result->data, (result->write_idx + 7) / 8); +} + +void ulp_mapper_act_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *data) +{ + netdev_info(ulp_ctx->bp->dev, "%-16s [%s], bitlen=%-3d\n", + name, + (tbl->direction == TF_DIR_RX) ? "RX" : "TX", + data->write_idx); + dump_hex(ulp_ctx, (u8 *)data->data, (data->write_idx + 7) / 8); +} + +void ulp_mapper_em_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct ulp_blob *key, struct ulp_blob *data, + struct tf_insert_em_entry_parms *iparms) +{ + netdev_info(ulp_ctx->bp->dev, "%s ins %s[%s] scope=0x%02x keysz=%d recsz=%d\n", + name, + (iparms->mem == TF_MEM_EXTERNAL) ? "EXT" : "INT", + (iparms->dir == TF_DIR_RX) ? 
"RX" : "TX", + iparms->tbl_scope_id, + iparms->key_sz_in_bits, + iparms->em_record_sz_in_bits); + + netdev_info(ulp_ctx->bp->dev, "FlowHdl= %llx FlowID= %llu\n", + iparms->flow_handle, iparms->flow_id); + + netdev_info(ulp_ctx->bp->dev, "Key Size %d, Data Size %d\n", + key->write_idx, data->write_idx); + + dump_hex(ulp_ctx, iparms->key, (key->write_idx + 7) / 8); + dump_hex(ulp_ctx, iparms->em_record, (data->write_idx + 7) / 8); +} + +void +ulp_mapper_tfc_em_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct ulp_blob *data, + struct tfc_em_insert_parms *iparms) +{ + netdev_info(ulp_ctx->bp->dev, "%s [%s] keysz=%u recsz=%u\n", + name, + (iparms->dir == CFA_DIR_RX) ? "RX" : "TX", + iparms->key_sz_bits, + iparms->lkup_key_sz_words); + + netdev_info(ulp_ctx->bp->dev, "FlowHdl=%llx\n", *iparms->flow_handle); + + dump_hex(ulp_ctx, data->data, (data->write_idx + 7) / 8); +} + +void ulp_mapper_blob_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_blob *blob) +{ + dump_hex(ulp_ctx, blob->data, (blob->write_idx + 7) / 8); +} + +void ulp_mapper_table_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_tbl_info *tbl, u32 idx) +{ + const char *sub_type; + + if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_INDEX_TABLE) + sub_type = ulp_mapper_res_index_names[tbl->resource_sub_type]; + else if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_KEY_RECIPE_TABLE) + sub_type = + ulp_mapper_res_key_recipe_names[tbl->resource_sub_type]; + else if (tbl->resource_func == BNXT_ULP_RESOURCE_FUNC_GENERIC_TABLE) + sub_type = ulp_mapper_res_generic_names[tbl->resource_sub_type]; + else if (tbl->resource_func == + BNXT_ULP_RESOURCE_FUNC_GLOBAL_REGISTER_TABLE) + sub_type = + ulp_mapper_res_ulp_global_names[tbl->resource_sub_type]; + else + sub_type = "N/A"; + netdev_info(ulp_ctx->bp->dev, "Processing table %-16s:%-16s: %u\n", + ulp_mapper_resource_func_names[tbl->resource_func], + sub_type, idx); +} + +void ulp_mapper_gen_tbl_dump(struct bnxt_ulp_context *ulp_ctx, u32 
sub_type, + u8 direction, struct ulp_blob *key) +{ + netdev_info(ulp_ctx->bp->dev, "Generic Tbl[%s][%s] - Dump Key\n", + ulp_mapper_res_generic_names[sub_type], + (direction == TF_DIR_RX) ? "RX" : "TX"); + ulp_mapper_blob_dump(ulp_ctx, key); +} + +const char * +ulp_mapper_key_recipe_type_to_str(u32 sub_type) +{ + return ulp_mapper_res_key_recipe_names[sub_type]; +} + +#else /* TC_BNXT_TRUFLOW_DEBUG */ + +/* Function to dump the Pattern header bitmaps and fields. */ +void ulp_parser_hdr_info_dump(struct ulp_tc_parser_params *params) +{ +} + +/* Function to dump the Action header bitmaps and properties. */ +void ulp_parser_act_info_dump(struct ulp_tc_parser_params *params) +{ +} + +/* Function to dump the error field during matching. */ +void ulp_matcher_act_field_dump(struct bnxt_ulp_context *ulp_ctx, u32 idx, + u32 jdx, u32 mask_id) +{ +} + +/* Function to dump the blob during the mapper processing. */ +void ulp_mapper_field_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_field_info *fld, + struct ulp_blob *blob, u16 write_idx, u8 *val, + u32 field_size) +{ +} + +void ulp_mapper_ident_field_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_ident_info *ident, + struct bnxt_ulp_mapper_tbl_info *tbl, int id) +{ +} + +void ulp_mapper_tcam_entry_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, u32 idx, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *key, struct ulp_blob *mask, + struct ulp_blob *result) +{ +} + +void ulp_mapper_result_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *result) +{ +} + +void ulp_mapper_act_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *data) +{ +} + +void ulp_mapper_em_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct ulp_blob *key, struct ulp_blob *data, + struct tf_insert_em_entry_parms *iparms) +{ +} 
+ +void ulp_mapper_tfc_em_dump(struct bnxt_ulp_context *ulp_ctx, const char *name, + struct ulp_blob *data, + struct tfc_em_insert_parms *iparms) +{ +} + +void ulp_mapper_blob_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_blob *blob) +{ +} + +void ulp_mapper_table_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_tbl_info *tbl, u32 idx) +{ +} + +void ulp_mapper_gen_tbl_dump(struct bnxt_ulp_context *ulp_ctx, u32 sub_type, + u8 direction, struct ulp_blob *key) +{ +} + +const char *ulp_mapper_key_recipe_type_to_str(u32 sub_type) +{ + return NULL; +} +#endif /* TC_BNXT_TRUFLOW_DEBUG */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.h new file mode 100644 index 000000000000..c5c04230a15d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef ULP_TEMPLATE_DEBUG_H_ +#define ULP_TEMPLATE_DEBUG_H_ + +#ifdef TC_BNXT_TRUFLOW_DEBUG + +/* THIS FILE IS AUTOGENERATED AND MUST NOT BE MODIFIED WITHOUT FEEDING BACK + * TO THE TEMPLATE COMPILER. 
+ */ + +const char *bnxt_ulp_hdr_bit_names[] = { + "BNXT_ULP_HDR_BIT_O_ETH", + "BNXT_ULP_HDR_BIT_OO_VLAN", + "BNXT_ULP_HDR_BIT_OI_VLAN", + "BNXT_ULP_HDR_BIT_O_IPV4", + "BNXT_ULP_HDR_BIT_O_IPV6", + "BNXT_ULP_HDR_BIT_O_SRH", + "BNXT_ULP_HDR_BIT_O_TCP", + "BNXT_ULP_HDR_BIT_O_UDP", + "BNXT_ULP_HDR_BIT_O_ICMP", + "BNXT_ULP_HDR_BIT_T_VXLAN", + "BNXT_ULP_HDR_BIT_T_GRE", + "BNXT_ULP_HDR_BIT_I_ETH", + "BNXT_ULP_HDR_BIT_IO_VLAN", + "BNXT_ULP_HDR_BIT_II_VLAN", + "BNXT_ULP_HDR_BIT_I_IPV4", + "BNXT_ULP_HDR_BIT_I_IPV6", + "BNXT_ULP_HDR_BIT_I_TCP", + "BNXT_ULP_HDR_BIT_I_UDP", + "BNXT_ULP_HDR_BIT_I_ICMP", + "BNXT_ULP_HDR_BIT_O_ECPRI", + "BNXT_ULP_HDR_BIT_F1", + "BNXT_ULP_HDR_BIT_F2", + "BNXT_ULP_HDR_BIT_SVIF_IGNORE", + "BNXT_ULP_HDR_BIT_SVIF", + "BNXT_ULP_HDR_PROTO_LAST", +}; + +const char *bnxt_ulp_action_bit_names[] = { + "BNXT_ULP_ACT_BIT_MARK", + "BNXT_ULP_ACT_BIT_DROP", + "BNXT_ULP_ACT_BIT_COUNT", + "BNXT_ULP_ACT_BIT_RSS", + "BNXT_ULP_ACT_BIT_METER", + "BNXT_ULP_ACT_BIT_VXLAN_DECAP", + "BNXT_ULP_ACT_BIT_POP_MPLS", + "BNXT_ULP_ACT_BIT_PUSH_MPLS", + "BNXT_ULP_ACT_BIT_MAC_SWAP", + "BNXT_ULP_ACT_BIT_SET_MAC_SRC", + "BNXT_ULP_ACT_BIT_SET_MAC_DST", + "BNXT_ULP_ACT_BIT_POP_VLAN", + "BNXT_ULP_ACT_BIT_PUSH_VLAN", + "BNXT_ULP_ACT_BIT_SET_VLAN_PCP", + "BNXT_ULP_ACT_BIT_SET_VLAN_VID", + "BNXT_ULP_ACT_BIT_SET_IPV4_SRC", + "BNXT_ULP_ACT_BIT_SET_IPV4_DST", + "BNXT_ULP_ACT_BIT_SET_IPV6_SRC", + "BNXT_ULP_ACT_BIT_SET_IPV6_DST", + "BNXT_ULP_ACT_BIT_DEC_TTL", + "BNXT_ULP_ACT_BIT_SET_TP_SRC", + "BNXT_ULP_ACT_BIT_SET_TP_DST", + "BNXT_ULP_ACT_BIT_VXLAN_ENCAP", + "BNXT_ULP_ACT_BIT_JUMP", + "BNXT_ULP_ACT_BIT_SHARED", + "BNXT_ULP_ACT_BIT_SAMPLE", + "BNXT_ULP_ACT_BIT_SHARED_SAMPLE", + "BNXT_ULP_ACT_BIT_QUEUE", + "BNXT_ULP_ACT_BIT_DELETE", + "BNXT_ULP_ACT_BIT_UPDATE", + "BNXT_ULP_ACT_BIT_SHARED_METER", + "BNXT_ULP_ACT_BIT_METER_PROFILE", + "BNXT_ULP_ACT_BIT_GOTO_CHAIN", + "BNXT_ULP_ACT_BIT_VF_TO_VF", + "BNXT_ULP_ACT_BIT_IP_ENCAP", + "BNXT_ULP_ACT_BIT_IP_DECAP", + "BNXT_ULP_ACT_BIT_L2_ENCAP", + 
"BNXT_ULP_ACT_BIT_L2_DECAP", + "BNXT_ULP_ACT_BIT_LAST" +}; + +const char *bnxt_ulp_tc_parser_action_prop_names[] = { + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_TYPE", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE", + [BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM] = + "BNXT_ULP_ACT_PROP_IDX_MPLS_POP_NUM", + [BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM] = + "BNXT_ULP_ACT_PROP_IDX_MPLS_PUSH_NUM", + [BNXT_ULP_ACT_PROP_IDX_PORT_ID] = + "BNXT_ULP_ACT_PROP_IDX_PORT_ID", + [BNXT_ULP_ACT_PROP_IDX_VNIC] = + "BNXT_ULP_ACT_PROP_IDX_VNIC", + [BNXT_ULP_ACT_PROP_IDX_VPORT] = + "BNXT_ULP_ACT_PROP_IDX_VPORT", + [BNXT_ULP_ACT_PROP_IDX_MARK] = + "BNXT_ULP_ACT_PROP_IDX_MARK", + [BNXT_ULP_ACT_PROP_IDX_COUNT] = + "BNXT_ULP_ACT_PROP_IDX_COUNT", + [BNXT_ULP_ACT_PROP_IDX_METER] = + "BNXT_ULP_ACT_PROP_IDX_METER", + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC] = + "BNXT_ULP_ACT_PROP_IDX_SET_MAC_SRC", + [BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST] = + "BNXT_ULP_ACT_PROP_IDX_SET_MAC_DST", + [BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN] = + "BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN", + [BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP] = + "BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP", + [BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID] = + "BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID", + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC] = + "BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC", + [BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST] = + "BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST", + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC] = + "BNXT_ULP_ACT_PROP_IDX_SET_IPV6_SRC", + [BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST] = + "BNXT_ULP_ACT_PROP_IDX_SET_IPV6_DST", + [BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC] = + "BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC", + 
[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST] = + "BNXT_ULP_ACT_PROP_IDX_SET_TP_DST", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_0", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_1", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_2", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_3", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_4", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_5", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_6", + [BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7] = + "BNXT_ULP_ACT_PROP_IDX_OF_PUSH_MPLS_7", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_IP", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP", + [BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN] = + "BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN", + [BNXT_ULP_ACT_PROP_IDX_JUMP] = + "BNXT_ULP_ACT_PROP_IDX_JUMP", + [BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE] = + "BNXT_ULP_ACT_PROP_IDX_SHARED_HANDLE", + [BNXT_ULP_ACT_PROP_IDX_RSS_TYPES] = + "BNXT_ULP_ACT_PROP_IDX_RSS_TYPES", + [BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL] = + "BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL", + [BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN] = + "BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN", + [BNXT_ULP_ACT_PROP_IDX_RSS_KEY] = + "BNXT_ULP_ACT_PROP_IDX_RSS_KEY", + [BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM] = + "BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE_NUM", + [BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE] = + "BNXT_ULP_ACT_PROP_IDX_RSS_QUEUE", + [BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX] = + "BNXT_ULP_ACT_PROP_IDX_QUEUE_INDEX", + 
[BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_ID", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_CIR", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_EIR", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBS", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBS", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_RFC2698", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_PM", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBND", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBND", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_EBSM", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_CBSM", + [BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF] = + "BNXT_ULP_ACT_PROP_IDX_METER_PROF_CF", + [BNXT_ULP_ACT_PROP_IDX_METER_INST_ID] = + "BNXT_ULP_ACT_PROP_IDX_METER_INST_ID", + [BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN] = + "BNXT_ULP_ACT_PROP_IDX_METER_INST_ECN_RMP_EN", + [BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL] = + "BNXT_ULP_ACT_PROP_IDX_METER_INST_MTR_VAL", + [BNXT_ULP_ACT_PROP_IDX_LAST] = + "BNXT_ULP_ACT_PROP_IDX_LAST" +}; + +const char *bnxt_ulp_flow_matcher_field_mask_opcode_names[] = { + "MASK_IGNORE", + "MASK_ANY", + "MASK_EXACT", + "MASK_WC", + "MASK_LAST", +}; + +const char *bnxt_ulp_tc_template_field_names[] = { + /* Ingress Template 0 */ + "BNXT_ULP_HF0_SVIF_INDEX", + "BNXT_ULP_HF0_O_ETH_DMAC", + "BNXT_ULP_HF0_O_ETH_SMAC", + "BNXT_ULP_HF0_O_ETH_TYPE", + "BNXT_ULP_HF0_OO_VLAN_CFI_PRI", + "BNXT_ULP_HF0_OO_VLAN_VID", + "BNXT_ULP_HF0_OO_VLAN_TYPE", + "BNXT_ULP_HF0_OI_VLAN_CFI_PRI", + "BNXT_ULP_HF0_OI_VLAN_VID", + "BNXT_ULP_HF0_OI_VLAN_TYPE", + "BNXT_ULP_HF0_O_IPV4_VER", + "BNXT_ULP_HF0_O_IPV4_TOS", + "BNXT_ULP_HF0_O_IPV4_LEN", + 
"BNXT_ULP_HF0_O_IPV4_FRAG_ID", + "BNXT_ULP_HF0_O_IPV4_FRAG_OFF", + "BNXT_ULP_HF0_O_IPV4_TTL", + "BNXT_ULP_HF0_O_IPV4_NEXT_PID", + "BNXT_ULP_HF0_O_IPV4_CSUM", + "BNXT_ULP_HF0_O_IPV4_SRC_ADDR", + "BNXT_ULP_HF0_O_IPV4_DST_ADDR", + "BNXT_ULP_HF0_O_UDP_SRC_PORT", + "BNXT_ULP_HF0_O_UDP_DST_PORT", + "BNXT_ULP_HF0_O_UDP_LENGTH", + "BNXT_ULP_HF0_O_UDP_CSUM", + "BNXT_ULP_HF0_VXLAN_FLAGS", + "BNXT_ULP_HF0_VXLAN_RSVD0", + "BNXT_ULP_HF0_VXLAN_VNI", + "BNXT_ULP_HF0_VXLAN_RSVD1", + "BNXT_ULP_HF0_I_ETH_DMAC", + "BNXT_ULP_HF0_I_ETH_SMAC", + "BNXT_ULP_HF0_I_ETH_TYPE", + "BNXT_ULP_HF0_IO_VLAN_CFI_PRI", + "BNXT_ULP_HF0_IO_VLAN_VID", + "BNXT_ULP_HF0_IO_VLAN_TYPE", + "BNXT_ULP_HF0_II_VLAN_CFI_PRI", + "BNXT_ULP_HF0_II_VLAN_VID", + "BNXT_ULP_HF0_II_VLAN_TYPE", + "BNXT_ULP_HF0_I_IPV4_VER", + "BNXT_ULP_HF0_I_IPV4_TOS", + "BNXT_ULP_HF0_I_IPV4_LEN", + "BNXT_ULP_HF0_I_IPV4_FRAG_ID", + "BNXT_ULP_HF0_I_IPV4_FRAG_OFF", + "BNXT_ULP_HF0_I_IPV4_TTL", + "BNXT_ULP_HF0_I_IPV4_NEXT_PID", + "BNXT_ULP_HF0_I_IPV4_CSUM", + "BNXT_ULP_HF0_I_IPV4_SRC_ADDR", + "BNXT_ULP_HF0_I_IPV4_DST_ADDR", + "BNXT_ULP_HF0_I_TCP_SRC_PORT", + "BNXT_ULP_HF0_I_TCP_DST_PORT", + "BNXT_ULP_HF0_I_TCP_SENT_SEQ", + "BNXT_ULP_HF0_I_TCP_RECV_ACK", + "BNXT_ULP_HF0_I_TCP_DATA_OFF", + "BNXT_ULP_HF0_I_TCP_TCP_FLAGS", + "BNXT_ULP_HF0_I_TCP_RX_WIN", + "BNXT_ULP_HF0_I_TCP_CSUM", + "BNXT_ULP_HF0_I_TCP_UR", + + /* Ingress template 1 */ + "BNXT_ULP_HF1_MPLS_TAG_NUM", + "BNXT_ULP_HF1_O_VTAG_NUM", + "BNXT_ULP_HF1_I_VTAG_NUM", + "BNXT_ULP_HF1_SVIF_INDEX", + "BNXT_ULP_HF1_O_ETH_DMAC", + "BNXT_ULP_HF1_O_ETH_SMAC", + "BNXT_ULP_HF1_O_ETH_TYPE", + "BNXT_ULP_HF1_O_OVLAN_CFI_PRI", + "BNXT_ULP_HF1_O_OVLAN_VID", + "BNXT_ULP_HF1_O_OVLAN_TYPE", + "BNXT_ULP_HF1_O_IVLAN_CFI_PRI", + "BNXT_ULP_HF1_O_IVLAN_VID", + "BNXT_ULP_HF1_O_IVLAN_TYPE", + "BNXT_ULP_HF1_O_IPV4_VER", + "BNXT_ULP_HF1_O_IPV4_TOS", + "BNXT_ULP_HF1_O_IPV4_LEN", + "BNXT_ULP_HF1_O_IPV4_FRAG_ID", + "BNXT_ULP_HF1_O_IPV4_FRAG_OFF", + "BNXT_ULP_HF1_O_IPV4_TTL", + "BNXT_ULP_HF1_O_IPV4_NEXT_PID", + 
"BNXT_ULP_HF1_O_IPV4_CSUM", + "BNXT_ULP_HF1_O_IPV4_SRC_ADDR", + "BNXT_ULP_HF1_O_IPV4_DST_ADDR", + "BNXT_ULP_HF1_O_UDP_SRC_PORT", + "BNXT_ULP_HF1_O_UDP_DST_PORT", + "BNXT_ULP_HF1_O_UDP_LENGTH", + "BNXT_ULP_HF1_O_UDP_CSUM", + "BNXT_ULP_HF1_VXLAN_FLAGS", + "BNXT_ULP_HF1_VXLAN_RSVD0", + "BNXT_ULP_HF1_VXLAN_VNI", + "BNXT_ULP_HF1_VXLAN_RSVD1", + "BNXT_ULP_HF1_I_ETH_DMAC", + "BNXT_ULP_HF1_I_ETH_SMAC", + "BNXT_ULP_HF1_I_ETH_TYPE", + "BNXT_ULP_HF1_I_OVLAN_CFI_PRI", + "BNXT_ULP_HF1_I_OVLAN_VID", + "BNXT_ULP_HF1_I_OVLAN_TYPE", + "BNXT_ULP_HF1_I_IVLAN_CFI_PRI", + "BNXT_ULP_HF1_I_IVLAN_VID", + "BNXT_ULP_HF1_I_IVLAN_TYPE", + "BNXT_ULP_HF1_I_IPV4_VER", + "BNXT_ULP_HF1_I_IPV4_TOS", + "BNXT_ULP_HF1_I_IPV4_LEN", + "BNXT_ULP_HF1_I_IPV4_FRAG_ID", + "BNXT_ULP_HF1_I_IPV4_FRAG_OFF", + "BNXT_ULP_HF1_I_IPV4_TTL", + "BNXT_ULP_HF1_I_IPV4_NEXT_PID", + "BNXT_ULP_HF1_I_IPV4_CSUM", + "BNXT_ULP_HF1_I_IPV4_SRC_ADDR", + "BNXT_ULP_HF1_I_IPV4_DST_ADDR", + "BNXT_ULP_HF1_I_UDP_SRC_PORT", + "BNXT_ULP_HF1_I_UDP_DST_PORT", + "BNXT_ULP_HF1_I_UDP_UR", + + /* Egress template 2 */ + "BNXT_ULP_HF2_MPLS_TAG_NUM", + "BNXT_ULP_HF2_O_VTAG_NUM", + "BNXT_ULP_HF2_I_VTAG_NUM", + "BNXT_ULP_HF2_SVIF_INDEX", + "BNXT_ULP_HF2_O_ETH_DMAC", + "BNXT_ULP_HF2_O_ETH_SMAC", + "BNXT_ULP_HF2_O_ETH_TYPE", + "BNXT_ULP_HF2_O_OVLAN_CFI_PRI", + "BNXT_ULP_HF2_O_OVLAN_VID", + "BNXT_ULP_HF2_O_OVLAN_TYPE", + "BNXT_ULP_HF2_O_IVLAN_CFI_PRI", + "BNXT_ULP_HF2_O_IVLAN_VID", + "BNXT_ULP_HF2_O_IVLAN_TYPE", + "BNXT_ULP_HF2_O_IPV4_VER", + "BNXT_ULP_HF2_O_IPV4_TOS", + "BNXT_ULP_HF2_O_IPV4_LEN", + "BNXT_ULP_HF2_O_IPV4_FRAG_ID", + "BNXT_ULP_HF2_O_IPV4_FRAG_OFF", + "BNXT_ULP_HF2_O_IPV4_TTL", + "BNXT_ULP_HF2_O_IPV4_NEXT_PID", + "BNXT_ULP_HF2_O_IPV4_CSUM", + "BNXT_ULP_HF2_O_IPV4_SRC_ADDR", + "BNXT_ULP_HF2_O_IPV4_DST_ADDR", + "BNXT_ULP_HF2_O_UDP_SRC_PORT", + "BNXT_ULP_HF2_O_UDP_DST_PORT", + "BNXT_ULP_HF2_O_UDP_LENGTH", + "BNXT_ULP_HF2_O_UDP_CSUM", + "BNXT_ULP_HF2_VXLAN_FLAGS", + "BNXT_ULP_HF2_VXLAN_RSVD0", + "BNXT_ULP_HF2_VXLAN_VNI", + 
"BNXT_ULP_HF2_VXLAN_RSVD1", + "BNXT_ULP_HF2_I_ETH_DMAC", + "BNXT_ULP_HF2_I_ETH_SMAC", + "BNXT_ULP_HF2_I_ETH_TYPE", + "BNXT_ULP_HF2_I_OVLAN_CFI_PRI", + "BNXT_ULP_HF2_I_OVLAN_VID", + "BNXT_ULP_HF2_I_OVLAN_TYPE", + "BNXT_ULP_HF2_I_IVLAN_CFI_PRI", + "BNXT_ULP_HF2_I_IVLAN_VID", + "BNXT_ULP_HF2_I_IVLAN_TYPE", + "BNXT_ULP_HF2_I_IPV4_VER", + "BNXT_ULP_HF2_I_IPV4_TOS", + "BNXT_ULP_HF2_I_IPV4_LEN", + "BNXT_ULP_HF2_I_IPV4_FRAG_ID", + "BNXT_ULP_HF2_I_IPV4_FRAG_OFF", + "BNXT_ULP_HF2_I_IPV4_TTL", + "BNXT_ULP_HF2_I_IPV4_NEXT_PID", + "BNXT_ULP_HF2_I_IPV4_CSUM", + "BNXT_ULP_HF2_I_IPV4_SRC_ADDR", + "BNXT_ULP_HF2_I_IPV4_DST_ADDR", + "BNXT_ULP_HF2_I_UDP_SRC_PORT", + "BNXT_ULP_HF2_I_UDP_DST_PORT", + "BNXT_ULP_HF2_I_UDP_UR", +}; + +struct bnxt_ulp_bitpos { + u64 val_o_eth; + u64 val_oo_vlan; + u64 val_oi_vlan; + u64 val_o_ipv4; + u64 val_o_ipv6; + u64 val_o_tcp; + u64 val_o_udp; + u64 val_o_icmp; + u64 val_t_vxlan; + u64 val_t_gre; + u64 val_i_eth; + u64 val_io_vlan; + u64 val_ii_vlan; + u64 val_i_ipv4; + u64 val_i_ipv6; + u64 val_i_tcp; + u64 val_i_udp; + u64 val_i_icmp; + u64 val_f1; + u64 val_f2; + + u64 val_mark; + u64 val_drop; + u64 val_count; + u64 val_rss; + u64 val_meter; + u64 val_vxlan_decap; + u64 val_pop_mpls; + u64 val_push_mpls; + u64 val_mac_swap; + u64 val_set_mac_src; + u64 val_set_mac_dst; + u64 val_pop_vlan; + u64 val_push_vlan; + u64 val_set_vlan_pcp; + u64 val_set_vlan_vid; + u64 val_set_ipv4_src; + u64 val_set_ipv4_dst; + u64 val_set_ipv6_src; + u64 val_set_ipv6_dst; + u64 val_dec_ttl; + u64 val_set_tp_src; + u64 val_set_tp_dst; + u64 val_vxlan_encap; + u64 val_jump; + u64 val_shared; + u64 val_sample; + u64 val_shared_sample; + +}; + +#endif /* TC_BNXT_TRUFLOW_DEBUG */ +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug_proto.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug_proto.h new file mode 100644 index 000000000000..3674184fb043 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_debug_proto.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef ULP_TEMPLATE_DEBUG_PROTO_H_ +#define ULP_TEMPLATE_DEBUG_PROTO_H_ + +#include "tfc.h" + +/* Function to dump the tc flow pattern. */ +void +ulp_parser_hdr_info_dump(struct ulp_tc_parser_params *params); + +/* Function to dump the tc flow actions. */ +void +ulp_parser_act_info_dump(struct ulp_tc_parser_params *params); + +/* Function to dump the error field during matching. */ +void +ulp_matcher_act_field_dump(struct bnxt_ulp_context *ulp_ctx, + u32 idx, + u32 jdx, + u32 mask_id); + +/* * Function to dump the blob during the mapper processing. */ +void +ulp_mapper_field_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_field_info *fld, + struct ulp_blob *blob, + u16 write_idx, + u8 *val, + u32 field_size); + +/* Function to dump the identifiers during the mapper processing. 
*/ +void +ulp_mapper_ident_field_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_ident_info *ident, + struct bnxt_ulp_mapper_tbl_info *tbl, + int id); +void +ulp_mapper_tcam_entry_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + u32 idx, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *key, + struct ulp_blob *mask, + struct ulp_blob *result); +void +ulp_mapper_result_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *result); + +void +ulp_mapper_act_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct bnxt_ulp_mapper_tbl_info *tbl, + struct ulp_blob *data); + +void +ulp_mapper_em_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct ulp_blob *key, + struct ulp_blob *data, + struct tf_insert_em_entry_parms *iparms); + +void +ulp_mapper_tfc_em_dump(struct bnxt_ulp_context *ulp_ctx, + const char *name, + struct ulp_blob *blob, + struct tfc_em_insert_parms *iparms); + +void +ulp_mapper_blob_dump(struct bnxt_ulp_context *ulp_ctx, + struct ulp_blob *blob); + +void +ulp_mapper_table_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_mapper_tbl_info *tbl, u32 idx); + +void +ulp_mapper_gen_tbl_dump(struct bnxt_ulp_context *ulp_ctx, + u32 sub_type, u8 direction, + struct ulp_blob *key); + +const char * +ulp_mapper_key_recipe_type_to_str(u32 sub_type); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_struct.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_struct.h new file mode 100644 index 000000000000..6ec0a3bbf16a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_template_struct.h @@ -0,0 +1,517 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_TEMPLATE_STRUCT_H_ +#define _ULP_TEMPLATE_STRUCT_H_ + +#include "tf_core.h" +#include "cfa_resources.h" +#include "cfa_types.h" + +/* Number of fields for each protocol */ +#define BNXT_ULP_PROTO_HDR_SVIF_NUM 2 +#define BNXT_ULP_PROTO_HDR_ETH_NUM 3 +#define BNXT_ULP_PROTO_HDR_S_VLAN_NUM 3 +#define BNXT_ULP_PROTO_HDR_VLAN_NUM 6 +#define BNXT_ULP_PROTO_HDR_IPV4_NUM 10 +#define BNXT_ULP_PROTO_HDR_IPV6_NUM 8 +#define BNXT_ULP_PROTO_HDR_UDP_NUM 4 +#define BNXT_ULP_PROTO_HDR_TCP_NUM 9 +#define BNXT_ULP_PROTO_HDR_VXLAN_NUM 4 +#define BNXT_ULP_PROTO_HDR_VXLAN_GPE_NUM 5 +#define BNXT_ULP_PROTO_HDR_GENEVE_NUM 4 +#define BNXT_ULP_PROTO_HDR_GRE_NUM 2 +#define BNXT_ULP_PROTO_HDR_ICMP_NUM 5 +#define BNXT_ULP_PROTO_HDR_ECPRI_NUM 2 +#define BNXT_ULP_PROTO_HDR_IPV6_EXT_NUM 1 +#define BNXT_ULP_PROTO_HDR_SRV6_NUM 7 +#define BNXT_ULP_PROTO_HDR_MAX 128 +#define BNXT_ULP_PROTO_HDR_ENCAP_MAX 64 +#define BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX 1 +#define BNXT_ULP_PROTO_HDR_BTH_NUM 2 +#define BNXT_ULP_PROTO_HDR_L2_FILTER_NUM 1 + +/* Direction attributes */ +#define BNXT_ULP_FLOW_ATTR_TRANSFER 0x1 +#define BNXT_ULP_FLOW_ATTR_INGRESS 0x2 +#define BNXT_ULP_FLOW_ATTR_EGRESS 0x4 + +struct ulp_tc_hdr_bitmap { + u64 bits; +}; + +struct ulp_tc_field_bitmap { + u64 bits; +}; + +/* Structure to store the protocol fields */ +#define TC_PARSER_FLOW_HDR_FIELD_SIZE 16 +struct ulp_tc_hdr_field { + u8 spec[TC_PARSER_FLOW_HDR_FIELD_SIZE]; + u8 mask[TC_PARSER_FLOW_HDR_FIELD_SIZE]; + u32 size; +}; + +/* Structure to hold the action property details. 
*/ +struct ulp_tc_act_prop { + u8 act_details[BNXT_ULP_ACT_PROP_IDX_LAST]; +}; + +/* Structure to be used for passing all the parser functions */ +struct ulp_tc_parser_params { + struct hlist_node next; + struct ulp_tc_hdr_bitmap hdr_bitmap; + struct ulp_tc_hdr_bitmap act_bitmap; + struct ulp_tc_hdr_bitmap enc_hdr_bitmap; + struct ulp_tc_hdr_bitmap hdr_fp_bit; + struct ulp_tc_field_bitmap fld_bitmap; + struct ulp_tc_field_bitmap fld_s_bitmap; + struct ulp_tc_hdr_field hdr_field[BNXT_ULP_PROTO_HDR_MAX]; + struct ulp_tc_hdr_field enc_field[BNXT_ULP_PROTO_HDR_ENCAP_MAX]; + u64 comp_fld[BNXT_ULP_CF_IDX_LAST]; + u32 field_idx; + struct ulp_tc_act_prop act_prop; + u32 dir_attr; + u32 priority; + u32 match_chain_id; + u32 fid; + u32 parent_flow; + u32 child_flow; + u16 func_id; + u16 port_id; + u32 class_id; + u32 act_tmpl; + struct bnxt_ulp_context *ulp_ctx; + u32 hdr_sig_id; + u64 flow_sig_id; + u32 flow_pattern_id; + u32 act_pattern_id; + u8 app_id; + u8 tun_idx; + u16 class_info_idx; + u16 act_info_idx; + u64 wc_field_bitmap; + u64 cf_bitmap; + u64 exclude_field_bitmap; + u16 n_proto; + u16 n_proto_mask; + u8 ip_proto; + u8 ip_proto_mask; + u16 addr_type; + u32 action_flags; + u16 tnl_addr_type; + u8 tnl_dmac[BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC]; + u8 tnl_smac[BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC]; + u16 tnl_ether_type; + void *tnl_key; + void *neigh_key; + u16 vlan_tpid; + u16 vlan_tpid_mask; + bool implicit_eth_parsed; + bool implicit_ipv4_parsed; + bool implicit_ipv6_parsed; +}; + +/* Flow Parser Header Information Structure */ +struct bnxt_ulp_tc_hdr_info { + enum bnxt_ulp_hdr_type hdr_type; + /* Flow Parser Protocol Header Function Prototype */ + int (*proto_hdr_func)(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *match_arg); +}; + +/* Flow Parser Header Information Structure Array defined in template source*/ +extern struct bnxt_ulp_tc_hdr_info ulp_hdr_info[]; +extern struct bnxt_ulp_tc_hdr_info ulp_vendor_hdr_info[]; + +/* Flow Parser Action 
Information Structure */ +struct bnxt_ulp_tc_act_info { + enum bnxt_ulp_act_type act_type; + /* Flow Parser Protocol Action Function Prototype */ + int (*proto_act_func)(struct bnxt *bp, + struct ulp_tc_parser_params *params, + void *action_arg); +}; + +/* Flow Parser Action Information Structure Array defined in template source*/ +extern struct bnxt_ulp_tc_act_info ulp_act_info[]; +extern struct bnxt_ulp_tc_act_info ulp_vendor_act_info[]; + +/* Flow Matcher structures */ +struct bnxt_ulp_header_match_info { + struct ulp_tc_hdr_bitmap hdr_bitmap; + u32 start_idx; + u32 num_entries; + u32 class_tmpl_id; + u32 act_vnic; +}; + +struct ulp_tc_bitmap { + u64 bits; +}; + +struct bnxt_ulp_class_match_info { + struct ulp_tc_bitmap hdr_sig; + struct ulp_tc_bitmap field_sig; + u32 class_hid; + u32 class_tid; + u8 act_vnic; + u8 wc_pri; + u8 app_sig; + u32 hdr_sig_id; + u64 flow_sig_id; + u32 flow_pattern_id; + u8 app_id; + struct ulp_tc_bitmap hdr_bitmap; + u64 field_man_bitmap; + u64 field_opt_bitmap; + u64 field_exclude_bitmap; + u8 field_list[BNXT_ULP_GLB_FIELD_TBL_SIZE + 1]; +}; + +/* Flow Matcher templates Structure for class entries */ +extern u16 ulp_class_sig_tbl[]; +extern struct bnxt_ulp_class_match_info ulp_class_match_list[]; + +/* Flow Matcher Action structures */ +struct bnxt_ulp_act_match_info { + struct ulp_tc_bitmap act_bitmap; + u32 act_tid; +}; + +/* Flow Matcher templates Structure for action entries */ +extern u16 ulp_act_sig_tbl[]; +extern struct bnxt_ulp_act_match_info ulp_act_match_list[]; + +/* Device Specific Tables for mapper */ +struct bnxt_ulp_mapper_cond_info { + enum bnxt_ulp_cond_opc cond_opcode; + u64 cond_operand; +}; + +struct bnxt_ulp_mapper_cond_list_info { + enum bnxt_ulp_cond_list_opc cond_list_opcode; + u32 cond_start_idx; + u32 cond_nums; + int cond_true_goto; + int cond_false_goto; +}; + +struct bnxt_ulp_mapper_func_info { + enum bnxt_ulp_func_opc func_opc; + enum bnxt_ulp_func_src func_src1; + enum bnxt_ulp_func_src func_src2; + u64 
func_opr1; + u64 func_opr2; + u16 func_dst_opr; + u32 func_oper_size; +}; + +struct bnxt_ulp_template_device_tbls { + struct bnxt_ulp_mapper_tmpl_info *tmpl_list; + u32 tmpl_list_size; + struct bnxt_ulp_mapper_tbl_info *tbl_list; + u32 tbl_list_size; + struct bnxt_ulp_mapper_key_info *key_info_list; + u32 key_info_list_size; + struct bnxt_ulp_mapper_field_info *key_ext_list; + u32 key_ext_list_size; + struct bnxt_ulp_mapper_field_info *result_field_list; + u32 result_field_list_size; + struct bnxt_ulp_mapper_ident_info *ident_list; + u32 ident_list_size; + struct bnxt_ulp_mapper_cond_info *cond_list; + u32 cond_list_size; + struct bnxt_ulp_mapper_cond_list_info *cond_oper_list; + u32 cond_oper_list_size; + +}; + +struct bnxt_ulp_dyn_size_map { + u32 slab_size; + enum tf_tbl_type tbl_type; +}; + +/* Device specific parameters */ +struct bnxt_ulp_device_params { + u8 description[16]; + enum bnxt_ulp_byte_order key_byte_order; + enum bnxt_ulp_byte_order result_byte_order; + enum bnxt_ulp_byte_order encap_byte_order; + enum bnxt_ulp_byte_order wc_key_byte_order; + enum bnxt_ulp_byte_order em_byte_order; + u8 encap_byte_swap; + u8 num_phy_ports; + u32 mark_db_lfid_entries; + u64 mark_db_gfid_entries; + u64 int_flow_db_num_entries; + u64 ext_flow_db_num_entries; + u32 flow_count_db_entries; + u32 fdb_parent_flow_entries; + u32 num_resources_per_flow; + u32 ext_cntr_table_type; + u64 byte_count_mask; + u64 packet_count_mask; + u32 byte_count_shift; + u32 packet_count_shift; + u32 wc_dynamic_pad_en; + u32 em_dynamic_pad_en; + u32 dynamic_sram_en; + u32 dyn_encap_list_size; + struct bnxt_ulp_dyn_size_map dyn_encap_sizes[5]; + u32 dyn_modify_list_size; + struct bnxt_ulp_dyn_size_map dyn_modify_sizes[4]; + u16 em_blk_size_bits; + u16 em_blk_align_bits; + u16 em_key_align_bytes; + u16 em_result_size_bits; + u16 wc_slice_width; + u16 wc_max_slices; + u32 wc_mode_list[4]; + u32 wc_mod_list_max_size; + u32 wc_ctl_size_bits; + u32 dev_features; + const struct 
bnxt_ulp_generic_tbl_params *gen_tbl_params; + const struct bnxt_ulp_allocator_tbl_params *allocator_tbl_params; + const struct bnxt_ulp_template_device_tbls *dev_tbls; +}; + +/* Flow Mapper */ +struct bnxt_ulp_mapper_tmpl_info { + u32 device_name; + u32 start_tbl_idx; + u32 num_tbls; + struct bnxt_ulp_mapper_cond_list_info reject_info; +}; + +struct bnxt_ulp_mapper_tbl_info { + enum bnxt_ulp_resource_func resource_func; + u32 resource_type; /* TF_ enum type */ + enum bnxt_ulp_resource_sub_type resource_sub_type; + struct bnxt_ulp_mapper_cond_list_info execute_info; + struct bnxt_ulp_mapper_func_info func_info; + enum bnxt_ulp_cond_opc cond_opcode; + u32 cond_operand; + u8 direction; + enum bnxt_ulp_pri_opc pri_opcode; + u32 pri_operand; + + /* conflict resolution opcode */ + enum bnxt_ulp_accept_opc accept_opcode; + + enum bnxt_ulp_critical_resource critical_resource; + + /* Information for accessing the key in ulp_key_field_list */ + u32 key_start_idx; + u16 key_bit_size; + u16 key_num_fields; + + /* Information for accessing the partial key in ulp_key_field_list */ + u32 partial_key_start_idx; + u16 partial_key_bit_size; + u16 partial_key_num_fields; + + /* Size of the blob that holds the key */ + u16 blob_key_bit_size; + u16 record_size; + + /* Information for accessing the ulp_class_result_field_list */ + u32 result_start_idx; + u16 result_bit_size; + u16 result_num_fields; + u16 encap_num_fields; + + /* Information for accessing the ulp_ident_list */ + u32 ident_start_idx; + u16 ident_nums; + + enum bnxt_ulp_mark_db_opc mark_db_opcode; + + /* Table opcode for table operations */ + u32 tbl_opcode; + u32 tbl_operand; + enum bnxt_ulp_generic_tbl_lkup_type gen_tbl_lkup_type; + + /* FDB table opcode */ + enum bnxt_ulp_fdb_opc fdb_opcode; + u32 fdb_operand; + + /* Manage ref_cnt via opcode for generic tables */ + enum bnxt_ulp_ref_cnt_opc ref_cnt_opcode; + + /* Shared session */ + enum bnxt_ulp_session_type session_type; + + /* Track by session or by function */ + 
enum cfa_track_type track_type; + + /* Key recipes for generic templates */ + enum bnxt_ulp_key_recipe_opc key_recipe_opcode; + u32 key_recipe_operand; + + /* control table messages */ + const char *false_message; + const char *true_message; + const char *description; +}; + +struct bnxt_ulp_mapper_field_info { + u8 description[64]; + u16 field_bit_size; + enum bnxt_ulp_field_opc field_opc; + enum bnxt_ulp_field_src field_src1; + u8 field_opr1[16]; + enum bnxt_ulp_field_src field_src2; + u8 field_opr2[16]; + enum bnxt_ulp_field_src field_src3; + u8 field_opr3[16]; +}; + +struct bnxt_ulp_mapper_key_info { + struct bnxt_ulp_mapper_field_info field_info_spec; + struct bnxt_ulp_mapper_field_info field_info_mask; +}; + +struct bnxt_ulp_mapper_ident_info { + u8 description[64]; + u32 resource_func; + + u16 ident_type; + u16 ident_bit_size; + u16 ident_bit_pos; + enum bnxt_ulp_rf_idx regfile_idx; +}; + +struct bnxt_ulp_glb_resource_info { + u8 app_id; + enum bnxt_ulp_device_id device_id; + enum tf_dir direction; + enum bnxt_ulp_session_type session_type; + enum bnxt_ulp_resource_func resource_func; + u32 resource_type; /* TF_ enum type */ + enum bnxt_ulp_glb_rf_idx glb_regfile_index; +}; + +struct bnxt_ulp_resource_resv_info { + u8 app_id; + enum bnxt_ulp_device_id device_id; + enum tf_dir direction; + enum bnxt_ulp_session_type session_type; + enum bnxt_ulp_resource_func resource_func; + u32 resource_type; /* TF_ enum type */ + u32 count; +}; + +struct bnxt_ulp_app_capabilities_info { + u8 app_id; + u32 default_priority; + u32 max_def_priority; + u32 min_flow_priority; + u32 max_flow_priority; + u32 vxlan_port; + u32 vxlan_ip_port; + u32 ecpri_udp_port; + enum bnxt_ulp_device_id device_id; + u32 upgrade_fw_update; + u8 ha_pool_id; + u8 ha_reg_state; + u8 ha_reg_cnt; + u8 tunnel_next_proto; + u32 flags; + u32 max_pools; + u8 em_multiplier; + u32 num_rx_flows; + u32 num_tx_flows; + u16 act_rx_max_sz; + u16 act_tx_max_sz; + u16 em_rx_key_max_sz; + u16 em_tx_key_max_sz; + u32 
pbl_page_sz_in_bytes; + u16 num_key_recipes_per_dir; + u64 feature_bits; + u64 default_class_bits; + u64 default_act_bits; +}; + +struct bnxt_ulp_cache_tbl_params { + u16 num_entries; +}; + +struct bnxt_ulp_generic_tbl_params { + const char *name; + enum bnxt_ulp_gen_tbl_type gen_tbl_type; + u16 result_num_entries; + u16 result_num_bytes; + enum bnxt_ulp_byte_order result_byte_order; + u32 hash_tbl_entries; + u16 num_buckets; + u16 key_num_bytes; + u16 partial_key_num_bytes; +}; + +struct bnxt_ulp_allocator_tbl_params { + const char *name; + u16 num_entries; +}; + +struct bnxt_ulp_shared_act_info { + u64 act_bitmask; +}; + +/* Flow Mapper Static Data Externs: + * Access to the below static data should be done through access functions and + * directly throughout the code. + */ + +/* The ulp_device_params is indexed by the dev_id. + * This table maintains the device specific parameters. + */ +extern struct bnxt_ulp_device_params ulp_device_params[]; + +/* The ulp_act_prop_map_table provides the mapping to index and size of action + * properties. + */ +extern u32 ulp_act_prop_map_table[]; + +/* The ulp_glb_resource_tbl provides the list of global resources that need to + * be initialized and where to store them. + */ +extern struct bnxt_ulp_glb_resource_info ulp_glb_resource_tbl[]; + +/* The ulp_app_glb_resource_tbl provides the list of shared resources required + * in the event that shared session is enabled. + */ +extern struct bnxt_ulp_glb_resource_info ulp_app_glb_resource_tbl[]; + +/* The ulp_resource_resv_list provides the list of tf resources required when + * calling tf_open. + */ +extern struct bnxt_ulp_resource_resv_info ulp_resource_resv_list[]; + +/* The ulp_app_resource_resv_list provides the list of tf resources required + * when calling tf_open. + */ +extern struct bnxt_ulp_resource_resv_info ulp_app_resource_resv_list[]; + +/* The_app_cap_info_list provides the list of ULP capabilities per app/device. 
+ */ +extern struct bnxt_ulp_app_capabilities_info ulp_app_cap_info_list[]; + +/* The ulp_cache_tbl_parms table provides the sizes of the cache tables the + * mapper must dynamically allocate during initialization. + */ +extern struct bnxt_ulp_cache_tbl_params ulp_cache_tbl_params[]; + +/* The ulp_generic_tbl_parms table provides the sizes of the generic tables the + * mapper must dynamically allocate during initialization. + */ +extern struct bnxt_ulp_generic_tbl_params ulp_generic_tbl_params[]; +/* The ulp_global template table is used to initialize default entries + * that could be reused by other templates. + */ +extern u32 ulp_glb_template_tbl[]; + +#endif /* _ULP_TEMPLATE_STRUCT_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.c new file mode 100644 index 000000000000..109de7ca79d0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "ulp_tf_debug.h" + +#include "tf_core.h" +#include "tf_em.h" +#include "tf_msg.h" +#include "tf_ext_flow_handle.h" + +#include "ulp_port_db.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +const char *tf_if_tbl_2_str(u32 type) +{ + enum tf_if_tbl_type id_type = type; + + switch (id_type) { + case TF_IF_TBL_TYPE_PROF_SPIF_DFLT_L2_CTXT: + return "spif dflt l2 ctxt"; + case TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR: + return "parif act rec ptr"; + case TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR: + return "parif err act rec ptr"; + case TF_IF_TBL_TYPE_LKUP_PARIF_DFLT_ACT_REC_PTR: + return "lkup parif act rec ptr"; + case TF_IF_TBL_TYPE_ILT: + return "ilt tbl"; + case TF_IF_TBL_TYPE_VSPT: + return "vspt tbl"; + default: + return "Invalid identifier"; + } +} + +#ifdef TC_BNXT_TRUFLOW_DEBUG + +void ulp_port_db_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db, + struct ulp_interface_info *intf, u32 port_id) +{ + struct ulp_func_if_info *func; + struct ulp_phy_port_info *port_data; + + netdev_dbg(ulp_ctx->bp->dev, "*****Dump for port_id %d ******\n", + port_id); + netdev_dbg(ulp_ctx->bp->dev, + "type=0x%x, drv_func_id=0x%x, vf_func_id=0x%x vf_roce=%d udcc_en=%d\n", + intf->type, intf->drv_func_id, intf->vf_func_id, + intf->rdma_sriov_en, intf->udcc_en); + + func = &port_db->ulp_func_id_tbl[intf->drv_func_id]; + netdev_dbg(ulp_ctx->bp->dev, + "drv_func_svif=0x%0x, drv_func_spif=0x%0x ", + func->func_svif, func->func_spif); + netdev_dbg(ulp_ctx->bp->dev, + "drv_func_parif=0x%0x, drv_default_vnic=0x%0x drv_roce_vnic=0x%0x\n", + func->func_parif, func->func_vnic, be16_to_cpu(func->func_roce_vnic)); + netdev_dbg(ulp_ctx->bp->dev, "drv_func_parent_vnic=0x%0x\n", + be16_to_cpu(func->func_parent_vnic)); + netdev_dbg(ulp_ctx->bp->dev, "Mac=%02X:%02X:%02X:%02X:%02X:%02X\n", + func->func_mac[0], func->func_mac[1], func->func_mac[2], 
+ func->func_mac[3], func->func_mac[4], func->func_mac[5]); + netdev_dbg(ulp_ctx->bp->dev, + "Parent Mac=%02X:%02X:%02X:%02X:%02X:%02X\n", + func->func_parent_mac[0], func->func_parent_mac[1], + func->func_parent_mac[2], func->func_parent_mac[3], + func->func_parent_mac[4], func->func_parent_mac[5]); + + if (intf->type == BNXT_ULP_INTF_TYPE_VF_REP) { + func = &port_db->ulp_func_id_tbl[intf->vf_func_id]; + netdev_dbg(ulp_ctx->bp->dev, + "vf_func_svif=0x%0x, vf_func_spif=0x%0x ", + func->func_svif, func->func_spif); + netdev_dbg(ulp_ctx->bp->dev, + "vf_func_parif=0x%0x, vf_default_vnic=0x%0x vf_roce_vnic=0x0%x\n", + func->func_parif, func->func_vnic, be16_to_cpu(func->func_roce_vnic)); + netdev_dbg(ulp_ctx->bp->dev, "vf_func_parent_vnic=0x%0x ", + be16_to_cpu(func->func_parent_vnic)); + netdev_dbg(ulp_ctx->bp->dev, + "Mac=%02X:%02X:%02X:%02X:%02X:%02X\n", + func->func_mac[0], func->func_mac[1], + func->func_mac[2], func->func_mac[3], + func->func_mac[4], func->func_mac[5]); + } + port_data = &port_db->phy_port_list[func->phy_port_id]; + netdev_dbg(ulp_ctx->bp->dev, + "phy_port_svif=0x%0x, phy_port_spif=0x%0x ", + port_data->port_svif, port_data->port_spif); + netdev_dbg(ulp_ctx->bp->dev, + "phy_port_parif=0x%0x, phy_port_vport=0x%0x\n", + port_data->port_parif, port_data->port_vport); + + netdev_dbg(ulp_ctx->bp->dev, "***** dump complete ******\n"); +} + +#else /* TC_BNXT_TRUFLOW_DEBUG */ + +void ulp_port_db_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db, + struct ulp_interface_info *intf, u32 port_id) +{ +} + +#endif /* TC_BNXT_TRUFLOW_DEBUG */ +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.h new file mode 100644 index 000000000000..1c75357b87c9 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_tf_debug.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 
Broadcom + * All rights reserved. + */ + +#ifndef _ULP_TF_DEBUG_H_ +#define _ULP_TF_DEBUG_H_ + +#include "bnxt_tf_ulp.h" + +struct tf; +struct ulp_interface_info; +struct bnxt_ulp_port_db; + +const char *tf_if_tbl_2_str(u32 id_type); +void ulp_port_db_dump(struct bnxt_ulp_context *ulp_ctx, + struct bnxt_ulp_port_db *port_db, + struct ulp_interface_info *intf, + u32 port_id); + +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.c new file mode 100644 index 000000000000..1cf76a04b410 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.c @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_tf_ulp.h" +#include "bnxt_udcc.h" +#include "ulp_udcc.h" +#include "bitalloc.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +static inline int bnxt_ulp_udcc_v6_subnet_delete(struct bnxt *bp, + struct bnxt_ulp_udcc_v6_subnet_node *node) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + int rc = 0; + + if (!node) + return -EINVAL; + + netdev_dbg(bp->dev, "DEL: fid %d dst %pI6/%pI6 subnet_hndl %d ref %u\n", + node->key.src_fid, &node->key.dst, &node->key.dmsk, + node->data.subnet_hndl, + atomic_read(&node->ref.refs)); + + if (refcount_dec_and_test(&node->ref)) { + rc = bnxt_ba_free(&tc_info->v6_subnet_pool, + node->data.subnet_hndl); + if (rc) + netdev_err(bp->dev, "UDCC: BA free failed, rc=%d\n", rc); + + rc = rhashtable_remove_fast(&tc_info->v6_subnet_table, + &node->node, + tc_info->v6_subnet_ht_params); + if (rc) + netdev_err(bp->dev, "UDCC: rhash remove failed, rc=%d\n", rc); + + netdev_dbg(bp->dev, "DEL:Y suspend fid %d dst %pI6/%pI6\n", + node->key.src_fid, &node->key.dst, &node->key.dmsk); + + kfree(node); + /* Update the sessions 
and delete its flows */ + bnxt_udcc_update_session(bp, true); + } + + return rc; +} + +/* Utility to ensure prefix is addr & mask, so if + * user sends different host but same network addr + * we are ablel to normalize all of them to network + * + * @addr[in]: Subnet IPv6 address + * @mask[in]: Subnet IPv6 mask + * @pfx[out]: Returns the prefix + */ +static inline void bnxt_ulp_udcc_v6_addr_prefix(struct in6_addr *pfx, + struct in6_addr *msk, + const u8 *addr, + const u8 *mask) +{ + int i; + + /* Copy the key, AND it with the mask */ + for (i = 0; i < sizeof(pfx->s6_addr); i++) + pfx->s6_addr[i] = addr[i] & mask[i]; + /* Copy the mask */ + memcpy(msk->s6_addr, mask, sizeof(struct in6_addr)); +} + +int bnxt_ulp_udcc_v6_subnet_add(struct bnxt *bp, + u16 *src_fid, u8 *v6dst, u8 *v6msk, + u8 *dmac, u8 *smac, + u16 *subnet_hndl) +{ + struct bnxt_ulp_udcc_v6_subnet_node *new_node, *old_node; + struct bnxt_tc_info *tc_info = bp->tc_info; + int hndl = -1; + int rc = 0; + + if (!src_fid || !v6dst || !v6msk || !dmac || !smac || !subnet_hndl) + return -EINVAL; + + netdev_dbg(bp->dev, "ADD: fid %d dst %pI6/%pI6\n", + be16_to_cpu(*src_fid), v6dst, v6msk); + netdev_dbg(bp->dev, "ADD: dmac %pM smac %pM\n", + dmac, smac); + + /* Allocate memory for the new node */ + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) + return -ENOMEM; + + /* Setup the KEY */ + bnxt_ulp_udcc_v6_addr_prefix(&new_node->key.dst, &new_node->key.dmsk, + v6dst, v6msk); + /* Ideally we want it to be this be16_to_cpu(*src_fid), + * but this application is per PF so use the PF fid + */ + new_node->key.src_fid = bp->pf.fw_fid; + /* setup an invalid handle */ + *subnet_hndl = -1; + + /* This function returns the object if it exists, + * NULL if it did not and the insertion was successful, + * and an ERR_PTR otherwise. 
+ */ + old_node = rhashtable_lookup_get_insert_fast(&tc_info->v6_subnet_table, + &new_node->node, + tc_info->v6_subnet_ht_params); + if (IS_ERR(old_node)) { + rc = PTR_ERR(old_node); + goto node_free; + } + + if (old_node) { + /* if the subnet already exists, but DMAC/SMAC changed */ + if ((!ether_addr_equal(old_node->data.dmac, dmac)) || + (!ether_addr_equal(old_node->data.smac, smac))) { + netdev_dbg(bp->dev, "OLD dmac %pM smac %pM\n", + old_node->data.dmac, old_node->data.smac); + ether_addr_copy(old_node->data.dmac, dmac); + ether_addr_copy(old_node->data.smac, smac); + /* Update the sessions and modify its flows */ + bnxt_udcc_update_session(bp, true); + } + + /* Increment Refcount and free the new node */ + if (!refcount_inc_not_zero(&old_node->ref)) + netdev_err(bp->dev, "UDCC: incr refcount failed for %pI6\n", + v6dst); + + netdev_dbg(bp->dev, "ADD: already exist, inc ref count %u\n", + atomic_read(&old_node->ref.refs)); + *subnet_hndl = old_node->data.subnet_hndl; + goto node_free; + + } else { + /* Set Refcount to 1 and fill up the data in the new node*/ + refcount_set(&new_node->ref, 1); + hndl = bnxt_ba_alloc(&tc_info->v6_subnet_pool); + if (hndl < 0) { + rc = -ENOMEM; + netdev_err(bp->dev, "UDCC: BA allocation failed, rc:%d\n", rc); + goto node_delete_free; + } + *subnet_hndl = (u16)hndl; + new_node->data.subnet_hndl = *subnet_hndl; + ether_addr_copy(new_node->data.dmac, dmac); + ether_addr_copy(new_node->data.smac, smac); + netdev_dbg(bp->dev, "ADD:Y unsuspend key_fid %d, entry subnet_hndl %d, ref count %u\n", + new_node->key.src_fid, + new_node->data.subnet_hndl, + atomic_read(&new_node->ref.refs)); + + /* Update the sessions and modify its flows */ + bnxt_udcc_update_session(bp, false); + } + return rc; + +node_delete_free: + bnxt_ulp_udcc_v6_subnet_delete(bp, new_node); + return rc; +node_free: + kfree(new_node); + return rc; +} + +int bnxt_ulp_udcc_v6_subnet_del(struct bnxt *bp, u16 subnet_hndl) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + 
struct bnxt_ulp_udcc_v6_subnet_node *node; + struct rhashtable_iter iter; + int rc = 0; + + if (subnet_hndl > BNXT_ULP_MAX_V6_SUBNETS) + return -EINVAL; + + netdev_dbg(bp->dev, "DEL HNDL: subnet_hndl %u\n", subnet_hndl); + rhashtable_walk_enter(&tc_info->v6_subnet_table, &iter); + rhashtable_walk_start(&iter); + while ((node = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(node)) + continue; + if (node->data.subnet_hndl == subnet_hndl) { + /* Found a subnet that matches the handle*/ + rc = bnxt_ulp_udcc_v6_subnet_delete(bp, node); + break; + } + rc = -ENOENT; + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + + return rc; +} + +static inline bool bnxt_ulp_udcc_v6_subnet_compare(struct bnxt *bp, + u16 src_fid, + const struct in6_addr *dst, + struct bnxt_ulp_udcc_v6_subnet_key *key) +{ + bool found = false; + + if (src_fid != key->src_fid) + return found; + + found = !ipv6_masked_addr_cmp(&key->dst, &key->dmsk, dst); + netdev_dbg(bp->dev, "CMP:%s fid %d/%d subnet %pI6/%pI6\n", + found ? 
"Y" : "N", + src_fid, key->src_fid, &key->dst, &key->dmsk); + + return found; +} + +int bnxt_ulp_udcc_v6_subnet_check(struct bnxt *bp, + u16 src_fid, + const struct in6_addr *dst, + u8 *dmac, u8 *smac) +{ + struct bnxt_tc_info *tc_info = bp->tc_info; + struct bnxt_ulp_context *ulp_ctx = bp->ulp_ctx; + struct bnxt_ulp_udcc_v6_subnet_node *node; + struct rhashtable_iter iter; + int rc = -ENOENT; + + /* subnets cannot be added in non-switchdev mode so return -ENOENT */ + if (!bnxt_tc_is_switchdev_mode(bp)) + return rc; + + if (!dst || !dmac || !smac) + return -EINVAL; + + netdev_dbg(bp->dev, "CHK: fid %d dst %pI6\n", src_fid, dst); + mutex_lock(&ulp_ctx->cfg_data->flow_db_lock); + rhashtable_walk_enter(&tc_info->v6_subnet_table, &iter); + rhashtable_walk_start(&iter); + while ((node = rhashtable_walk_next(&iter)) != NULL) { + if (IS_ERR(node)) + continue; + if (bnxt_ulp_udcc_v6_subnet_compare(bp, src_fid, dst, &node->key)) { + /* Found a subnet that matches the DIP */ + if (is_valid_ether_addr(node->data.dmac) && + is_valid_ether_addr(node->data.smac)) { + ether_addr_copy(dmac, node->data.dmac); + ether_addr_copy(smac, node->data.smac); + rc = 0; + } else { + /* VF to VF case the SMAC/DMAC will be invalid */ + rc = -EPERM; + } + break; + } + } + rhashtable_walk_stop(&iter); + rhashtable_walk_exit(&iter); + + mutex_unlock(&ulp_ctx->cfg_data->flow_db_lock); + return rc; +} + +#endif /* if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.h new file mode 100644 index 000000000000..a8cfca27e73e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_udcc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _ULP_UDCC_H_ +#define _ULP_UDCC_H_ + +#include +#include + +struct bnxt_ulp_udcc_v6_subnet_key { + u16 src_fid; + struct in6_addr dst; + struct in6_addr dmsk; +}; + +struct bnxt_ulp_udcc_v6_subnet_data { + u8 dmac[ETH_ALEN]; + u8 smac[ETH_ALEN]; + u16 subnet_hndl; /* Template FDB needs this to flush */ +}; + +struct bnxt_ulp_udcc_v6_subnet_node { + struct bnxt_ulp_udcc_v6_subnet_key key; + struct rhash_head node; + struct bnxt_ulp_udcc_v6_subnet_data data; + refcount_t ref; +}; + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) + +/* Add subnet in the hash table + * @key[in]: Key struct with src_fid, prefix and mask + * @data[in]: pointer to data to store in the subnet node + * return 0 on success and -ve on failure + */ +int bnxt_ulp_udcc_v6_subnet_add(struct bnxt *bp, + u16 *src_fid, u8 *v6dst, u8 *v6msk, + u8 *dmac, u8 *smac, + u16 *subnet_hndl); + +/* Delete subnet in the hash table by handl + * ULP template handler can clean resource by hndl ONLY + * @subnet_hndl[in]: ULP template resource handle + * @retuns 0 on success and -ve on failure + */ +int bnxt_ulp_udcc_v6_subnet_del(struct bnxt *bp, u16 subnet_hndl); + +/* Subnet Checking for UDCC application + * @src_fid[in]: FID of the function + * @dst[in]: Dest IPv6 + * @dmac[out]: pointer to modify dmac + * @smac[out]: pointer to modify smac + * @returns: + * 0 when a valid subnet with modify dmac and smac is found, + * -ENOENT when subnet is NOT found, + * -EPERM the subnets modify dmac/smac are invalid (e.g. 
VFtoVF) + * -ve on other failures + */ +int bnxt_ulp_udcc_v6_subnet_check(struct bnxt *bp, + u16 src_fid, + const struct in6_addr *dst, + u8 *dmac, u8 *smac); + +#endif /* #if defined(CONFIG_BNXT_FLOWER_OFFLOAD) */ +#endif /* #ifndef _ULP_UDCC_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.c b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.c new file mode 100644 index 000000000000..aaa0dfc67c98 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include "bnxt_compat.h" +#include "bnxt_hsi.h" +#include "ulp_utils.h" + +#if defined(CONFIG_BNXT_FLOWER_OFFLOAD) || defined(CONFIG_BNXT_CUSTOM_FLOWER_OFFLOAD) +/** + * Initialize the regfile structure for writing + * + * @regfile: Ptr to a regfile instance + * + */ +int +ulp_regfile_init(struct ulp_regfile *regfile) +{ + /* validate the arguments */ + if (!regfile) + return -EINVAL; + + memset(regfile, 0, sizeof(struct ulp_regfile)); + return 0; +} + +/** + * Read a value from the regfile + * + * @regfile: The regfile instance. Must be initialized prior to being used + * + * @field: The field to be read within the regfile. + * + * @data: + * + * returns size, zero on failure + */ +int +ulp_regfile_read(struct ulp_regfile *regfile, + enum bnxt_ulp_rf_idx field, + u64 *data) +{ + /* validate the arguments */ + if (!regfile || field >= BNXT_ULP_RF_IDX_LAST) + return -EINVAL; + + *data = regfile->entry[field].data; + return 0; +} + +/** + * Write a value to the regfile + * + * @regfile: The regfile instance. Must be initialized prior to being used + * + * @field: The field to be written within the regfile. + * + * @data: The value is written into this variable. It is going to be in the + * same byte order as it was written. + * + * @size: The size in bytes of the value beingritten into this + * variable. 
+ * + * returns 0 on success + */ +int +ulp_regfile_write(struct ulp_regfile *regfile, + enum bnxt_ulp_rf_idx field, + u64 data) +{ + /* validate the arguments */ + if (!regfile || field >= BNXT_ULP_RF_IDX_LAST) + return -EINVAL; + + regfile->entry[field].data = data; + return 0; /* Success */ +} + +static void +ulp_bs_put_msb(u8 *bs, u16 bitpos, u8 bitlen, u8 val) +{ + u8 bitoffs = bitpos % 8; + u16 index = bitpos / 8; + s8 shift; + u8 mask; + u8 tmp; + + tmp = bs[index]; + mask = ((u8)-1 >> (8 - bitlen)); + shift = 8 - bitoffs - bitlen; + val &= mask; + + if (shift >= 0) { + tmp &= ~(mask << shift); + tmp |= val << shift; + bs[index] = tmp; + } else { + tmp &= ~((u8)-1 >> bitoffs); + tmp |= val >> -shift; + bs[index++] = tmp; + + tmp = bs[index]; + tmp &= ((u8)-1 >> (bitlen - (8 - bitoffs))); + tmp |= val << (8 + shift); + bs[index] = tmp; + } +} + +static void +ulp_bs_put_lsb(u8 *bs, u16 bitpos, u8 bitlen, u8 val) +{ + u8 bitoffs = bitpos % 8; + u16 index = bitpos / 8; + u8 partial; + u8 shift; + u8 mask; + u8 tmp; + + tmp = bs[index]; + shift = bitoffs; + + if (bitoffs + bitlen <= 8) { + mask = ((1 << bitlen) - 1) << shift; + tmp &= ~mask; + tmp |= ((val << shift) & mask); + bs[index] = tmp; + } else { + partial = 8 - bitoffs; + mask = ((1 << partial) - 1) << shift; + tmp &= ~mask; + tmp |= ((val << shift) & mask); + bs[index++] = tmp; + + val >>= partial; + partial = bitlen - partial; + mask = ((1 << partial) - 1); + tmp = bs[index]; + tmp &= ~mask; + tmp |= (val & mask); + bs[index] = tmp; + } +} + +/** + * Add data to the byte array in Little endian format. + * + * @bs: The byte array where data is pushed + * + * @pos: The offset where data is pushed + * + * @len: The number of bits to be added to the data array. + * + * @val: The data to be added to the data array. + * + * returns the number of bits pushed. 
+ */ +u32 +ulp_bs_push_lsb(u8 *bs, u16 pos, u8 len, u8 *val) +{ + int cnt = (len) / 8; + int tlen = len; + int i; + + if (cnt > 0 && !(len % 8)) + cnt -= 1; + + for (i = 0; i < cnt; i++) { + ulp_bs_put_lsb(bs, pos, 8, val[cnt - i]); + pos += 8; + tlen -= 8; + } + + /* Handle the remainder bits */ + if (tlen) + ulp_bs_put_lsb(bs, pos, tlen, val[0]); + return len; +} + +/** + * Add data to the byte array in Big endian format. + * + * @bs: The byte array where data is pushed + * + * @pos: The offset where data is pushed + * + * @len: The number of bits to be added to the data array. + * + * @val: The data to be added to the data array. + * + * returns the number of bits pushed. + */ +u32 +ulp_bs_push_msb(u8 *bs, u16 pos, u8 len, u8 *val) +{ + int cnt = (len + 7) / 8; + int i; + + /* Handle any remainder bits */ + int tmp = len % 8; + + if (!tmp) + tmp = 8; + + ulp_bs_put_msb(bs, pos, tmp, val[0]); + + pos += tmp; + + for (i = 1; i < cnt; i++) { + ulp_bs_put_msb(bs, pos, 8, val[i]); + pos += 8; + } + + return len; +} + +/** + * Initializes the blob structure for creating binary blob + * + * @blob: The blob to be initialized + * + * @bitlen: The bit length of the blob + * + * @order: The byte order for the blob. Currently only supporting + * big endian. All fields are packed with this order. + * + * Notes - If bitlen is zero then set it to max. + */ +int +ulp_blob_init(struct ulp_blob *blob, + u16 bitlen, + enum bnxt_ulp_byte_order order) +{ + /* validate the arguments */ + if (!blob || bitlen > (8 * sizeof(blob->data))) + return -EINVAL; + + if (bitlen) + blob->bitlen = bitlen; + else + blob->bitlen = BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS; + blob->byte_order = order; + blob->write_idx = 0; + memset(blob->data, 0, sizeof(blob->data)); + return 0; +} + +/** + * Add data to the binary blob at the current offset. + * + * @blob: The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * @data: A pointer to bytes to be added to the blob. 
+ * + * @datalen: The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error. + */ +#define ULP_BLOB_BYTE 8 +#define ULP_BLOB_BYTE_HEX 0xFF +#define BLOB_MASK_CAL(x) ((0xFF << (x)) & 0xFF) +int +ulp_blob_push(struct ulp_blob *blob, + u8 *data, + u32 datalen) +{ + u32 rc; + + /* validate the arguments */ + if (!blob || datalen > (u32)(blob->bitlen - blob->write_idx)) + return -EINVAL; + + if (blob->byte_order == BNXT_ULP_BYTE_ORDER_BE) + rc = ulp_bs_push_msb(blob->data, + blob->write_idx, + datalen, + data); + else + rc = ulp_bs_push_lsb(blob->data, + blob->write_idx, + datalen, + data); + if (!rc) + return -EINVAL; + + blob->write_idx += datalen; + return 0; +} + +/** + * Insert data into the binary blob at the given offset. + * + * @blob: The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * @offset: The offset where the data needs to be inserted. + * + * @data: A pointer to bytes to be added to the blob. + * + * @datalen: The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error. 
+ */ +int +ulp_blob_insert(struct ulp_blob *blob, u32 offset, + u8 *data, u32 datalen) +{ + u8 local_data[BNXT_ULP_FLMP_BLOB_SIZE]; + u16 mov_len; + u32 rc; + + /* validate the arguments */ + if (!blob || datalen > (u32)(blob->bitlen - blob->write_idx) || + offset > blob->write_idx) + return -EINVAL; + + mov_len = blob->write_idx - offset; + /* If offset and data len are not 8 bit aligned then return error */ + if (ULP_BITS_IS_BYTE_NOT_ALIGNED(offset) || + ULP_BITS_IS_BYTE_NOT_ALIGNED(datalen)) + return -EINVAL; + + /* copy the data so we can move the data */ + memcpy(local_data, &blob->data[ULP_BITS_2_BYTE_NR(offset)], + ULP_BITS_2_BYTE(mov_len)); + blob->write_idx = offset; + if (blob->byte_order == BNXT_ULP_BYTE_ORDER_BE) + rc = ulp_bs_push_msb(blob->data, + blob->write_idx, + datalen, + data); + else + rc = ulp_bs_push_lsb(blob->data, + blob->write_idx, + datalen, + data); + if (!rc) + return -EINVAL; + + /* copy the previously stored data */ + memcpy(&blob->data[ULP_BITS_2_BYTE_NR(offset + datalen)], local_data, + ULP_BITS_2_BYTE(mov_len)); + blob->write_idx += (mov_len + datalen); + return 0; +} + +/** + * Add data to the binary blob at the current offset. + * + * @blob: The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * @data: 64-bit value to be added to the blob. + * + * @datalen: The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. + */ +u8 * +ulp_blob_push_64(struct ulp_blob *blob, + u64 *data, + u32 datalen) +{ + u8 *val = (u8 *)data; + int rc; + + int size = (datalen + 7) / 8; + + if (!blob || !data || + datalen > (u32)(blob->bitlen - blob->write_idx)) + return NULL; + + rc = ulp_blob_push(blob, &val[8 - size], datalen); + if (rc) + return NULL; + + return &val[8 - size]; +} + +/** + * Add data to the binary blob at the current offset. + * + * @blob: The blob that data is added to. 
The blob must + * be initialized prior to pushing data. + * + * @data: 32-bit value to be added to the blob. + * + * @datalen: The number of bits to be added ot the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. + */ +u8 * +ulp_blob_push_32(struct ulp_blob *blob, + u32 *data, + u32 datalen) +{ + u8 *val = (u8 *)data; + u32 rc; + u32 size = ULP_BITS_2_BYTE(datalen); + + if (!data || size > sizeof(u32)) + return NULL; + + rc = ulp_blob_push(blob, &val[sizeof(u32) - size], datalen); + if (rc) + return NULL; + + return &val[sizeof(u32) - size]; +} + +/** + * Add encap data to the binary blob at the current offset. + * + * @blob: The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * @data: value to be added to the blob. + * + * @datalen: The number of bits to be added to the blob. + * + * The offset of the data is updated after each push of data. + * NULL returned on error, pointer pushed value otherwise. 
+ */ +int +ulp_blob_push_encap(struct ulp_blob *blob, + u8 *data, + u32 datalen) +{ + u32 initial_size, write_size = datalen; + u8 *val = (u8 *)data; + u32 size = 0; + + if (!blob || !data || + datalen > (u32)(blob->bitlen - blob->write_idx)) + return -EINVAL; + + initial_size = ULP_BYTE_2_BITS(sizeof(u64)) - + (blob->write_idx % ULP_BYTE_2_BITS(sizeof(u64))); + while (write_size > 0) { + if (initial_size && write_size > initial_size) { + size = initial_size; + initial_size = 0; + } else if (initial_size && write_size <= initial_size) { + size = write_size; + initial_size = 0; + } else if (write_size > ULP_BYTE_2_BITS(sizeof(u64))) { + size = ULP_BYTE_2_BITS(sizeof(u64)); + } else { + size = write_size; + } + if (ulp_blob_push(blob, val, size)) + return -EINVAL; + + val += ULP_BITS_2_BYTE(size); + write_size -= size; + } + return 0; +} + +/** + * Adds pad to an initialized blob at the current offset + * + * @blob: The blob that data is added to. The blob must + * be initialized prior to pushing data. + * + * @datalen: The number of bits of pad to add + * + * returns the number of pad bits added, -1 on failure + */ +int +ulp_blob_pad_push(struct ulp_blob *blob, + u32 datalen) +{ + if (datalen > (u32)(blob->bitlen - blob->write_idx)) + return -EINVAL; + + blob->write_idx += datalen; + return 0; +} + +/** + * Adds pad to an initialized blob at the current offset based on + * the alignment. + * + * @blob: The blob that needs to be aligned + * + * @align: Alignment in bits. 
returns the number of pad bits added, -EINVAL on failure
+ */ +void +ulp_bs_pull_lsb(u8 *src, u8 *dst, u32 size, + u32 offset, u32 len) +{ + u32 cnt = ULP_BITS_2_BYTE_NR(len); + u32 idx; + + /* iterate bytewise to get data */ + for (idx = 0; idx < cnt; idx++) { + ulp_bs_get_lsb(src, offset, ULP_BLOB_BYTE, + &dst[size - 1 - idx]); + offset += ULP_BLOB_BYTE; + len -= ULP_BLOB_BYTE; + } + + /* Extract the last reminder data that is not 8 byte boundary */ + if (len) + ulp_bs_get_lsb(src, offset, len, &dst[size - 1 - idx]); +} + +/* Get data from src and put into dst using big-endian format */ +static void +ulp_bs_get_msb(u8 *src, u16 bitpos, u8 bitlen, u8 *dst) +{ + u16 index = ULP_BITS_2_BYTE_NR(bitpos); + u8 bitoffs = bitpos % ULP_BLOB_BYTE; + int shift; + u8 mask; + + shift = ULP_BLOB_BYTE - bitoffs - bitlen; + if (shift >= 0) { + mask = 0xFF >> -bitlen; + *dst = (src[index] >> shift) & mask; + } else { + *dst = (src[index] & (0xFF >> bitoffs)) << -shift; + *dst |= src[index + 1] >> -shift; + } +} + +/** + * Get data from the byte array in Big endian format. + * + * @src: The byte array where data is extracted from + * + * @dst: The byte array where data is pulled into + * + * @offset: The offset where data is pulled + * + * @len: The number of bits to be extracted from the data array + * + * returns None. + */ +void +ulp_bs_pull_msb(u8 *src, u8 *dst, + u32 offset, u32 len) +{ + u32 cnt = ULP_BITS_2_BYTE_NR(len); + u32 idx; + + /* iterate bytewise to get data */ + for (idx = 0; idx < cnt; idx++) { + ulp_bs_get_msb(src, offset, ULP_BLOB_BYTE, &dst[idx]); + offset += ULP_BLOB_BYTE; + len -= ULP_BLOB_BYTE; + } + + /* Extract the last reminder data that is not 8 byte boundary */ + if (len) + ulp_bs_get_msb(src, offset, len, &dst[idx]); +} + +/** + * Extract data from the binary blob using given offset. + * + * @blob: The blob that data is extracted from. The blob must + * be initialized prior to pulling data. + * + * @data: A pointer to put the data. + * @data_size: size of the data buffer in bytes. 
+ *@offset: - Offset in the blob to extract the data in bits format. + * @len: The number of bits to be pulled from the blob. + * + * Output: zero on success, -1 on failure + */ +int +ulp_blob_pull(struct ulp_blob *blob, u8 *data, u32 data_size, + u16 offset, u16 len) +{ + /* validate the arguments */ + if (!blob || (offset + len) > blob->bitlen || + ULP_BYTE_2_BITS(data_size) < len) + return -EINVAL; + + if (blob->byte_order == BNXT_ULP_BYTE_ORDER_BE) + ulp_bs_pull_msb(blob->data, data, offset, len); + else + ulp_bs_pull_lsb(blob->data, data, data_size, offset, len); + return 0; +} + +/** + * Get the data portion of the binary blob. + * + * @blob: The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * @datalen: The number of bits to that are filled. + * + * returns a byte array of the blob data. Returns NULL on error. + */ +u8 * +ulp_blob_data_get(struct ulp_blob *blob, + u16 *datalen) +{ + /* validate the arguments */ + if (!blob) + return NULL; /* failure */ + + *datalen = blob->write_idx; + return blob->data; +} + +/** + * Get the data length of the binary blob. + * + * @blob: The blob's data len to be retrieved. + * + * returns length of the binary blob + */ +int +ulp_blob_data_len_get(struct ulp_blob *blob) +{ + /* validate the arguments */ + if (!blob) + return -EINVAL; + + return blob->write_idx; +} + +/** + * Set the encap swap start index of the binary blob. + * + * @blob: The blob's data to be retrieved. The blob must be + * initialized prior to pushing data. + * + * returns void. + */ +void +ulp_blob_encap_swap_idx_set(struct ulp_blob *blob) +{ + /* validate the arguments */ + if (!blob) + return; /* failure */ + + blob->encap_swap_idx = blob->write_idx; +} + +/** + * Perform the encap buffer swap to 64 bit reversal. + * + * @blob: The blob's data to be used for swap. + * + * returns void. 
+ */ +void +ulp_blob_perform_encap_swap(struct ulp_blob *blob) +{ + u32 i, idx = 0, end_idx = 0, roundoff; + u8 temp_val_1, temp_val_2; + + /* validate the arguments */ + if (!blob) + return; /* failure */ + + idx = ULP_BITS_2_BYTE_NR(blob->encap_swap_idx); + end_idx = ULP_BITS_2_BYTE(blob->write_idx); + roundoff = ULP_BYTE_2_BITS(ULP_BITS_2_BYTE(end_idx)); + if (roundoff > end_idx) { + blob->write_idx += ULP_BYTE_2_BITS(roundoff - end_idx); + end_idx = roundoff; + } + while (idx <= end_idx) { + for (i = 0; i < 4; i = i + 2) { + temp_val_1 = blob->data[idx + i]; + temp_val_2 = blob->data[idx + i + 1]; + blob->data[idx + i] = blob->data[idx + 6 - i]; + blob->data[idx + i + 1] = blob->data[idx + 7 - i]; + blob->data[idx + 7 - i] = temp_val_2; + blob->data[idx + 6 - i] = temp_val_1; + } + idx += 8; + } +} + +/** + * Perform the blob buffer reversal byte wise. + * This api makes the first byte the last and + * vice-versa. + * + * @blob: The blob's data to be used for swap. + * @chunk_size:the swap is done within the chunk in bytes + * + * returns void. + */ +void +ulp_blob_perform_byte_reverse(struct ulp_blob *blob, + u32 chunk_size) +{ + u32 idx = 0, jdx = 0, num = 0; + u8 xchar; + u8 *buff; + + /* validate the arguments */ + if (!blob) + return; /* failure */ + + buff = blob->data; + num = ULP_BITS_2_BYTE(blob->write_idx) / chunk_size; + for (idx = 0; idx < num; idx++) { + for (jdx = 0; jdx < chunk_size / 2; jdx++) { + xchar = buff[jdx]; + buff[jdx] = buff[(chunk_size - 1) - jdx]; + buff[(chunk_size - 1) - jdx] = xchar; + } + buff += chunk_size; + } +} + +/** + * Perform the blob buffer 64 bit word swap. + * This api makes the first 4 bytes the last in + * a given 64 bit value and vice-versa. + * + * @blob: The blob's data to be used for swap. + * + * returns void. 
+ */ +void +ulp_blob_perform_64B_word_swap(struct ulp_blob *blob) +{ + u32 word_size = ULP_64B_IN_BYTES / 2; + u32 i, j, num; + u8 xchar; + + /* validate the arguments */ + if (!blob) + return; /* failure */ + + num = ULP_BITS_2_BYTE(blob->write_idx); + for (i = 0; i < num; i = i + ULP_64B_IN_BYTES) { + for (j = 0; j < word_size; j++) { + xchar = blob->data[i + j]; + blob->data[i + j] = blob->data[i + j + word_size]; + blob->data[i + j + word_size] = xchar; + } + } +} + +/** + * Perform the blob buffer 64 bit byte swap. + * This api makes the first byte the last in + * a given 64 bit value and vice-versa. + * + * @blob: The blob's data to be used for swap. + * + * returns void. + */ +void +ulp_blob_perform_64B_byte_swap(struct ulp_blob *blob) +{ + u32 offset = ULP_64B_IN_BYTES - 1; + u32 i, j, num; + u8 xchar; + + /* validate the arguments */ + if (!blob) + return; /* failure */ + + num = ULP_BITS_2_BYTE(blob->write_idx); + for (i = 0; i < num; i = i + ULP_64B_IN_BYTES) { + for (j = 0; j < (ULP_64B_IN_BYTES / 2); j++) { + xchar = blob->data[i + j]; + blob->data[i + j] = blob->data[i + offset - j]; + blob->data[i + offset - j] = xchar; + } + } +} + +static int +ulp_blob_msb_block_merge(struct ulp_blob *dst, struct ulp_blob *src, + u32 block_size, u32 pad) +{ + u32 i, k, write_bytes, remaining; + u8 *src_buf; + u16 num = 0; + u8 bluff; + + src_buf = ulp_blob_data_get(src, &num); + + for (i = 0; i < num;) { + if (((dst->write_idx % block_size) + (num - i)) > block_size) + write_bytes = block_size - + (dst->write_idx % block_size); + else + write_bytes = num - i; + for (k = 0; k < ULP_BITS_2_BYTE_NR(write_bytes); k++) { + ulp_bs_put_msb(dst->data, dst->write_idx, ULP_BLOB_BYTE, + *src_buf); + dst->write_idx += ULP_BLOB_BYTE; + src_buf++; + } + remaining = write_bytes % ULP_BLOB_BYTE; + if (remaining) { + bluff = (*src_buf) & ((u8)-1 << + (ULP_BLOB_BYTE - remaining)); + ulp_bs_put_msb(dst->data, dst->write_idx, + ULP_BLOB_BYTE, bluff); + dst->write_idx += remaining; + } 
+ if (write_bytes != (num - i)) { + /* add the padding */ + ulp_blob_pad_push(dst, pad); + if (remaining) { + ulp_bs_put_msb(dst->data, dst->write_idx, + ULP_BLOB_BYTE - remaining, + *src_buf); + dst->write_idx += ULP_BLOB_BYTE - remaining; + src_buf++; + } + } + i += write_bytes; + } + return 0; +} + +/** + * Perform the blob buffer merge. + * This api makes the src blob merged to the dst blob. + * The block size and pad size help in padding the dst blob + * + * @dst: The destination blob, the blob to be merged. + * @src: The src blob. + * @block_size: The size of the block after which padding gets applied. + * @pad: The size of the pad to be applied. + * + * returns 0 on success. + */ +int +ulp_blob_block_merge(struct ulp_blob *dst, struct ulp_blob *src, + u32 block_size, u32 pad) +{ + if (dst->byte_order == BNXT_ULP_BYTE_ORDER_BE && + src->byte_order == BNXT_ULP_BYTE_ORDER_BE) + return ulp_blob_msb_block_merge(dst, src, block_size, pad); + + return -EINVAL; +} + +int +ulp_blob_append(struct ulp_blob *dst, struct ulp_blob *src, + u16 src_offset, u16 src_len) +{ + u32 k, remaining; + u8 *src_buf; + u16 num = 0; + u8 bluff; + + src_buf = ulp_blob_data_get(src, &num); + + if ((src_offset + src_len) > num) + return -EINVAL; + + /* Only supporting BE for now */ + if (src->byte_order != BNXT_ULP_BYTE_ORDER_BE || + dst->byte_order != BNXT_ULP_BYTE_ORDER_BE) + return -EINVAL; + + /* Handle if the source offset is not on a byte boundary */ + remaining = src_offset % ULP_BLOB_BYTE; + if (remaining) { + bluff = src_buf[src_offset / ULP_BLOB_BYTE] & ((u8)-1 >> + (ULP_BLOB_BYTE - remaining)); + ulp_bs_put_msb(dst->data, dst->write_idx, + remaining, bluff); + dst->write_idx += remaining; + src_offset += remaining; + } + + src_buf += ULP_BITS_2_BYTE_NR(src_offset); + + /* Push the byte aligned pieces */ + for (k = 0; k < ULP_BITS_2_BYTE_NR(src_len); k++) { + ulp_bs_put_msb(dst->data, dst->write_idx, ULP_BLOB_BYTE, + *src_buf); + dst->write_idx += ULP_BLOB_BYTE; + src_buf++; + } 
+ + /* Handle the remaining if length is not a byte boundary */ + if (src_len > remaining) + remaining = (src_len - remaining) % ULP_BLOB_BYTE; + else + remaining = 0; + if (remaining) { + bluff = (*src_buf) & ((u8)-1 << + (ULP_BLOB_BYTE - remaining)); + ulp_bs_put_msb(dst->data, dst->write_idx, + ULP_BLOB_BYTE, bluff); + dst->write_idx += remaining; + } + + return 0; +} + +/** + * Perform the blob buffer copy. + * This api makes the src blob merged to the dst blob. + * + * @dst: The destination blob, the blob to be merged. + * @src: The src blob. + * + * returns 0 on success. + */ +int +ulp_blob_buffer_copy(struct ulp_blob *dst, struct ulp_blob *src) +{ + if ((dst->write_idx + src->write_idx) > dst->bitlen) + return -EINVAL; + if (ULP_BITS_IS_BYTE_NOT_ALIGNED(dst->write_idx) || + ULP_BITS_IS_BYTE_NOT_ALIGNED(src->write_idx)) + return -EINVAL; + memcpy(&dst->data[ULP_BITS_2_BYTE_NR(dst->write_idx)], + src->data, ULP_BITS_2_BYTE_NR(src->write_idx)); + dst->write_idx += src->write_idx; + return 0; +} + +/** + * Read data from the operand + * + * @operand: A pointer to a 16 Byte operand + * + * @val: The variable to copy the operand to + * + * @bytes: The number of bytes to read into val + * + * returns number of bits read, zero on error + */ +int +ulp_operand_read(u8 *operand, + u8 *val, + u16 bytes) +{ + /* validate the arguments */ + if (!operand || !val) + return -EINVAL; + + memcpy(val, operand, bytes); + return 0; +} + +/** + * Check the buffer is empty + * + * @buf: The buffer + * @size: The size of the buffer + * + */ +int ulp_buffer_is_empty(const u8 *buf, u32 size) +{ + return buf[0] == 0 && !memcmp(buf, buf + 1, size - 1); +} + +/* Function to check if bitmap is zero. */ +u32 ulp_bitmap_is_zero(u8 *bitmap, int size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return false; + bitmap++; + } + return true; +} + +/* Function to check if bitmap is ones. 
*/ +u32 ulp_bitmap_is_ones(u8 *bitmap, int size) +{ + while (size-- > 0) { + if (*bitmap != 0xFF) + return false; + bitmap++; + } + return true; +} + +/* Function to check if bitmap is not zero. */ +u32 ulp_bitmap_notzero(const u8 *bitmap, int size) +{ + while (size-- > 0) { + if (*bitmap != 0) + return true; + bitmap++; + } + return false; +} + +/* returns 0 if input is power of 2 */ +int ulp_util_is_power_of_2(u64 x) +{ + if (((x - 1) & x)) + return -1; + return 0; +} +#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.h b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.h new file mode 100644 index 000000000000..32f52ea4d621 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tf_ulp/ulp_utils.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _ULP_UTILS_H_ +#define _ULP_UTILS_H_ + +#include "bnxt.h" +#include "ulp_template_db_enum.h" + +#define ULP_BUFFER_ALIGN_8_BITS 8 +#define ULP_BUFFER_ALIGN_8_BYTE 8 +#define ULP_BUFFER_ALIGN_16_BYTE 16 +#define ULP_BUFFER_ALIGN_64_BYTE 64 +#define ULP_64B_IN_BYTES 8 +#define ULP_64B_IN_BITS 64 + +/* Macros for bitmap sets and gets + * These macros can be used if the val are power of 2. + */ +#define ULP_BITMAP_SET(bitmap, val) ((bitmap) |= (val)) +#define ULP_BITMAP_RESET(bitmap, val) ((bitmap) &= ~(val)) +#define ULP_BITMAP_ISSET(bitmap, val) ((bitmap) & (val)) +#define ULP_BITMAP_CMP(b1, b2) memcmp(&(b1)->bits, \ + &(b2)->bits, sizeof((b1)->bits)) +/* Macros for bitmap sets and gets + * These macros can be used if the val are not power of 2 and + * are simple index values. 
+ */ +#define ULP_INDEX_BITMAP_SIZE (sizeof(u64) * 8) +#define ULP_INDEX_BITMAP_CSET(i) (1UL << \ + ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))) + +#define ULP_INDEX_BITMAP_SET(b, i) ((b) |= \ + (1UL << ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE)))) + +#define ULP_INDEX_BITMAP_RESET(b, i) ((b) &= \ + (~(1UL << ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))))) + +#define ULP_INDEX_BITMAP_GET(b, i) (((b) >> \ + ((ULP_INDEX_BITMAP_SIZE - 1) - \ + ((i) % ULP_INDEX_BITMAP_SIZE))) & 1) + +#define ULP_DEVICE_PARAMS_INDEX(tid, dev_id) \ + (((tid) << BNXT_ULP_LOG2_MAX_NUM_DEV) | (dev_id)) + +/* Macro to convert bytes to bits */ +#define ULP_BYTE_2_BITS(byte_x) ((byte_x) * 8) +/* Macro to convert bits to bytes */ +#define ULP_BITS_2_BYTE(bits_x) (((bits_x) + 7) / 8) +/* Macro to convert bits to bytes with no round off*/ +#define ULP_BITS_2_BYTE_NR(bits_x) ((bits_x) / 8) + +/* Macro to round off to next multiple of 8*/ +#define ULP_BYTE_ROUND_OFF_8(x) (((x) + 7) & ~7) + +/* Macro to check bits are byte aligned */ +#define ULP_BITS_IS_BYTE_NOT_ALIGNED(x) ((x) % 8) + +/* Macro for word conversion*/ +#define ULP_BITS_TO_4_BYTE_WORD(x) (((x) + 31) / 32) +#define ULP_BITS_TO_32_BYTE_WORD(x) (((x) + 255) / 256) +#define ULP_BITS_TO_4_BYTE_QWORDS(x) (((x) + 127) / 128) +#define ULP_BITS_TO_128B_ALIGNED_BYTES(x) ((((x) + 127) / 128) * 16) + +/* Macros to read the computed fields */ +#define ULP_COMP_FLD_IDX_RD(params, idx) \ + be64_to_cpu((params)->comp_fld[(idx)]) + +#define ULP_COMP_FLD_IDX_WR(params, idx, val) \ + ((params)->comp_fld[(idx)] = cpu_to_be64((u64)(val))) + +enum bnxt_ulp_resource_type { + BNXT_ULP_RESOURCE_TYPE_FULL_ACT, + BNXT_ULP_RESOURCE_TYPE_COMPACT_ACT, + BNXT_ULP_RESOURCE_TYPE_MCG_ACT, + BNXT_ULP_RESOURCE_TYPE_MODIFY, + BNXT_ULP_RESOURCE_TYPE_STAT, + BNXT_ULP_RESOURCE_TYPE_SRC_PROP, + BNXT_ULP_RESOURCE_TYPE_ENCAP +}; + +/* Making the blob statically sized to 128 bytes for now. 
+ * The blob must be initialized with ulp_blob_init prior to using. + */ +#define BNXT_ULP_FLMP_BLOB_SIZE (128) +#define BNXT_ULP_FLMP_BLOB_SIZE_IN_BITS ULP_BYTE_2_BITS(BNXT_ULP_FLMP_BLOB_SIZE) +struct ulp_blob { + enum bnxt_ulp_byte_order byte_order; + u16 write_idx; + u16 bitlen; + u8 data[BNXT_ULP_FLMP_BLOB_SIZE]; + u16 encap_swap_idx; +}; + +/* The data can likely be only 32 bits for now. Just size check + * the data when being written. + */ +#define ULP_REGFILE_ENTRY_SIZE (sizeof(u32)) +struct ulp_regfile_entry { + u64 data; + u32 size; +}; + +struct ulp_regfile { + struct ulp_regfile_entry entry[BNXT_ULP_RF_IDX_LAST]; +}; + +int +ulp_regfile_init(struct ulp_regfile *regfile); + +int +ulp_regfile_read(struct ulp_regfile *regfile, + enum bnxt_ulp_rf_idx field, + u64 *data); + +int +ulp_regfile_write(struct ulp_regfile *regfile, + enum bnxt_ulp_rf_idx field, + u64 data); + +u32 +ulp_bs_push_lsb(u8 *bs, u16 pos, u8 len, u8 *val); + +u32 +ulp_bs_push_msb(u8 *bs, u16 pos, u8 len, u8 *val); + +int +ulp_blob_init(struct ulp_blob *blob, + u16 bitlen, + enum bnxt_ulp_byte_order order); + +int +ulp_blob_push(struct ulp_blob *blob, + u8 *data, + u32 datalen); + +int +ulp_blob_insert(struct ulp_blob *blob, u32 offset, + u8 *data, u32 datalen); + +u8 * +ulp_blob_push_64(struct ulp_blob *blob, + u64 *data, + u32 datalen); + +u8 * +ulp_blob_push_32(struct ulp_blob *blob, + u32 *data, + u32 datalen); + +int +ulp_blob_push_encap(struct ulp_blob *blob, + u8 *data, + u32 datalen); + +u8 * +ulp_blob_data_get(struct ulp_blob *blob, + u16 *datalen); + +int +ulp_blob_data_len_get(struct ulp_blob *blob); + +void +ulp_bs_pull_lsb(u8 *src, u8 *dst, u32 size, + u32 offset, u32 len); + +void +ulp_bs_pull_msb(u8 *src, u8 *dst, + u32 offset, u32 len); + +int +ulp_blob_pull(struct ulp_blob *blob, u8 *data, u32 data_size, + u16 offset, u16 len); + +int +ulp_blob_pad_push(struct ulp_blob *blob, + u32 datalen); + +int +ulp_blob_pad_align(struct ulp_blob *blob, + u32 align); + +void 
+ulp_blob_encap_swap_idx_set(struct ulp_blob *blob); + +void +ulp_blob_perform_encap_swap(struct ulp_blob *blob); + +void +ulp_blob_perform_byte_reverse(struct ulp_blob *blob, + u32 chunk_size); + +void +ulp_blob_perform_64B_word_swap(struct ulp_blob *blob); + +void +ulp_blob_perform_64B_byte_swap(struct ulp_blob *blob); + +int +ulp_blob_block_merge(struct ulp_blob *dst, struct ulp_blob *src, + u32 block_size, u32 pad); + +int +ulp_blob_append(struct ulp_blob *dst, struct ulp_blob *src, + u16 src_offset, u16 src_len); + +int +ulp_blob_buffer_copy(struct ulp_blob *dst, struct ulp_blob *src); + +int +ulp_operand_read(u8 *operand, + u8 *val, + u16 bitlen); + +int ulp_buffer_is_empty(const u8 *buf, u32 size); + +/* Function to check if bitmap is zero.Return 1 on success */ +u32 ulp_bitmap_is_zero(u8 *bitmap, int size); + +/* Function to check if bitmap is ones. Return 1 on success */ +u32 ulp_bitmap_is_ones(u8 *bitmap, int size); + +/* Function to check if bitmap is not zero. Return 1 on success */ +u32 ulp_bitmap_notzero(const u8 *bitmap, int size); + +/* returns 0 if input is power of 2 */ +int ulp_util_is_power_of_2(u64 x); + +#endif /* _ULP_UTILS_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc.h new file mode 100644 index 000000000000..e0424474ed82 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc.h @@ -0,0 +1,978 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. + * + * TFC (Truflow Core v3) API Header File + * API Guidance: + * + * 1. If more than 5-6 parameters, please define structures + * + * 2. Design structures that can be used with multiple APIs + * + * 3. If items in structures are not to be used, these must + * be documented in the API header IN DETAIL. + * + * 4. Use defines in cfa_types.h where possible. These are shared + * firmware types to avoid duplication. 
These types do + * not represent the HWRM interface and may need to be mapped + * to HWRM definitions. + * + * 5. Resource types and subtypes are defined in cfa_resources.h + */ + +#ifndef _TFC_H_ +#define _TFC_H_ + +#include "cfa_resources.h" +#include "cfa_types.h" + +/* TFC handle + */ +struct tfc { + void *tfo; /* Pointer to the private tfc object */ + void *bp; /* the pointer to the parent bp struct */ +}; + +/********* BEGIN API FUNCTION PROTOTYPES/PARAMETERS **********/ +/** + * Allocate the TFC state for this DPDK port/function. The TF + * object memory is allocated during this API call. + * + * @tfcp: Pointer to TFC handle + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * + * This API will initialize only the software state. + */ +int tfc_open(struct tfc *tfcp); + +/** + * De-allocate the TFC state for this DPDK port/function. The TF + * object memory is deallocated during this API call. + * + * @tfcp: Pointer to TFC handle + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * + * This API will reset only the software state. + */ +int tfc_close(struct tfc *tfcp); + +/* The maximum number of foreseeable resource types. + * Use cfa_resource_types enum internally. + */ +#define TFC_MAX_RESOURCE_TYPES 32 + +/* Supported resource information + */ +struct tfc_resources { + u32 rtypes_mask; /* Resource subtype mask of valid resource types */ + u8 max_rtype; /* Maximum resource type number */ + u32 rsubtypes_mask[TFC_MAX_RESOURCE_TYPES]; /* Array indicating valid subtypes */ +}; + +/** + * Get all supported CFA resource types for the device + * + * This API goes to the firmware to query all supported resource + * types and subtypes supported. + * + * @tfcp: Pointer to TFC handle + * @resources: Pointer to a structure containing information about the supported + * CFA device resources. 
+ * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_resource_types_query(struct tfc *tfcp, struct tfc_resources *resources); + +/** + * Allocate a TFC session + * + * This API goes to the firmware to allocate a TFC session id and associate a + * forwarding function with the session. + * + * @tfcp: Pointer to TFC handle + * @fid: Function id to associate with the session + * @sid: Pointer to where the session id will be Returned + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_session_id_alloc(struct tfc *tfcp, u16 fid, u16 *sid); +/** + * This API sets the session id to a pre-existing session + * + * When NIC flow tracking by function is enabled, the session should be set to the + * AFM session. + * + * @tfcp: Pointer to TFC handle + * @sid: The session id to set + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_session_id_set(struct tfc *tfcp, u16 sid); + +/** + * Associate a forwarding function with an existing TFC session + * + * @tfcp: Pointer to TFC handle + * @fid: Function id to associate with the session + * @sid: The session id to associate with + * @fid_cnt: The number of forwarding functions currently associated with the + * session + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_session_fid_add(struct tfc *tfcp, u16 fid, u16 sid, + u16 *fid_cnt); +/** + * Disassociate a forwarding function from an existing TFC session + * + * Once the last function has been removed from the session in the firmware + * the session is freed and all associated resources freed.
+ * + * @tfcp: Pointer to TFC handle + * @fid: Function id to associate with the session + * @fid_cnt: The number of forwarding functions currently associated with the session + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_session_fid_rem(struct tfc *tfcp, u16 fid, u16 *fid_cnt); + +/* Domain id range + */ +enum tfc_domain_id { + TFC_DOMAIN_ID_INVALID = 0, + TFC_DOMAIN_ID_1, + TFC_DOMAIN_ID_2, + TFC_DOMAIN_ID_3, + TFC_DOMAIN_ID_4, + TFC_DOMAIN_ID_MAX = TFC_DOMAIN_ID_4 +}; + +/* Global id request definition + */ +struct tfc_global_id_req { + enum cfa_resource_type rtype; /* Resource type */ + u8 rsubtype; /* Resource subtype */ + enum cfa_dir dir; /* Direction */ + u16 cnt; /* Number of resources to allocate of this type */ +}; + +/* Global id resource definition + */ +struct tfc_global_id { + enum cfa_resource_type rtype; /* Resource type */ + u8 rsubtype; /* Resource subtype */ + enum cfa_dir dir; /* Direction */ + u16 id; /* Resource id */ +}; + +/** + * Allocate global TFC resources + * + * Some resources are not owned by a single session. They are "global" in that + * they will be in use as long as any associated session exists. Once all + * sessions/functions have been removed, all associated global ids are freed. + * There are currently up to 4 global id domain sets. + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @domain_id: The domain id to associate. + * @req_cnt: The number of total resource requests + * @glb_id_req: The list of global id requests + * @rsp_cnt: The number of items in the response buffer + * @glb_id_rsp: The list of global id responses + * @first: This is the first domain request for the indicated domain id.
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_global_id_alloc(struct tfc *tfcp, u16 fid, enum tfc_domain_id domain_id, + u16 req_cnt, + const struct tfc_global_id_req *glb_id_req, + struct tfc_global_id *glb_id_rsp, u16 *rsp_cnt, + bool *first); +/* Identifier resource structure + */ +struct tfc_identifier_info { + enum cfa_resource_subtype_ident rsubtype; /* resource subtype */ + enum cfa_dir dir; /* direction rx/tx */ + u16 id; /* alloc/free index */ +}; + +/** + * allocate a TFC Identifier + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tt: Track type - either track by session or by function + * @ident_info: All the information related to the requested identifier (subtype/dir) and + * the Returned identifier id. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_identifier_alloc(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + struct tfc_identifier_info *ident_info); + +/** + * free a TFC Identifier + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @ident_info: All the information related to the requested identifier (subtype/dir) and + * the identifier id to free. + * + * Returns success or failure code. + */ +int tfc_identifier_free(struct tfc *tfcp, u16 fid, + const struct tfc_identifier_info *ident_info); + +/* Index table resource structure + */ +struct tfc_idx_tbl_info { + enum cfa_resource_subtype_idx_tbl rsubtype; /* resource subtype */ + enum cfa_dir dir; /* direction rx/tx */ + u16 id; /* alloc/free index */ +}; + +/** + * allocate a TFC index table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tt: either track by session or by function + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) and the Returned id. 
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_idx_tbl_alloc(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info); + +/** + * allocate and set a TFC index table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tt: either track by session or by function + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) and the Returned id. + * @data: Pointer to the data to write to the entry. The data is aligned + * correctly in the buffer for writing to the hardware. + * @data_sz_in_bytes: The size of the entry in bytes for Thor2. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_idx_tbl_alloc_set(struct tfc *tfcp, u16 fid, + enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes); + +/** + * Set a TFC index table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) including the id. + * @data: Pointer to the data to write to the entry. The data is aligned + * correctly in the buffer for writing to the hardware. + * @data_sz_in_bytes: The size of the entry in device sized bytes for Thor2. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_idx_tbl_set(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes); + +/** + * Get a TFC index table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) including the id. + * @data: Pointer to the data to read from the entry. + * @data_sz_in_bytes: The size of the entry in device sized bytes for Thor2. + * Input is the size of the buffer, output is the actual size. 
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_idx_tbl_get(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info, + u32 *data, u8 *data_sz_in_bytes); + +/** + * Free a TFC index table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) and the Returned id. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_idx_tbl_free(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info); + +/* Tcam table info structure + */ +struct tfc_tcam_info { + enum cfa_resource_subtype_tcam rsubtype; /* resource subtype */ + enum cfa_dir dir; /* direction rx/tx */ + u16 id; /* alloc/free index */ +}; + +/* Tcam table resource structure + */ +struct tfc_tcam_data { + u8 *key; /* tcam key */ + u8 *mask; /* tcam mask */ + u8 *remap; /* remap */ + u8 key_sz_in_bytes; /* key size in bytes */ + u8 remap_sz_in_bytes; /* remap size in bytes */ +}; + +/** + * allocate a TFC TCAM entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tt: Either track by session or by function + * @priority: the priority of the tcam entry + * @tcam_info: All the information related to the requested index table entry + * (subtype/dir) and the Returned id. + * @key_sz_in_bytes: The size of the entry in bytes for Thor2. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tcam_alloc(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + u8 priority, u8 key_sz_in_bytes, + struct tfc_tcam_info *tcam_info); + +/** + * allocate and set a TFC TCAM entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tt: either track by session or by function + * @priority: the priority of the tcam entry + * @tcam_info: All the information related to the requested TCAM table entry + * (subtype/dir) and the Returned id. 
+ * @tcam_data: Pointer to the tcam data, including tcam, mask, and remap, + * to write to the entry. The data is aligned in the buffer for + * writing to the hardware. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tcam_alloc_set(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + u8 priority, struct tfc_tcam_info *tcam_info, + const struct tfc_tcam_data *tcam_data); + +/** + * Set a TFC TCAM entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tcam_info: All the information related to the requested TCAM entry + * (subtype/dir) including the id. + * @tcam_data: Pointer to the tcam data, including tcam, mask, and remap, + * to write to the entry. The data is aligned in the buffer + * for writing to the hardware. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tcam_set(struct tfc *tfcp, u16 fid, + const struct tfc_tcam_info *tcam_info, + const struct tfc_tcam_data *tcam_data); + +/** + * Get a TFC TCAM entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tcam_info: All the information related to the requested TCAM entry + * (subtype/dir) including the id. + * @tcam_data: Pointer to the tcam data, including tcam, mask, and remap, + * to read from the entry. The data is aligned in the buffer + * as read from the hardware. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tcam_get(struct tfc *tfcp, u16 fid, + const struct tfc_tcam_info *tcam_info, + struct tfc_tcam_data *tcam_data); +/** + * Free a TFC TCAM entry + * + * @fid: Function ID to be used + * @tfcp: Pointer to TFC handle + * @tcam_info: All the information related to the requested tcam entry (subtype/dir) + * and the id to be freed.
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tcam_free(struct tfc *tfcp, u16 fid, + const struct tfc_tcam_info *tcam_info); + +/* tfc_tbl_scope_bucket_factor indicates a multiplier factor for determining the + * static and dynamic buckets counts. The larger the factor, the more buckets + * will be allocated. + * + * This is necessary because flows will not hash so as to perfectly fill all the + * buckets. It is necessary to add some allowance for not fully populated + * buckets. + */ +enum tfc_tbl_scope_bucket_factor { + TFC_TBL_SCOPE_BUCKET_FACTOR_1 = 1, + TFC_TBL_SCOPE_BUCKET_FACTOR_2 = 2, + TFC_TBL_SCOPE_BUCKET_FACTOR_4 = 4, + TFC_TBL_SCOPE_BUCKET_FACTOR_8 = 8, + TFC_TBL_SCOPE_BUCKET_FACTOR_16 = 16, + TFC_TBL_SCOPE_BUCKET_FACTOR_MAX = TFC_TBL_SCOPE_BUCKET_FACTOR_16 +}; + +/* tfc_tbl_scope_size_query_parms contains the parameters for the + * tfc_tbl_scope_size_query API. + */ +struct tfc_tbl_scope_size_query_parms { + /* If a shared table scope, dynamic buckets are disabled. This + * affects the calculation for static buckets in this function. + * Initially, if not shared, the size of the static bucket table should + * be double the number of flows supported. Numbers are validated + * against static_cnt and dynamic_cnt + */ + bool shared; + /* Direction indexed array indicating the number of flows. Must be + * at least as large as the number entries that the buckets can point + * to. + */ + u32 flow_cnt[CFA_DIR_MAX]; + /* tfc_tbl_scope_bucket_factor indicates a multiplier factor for + * determining the static and dynamic buckets counts. The larger the + * factor, the more buckets will be allocated. + */ + enum tfc_tbl_scope_bucket_factor factor; + /* The number of pools each region of the table scope will be + * divided into. + */ + u32 max_pools; + /* Direction indexed array indicating the key size. */ + u16 key_sz_in_bytes[CFA_DIR_MAX]; + /* Direction indexed array indicating the action record size. 
Must + * be a multiple of 32B lines on Thor2. + */ + u16 act_rec_sz_in_bytes[CFA_DIR_MAX]; + /* Direction indexed array indicating the EM static bucket count + * expressed as: log2(static_bucket_count). For example if 1024 static + * buckets, 1024=2^10, so the value 10 would be Returned. + */ + u8 static_bucket_cnt_exp[CFA_DIR_MAX]; + /* Direction indexed array indicating the EM dynamic bucket count. */ + u32 dynamic_bucket_cnt[CFA_DIR_MAX]; + /* The number of minimum sized lkup records per direction. In + * this usage, records are the minimum lookup memory allocation unit in + * a table scope. This value is the total memory required for buckets + * and entries. + * + * Note: The EAS variously refers to these as words or cache-lines. + * + * For example, on Thor2 where each bucket consumes one record, if the + * key size is such that the LREC and key use 2 records, then the + * lkup_rec_cnt = the number of buckets + (2 * the number of flows). + */ + u32 lkup_rec_cnt[CFA_DIR_MAX]; + /* The number of minimum sized action records per direction. + * Similar to the lkup_rec_cnt, records are the minimum action memory + * allocation unit in a table scope. + */ + u32 act_rec_cnt[CFA_DIR_MAX]; + /* Direction indexed array indicating the size of each individual + * lookup record pool expressed as: log2(max_records/max_pools). For + * example if 1024 records and 2 pools 1024/2=512=2^9, so the value 9 + * would be entered. + */ + u8 lkup_pool_sz_exp[CFA_DIR_MAX]; + /* Direction indexed array indicating the size of each individual + * action record pool expressed as: log2(max_records/max_pools). For + * example if 1024 records and 2 pools 1024/2=512=2^9, so the value 9 + * would be entered. + */ + u8 act_pool_sz_exp[CFA_DIR_MAX]; + + /* Direction indexed array indicating the offset in records from + * the start of the memory after the static buckets where the first + * lrec pool begins. 
+ */ + u32 lkup_rec_start_offset[CFA_DIR_MAX]; +}; + +/* tfc_tbl_scope_mem_alloc_parms contains the parameters for allocating memory + * to be used by a table scope. + */ +struct tfc_tbl_scope_mem_alloc_parms { + /* If a shared table scope, indicates whether this is the first. + * If the first, the table scope memory will be allocated. Otherwise + * only the details of the configuration will be stored internally + * for use - i.e. act_rec_cnt/lkup_rec_cnt/lkup_rec_start_offset. + */ + bool first; + /* Direction indexed array indicating the EM static bucket count + * expressed as: log2(static_bucket_count). For example if 1024 static + * buckets, 1024=2^10, so the value 10 would be entered. + */ + u8 static_bucket_cnt_exp[CFA_DIR_MAX]; + /* Direction indexed array indicating the EM dynamic bucket count. */ + u8 dynamic_bucket_cnt[CFA_DIR_MAX]; + /* The number of minimum sized lkup records per direction. In this + * usage, records are the minimum lookup memory allocation unit in a + * table scope. This value is the total memory required for buckets and + * entries. + */ + u32 lkup_rec_cnt[CFA_DIR_MAX]; + /* The number of minimum sized action records per direction. + * Similar to the lkup_rec_cnt, records are the minimum action memory + * allocation unit in a table scope. + */ + u32 act_rec_cnt[CFA_DIR_MAX]; + /* The page size used for allocation. If running in the kernel + * driver, this may be as small as 1KB. For huge pages this may be more + * commonly 2MB. Supported values include 4K, 8K, 64K, 2M, 8M and 1GB. + */ + u32 pbl_page_sz_in_bytes; + /* Indicates local application vs remote application table scope. A + * table scope can be created on a PF for its own use or for use by + * other children. These may or may not be shared table scopes. Set + * local to false if calling API on behalf of a remote client VF. + * (alternatively, we could pass in the remote fid or the local fid). + */ + bool local; + /* The maximum number of pools supported.
*/ + u8 max_pools; + /* Direction indexed array indicating the action table pool size + * expressed as: log2(act_pool_sz). For example if 1024 static + * buckets, 1024=2^10, so the value 10 would be entered. + */ + u8 act_pool_sz_exp[CFA_DIR_MAX]; + /* Direction indexed array indicating the lookup table pool size + * expressed as: log2(lkup_pool_sz). For example if 1024 static + * buckets, 1024=2^10, so the value 10 would be entered. + */ + u8 lkup_pool_sz_exp[CFA_DIR_MAX]; + /* Lookup table record start offset. Offset in 32B records after + * the static buckets where the lookup records and dynamic bucket memory + * will begin. + */ + u32 lkup_rec_start_offset[CFA_DIR_MAX]; +}; + +/** + * Determine whether table scopes are supported in the hardware. + * + * @tfcp: Pointer to TFC handle + * @tbl_scope_capable: True if table scopes are supported in the firmware. + * @max_lkup_rec_cnt: The maximum number of lookup records in a table scope + * @max_act_rec_cnt: The maximum number of action records in a table scope + * @max_lkup_static_buckets_exp: The log2 of the maximum number of lookup + * static buckets in a table scope + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable, + u32 *max_lkup_rec_cnt, + u32 *max_act_rec_cnt, + u8 *max_lkup_static_buckets_exp); + +/** + * Determine table scope sizing + * + * @tfcp: Pointer to TFC handle + * @parms: The parameters used by this function. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_size_query(struct tfc *tfcp, + struct tfc_tbl_scope_size_query_parms *parms); + +/** + * Allocate a table scope + * + * @tfcp: Pointer to TFC handle + * @shared: Create a shared table scope. + * @app_type: application type, TF or AFM. + * @tsid: The allocated table scope ID. + * @first: True if the caller is the creator of this table scope. + * If not shared, first is always set. 
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_id_alloc(struct tfc *tfcp, bool shared, + enum cfa_app_type app_type, u8 *tsid, + bool *first); + +/** + * Allocate memory for a table scope + * + * @tfcp: Pointer to TFC handle + * @fid: Function id requesting the memory allocation + * @tsid: Table scope identifier + * @parms: Memory allocation parameters + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_mem_alloc(struct tfc *tfcp, u16 fid, u8 tsid, + struct tfc_tbl_scope_mem_alloc_parms *parms); + +/** + * Free memory for a table scope + * + * @tfcp: Pointer to TFC handle + * @fid: Function id for memory to free from the table scope. Set to INVALID_FID + * by default. Populated when VF2PF mem_free message received from a VF + * for a shared table scope. + * @tsid: Table scope identifier + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_mem_free(struct tfc *tfcp, u16 fid, u8 tsid); + +/* tfc_tbl_scope_cpm_alloc_parms contains the parameters for allocating a + * CPM instance to be used by a table scope. + */ +struct tfc_tbl_scope_cpm_alloc_parms { + /* + * Direction indexed array indicating the maximum number of lookup + * contiguous records. + */ + u8 lkup_max_contig_rec[CFA_DIR_MAX]; + /* + * Direction indexed array indicating the maximum number of action + * contiguous records. + */ + u8 act_max_contig_rec[CFA_DIR_MAX]; + /* The maximum number of pools supported by the table scope. 
*/ + u16 max_pools; +}; + +/** + * Allocate CFA Pool Manager (CPM) Instance + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope identifier + * @cpm_parms: Pointer to the CFA Pool Manager parameters + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_cpm_alloc(struct tfc *tfcp, u8 tsid, + struct tfc_tbl_scope_cpm_alloc_parms *cpm_parms); + +/** + * Free CPM Instance + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope identifier + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_cpm_free(struct tfc *tfcp, u8 tsid); + +/** + * Associate a forwarding function with an existing table scope + * + * @tfcp: Pointer to TFC handle + * @fid: Function id to associate with the table scope + * @tsid: Table scope identifier + * @fid_cnt: The number of forwarding functions currently associated with the table scope + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_fid_add(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt); + +/** + * Disassociate a forwarding function from an existing TFC table scope + * + * Once the last function has been removed from the table scope in the firmware + * the table scope is freed and all associated resources freed. + * + * @tfcp: Pointer to TFC handle + * @fid: Function id to remove from the table scope + * @tsid: Table scope identifier + * @fid_cnt: The number of forwarding functions currently associated with the table scope + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_fid_rem(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt); + +/** + * Pool allocation + * + * Allocate a pool ID and set its size + * + * @tfcp: Pointer to TFC handle + * @fid: Function id allocating the pool + * @tsid: Table scope identifier + * @region: Pool region identifier + * @dir: Direction + * @pool_sz_exp: Pool size exponent + * @pool_id: Used to Return the allocated pool ID.
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_pool_alloc(struct tfc *tfcp, u16 fid, u8 tsid, + enum cfa_region_type region, + enum cfa_dir dir, u8 *pool_sz_exp, u16 *pool_id); + +/** + * Pool free + * + * Free a pool ID + * + * @tfcp: Pointer to TFC handle + * @fid: Function freeing the pool + * @tsid: Table scope identifier + * @region: Pool region identifier + * @dir: Direction + * @pool_id: Used to Return the allocated pool ID. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_pool_free(struct tfc *tfcp, u16 fid, u8 tsid, + enum cfa_region_type region, enum cfa_dir dir, + u16 pool_id); + +/** + * Get configured state + * + * This API is intended for DPDK applications where a single table scope is shared + * across one or more DPDK instances. When one instance succeeds to allocate and + * configure a table scope, it then sets the config for that table scope; while + * other sessions polling and waiting for the shared table scope to be configured. + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope identifier + * @configured: Used to Return the allocated pool ID. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_config_state_get(struct tfc *tfcp, u8 tsid, bool *configured); + +/** + * Table scope function reset + * + * Delete resources and EM entries associated with fid. + * + * @tfcp: Pointer to TFC handle + * @fid: Table scope identifier + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_tbl_scope_func_reset(struct tfc *tfcp, u16 fid); + +/* tfc_em_insert_parms contains the parameters for an EM insert. */ +struct tfc_em_insert_parms { + enum cfa_dir dir; /* Entry direction. */ + u8 *lkup_key_data; /* ptr to the combined lkup record and key data to be written. */ + u16 lkup_key_sz_words; /* The size of the entry to write in 32b words. 
*/ + const u8 *key_data; /* Thor only - The key data to be used to calculate the hash. */ + u16 key_sz_bits; /* Thor only - Size of key in bits. */ + u64 *flow_handle; /* Will contain the entry flow handle a unique identifier. */ +}; + +/** + * Insert an EM Entry + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope id + * @parms: EM insert params + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * Error codes -1 through -9 indicate an MPC error and the + * positive value of the error code maps directly on to the + * MPC error code. For example, if the value -8 is Returned + * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred. + */ +int tfc_em_insert(struct tfc *tfcp, u8 tsid, struct tfc_em_insert_parms *parms); + +/** + * tfc_em_delete_parms Contains args required to delete an EM Entry + * + * @tfcp: Pointer to TFC handle + * @dir: Direction (CFA_DIR_RX or CFA_DIR_TX) + * @flow_handle: The flow handle Returned to be used for flow deletion. + * + */ +struct tfc_em_delete_parms { + /* Entry direction. */ + enum cfa_dir dir; + /* Flow handle of flow to delete */ + u64 flow_handle; +}; + +/** + * Delete an EM Entry + * + * @tfcp: Pointer to TFC handle + * @parms: EM delete parameters + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * Error codes -1 through -9 indicate an MPC error and the + * positive value of the error code maps directly on to the + * MPC error code. For example, if the value -8 is Returned + * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred. 
+ */ +int tfc_em_delete(struct tfc *tfcp, struct tfc_em_delete_parms *parms); + +/* CMM resource structure */ +struct tfc_cmm_info { + enum cfa_resource_subtype_cmm rsubtype; /* resource subtype */ + enum cfa_dir dir; /* direction rx/tx */ + u64 act_handle; /* alloc/free handle */ +}; + +/** + * CMM resource clear structure + */ +struct tfc_cmm_clr { + bool clr; /**< flag for clear */ + u16 offset_in_byte; /**< field offset in byte */ + u16 sz_in_byte; /**< field size in byte */ +}; + +/** + * Allocate an action CMM Resource + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope id + * @cmm_info: Pointer to cmm info + * @num_contig_rec: Num contiguous records required. Record size is 8B for + * Thor/32B for Thor2. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_act_alloc(struct tfc *tfcp, u8 tsid, struct tfc_cmm_info *cmm_info, + u16 num_contig_rec); + +/** + * Set an action CMM resource + * + * @tfcp: Pointer to TFC handle + * @cmm_info: Pointer to cmm info. + * @data: Data to be written. + * @data_sz_words: Data buffer size in words. In 8B increments. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * Error codes -1 through -9 indicate an MPC error and the + * positive value of the error code maps directly on to the + * MPC error code. For example, if the value -8 is Returned + * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred. + */ +int tfc_act_set(struct tfc *tfcp, const struct tfc_cmm_info *cmm_info, + const u8 *data, u16 data_sz_words); + +/** + * Get an action CMM resource + * + * @tfcp: Pointer to TFC handle + * @cmm_info: Pointer to cmm info + * @cmm_clr: Pointer to cmm clr + * @data: Data read. Must be word aligned, i.e. [1:0] must be 0. + * @data_sz_words: Data buffer size in words. 
Size could be 8/16/24/32/64B + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + * Error codes -1 through -9 indicate an MPC error and the + * positive value of the error code maps directly on to the + * MPC error code. For example, if the value -8 is Returned + * it indicates a CFA_BLD_MPC_EM_DUPLICATE error occurred. + */ +int tfc_act_get(struct tfc *tfcp, const struct tfc_cmm_info *cmm_info, + struct tfc_cmm_clr *clr, + u8 *data, u16 *data_sz_words); +/** + * Free a CMM Resource + * + * @tfcp: Pointer to TFC handle + * @cmm_info: CMM info + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_act_free(struct tfc *tfcp, const struct tfc_cmm_info *cmm_info); + +/* IF table resource structure + */ +struct tfc_if_tbl_info { + enum cfa_resource_subtype_if_tbl rsubtype; /* resource subtype */ + enum cfa_dir dir; /* direction rx/tx */ + u16 id; /* index */ +}; + +/** + * Set a TFC if table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) including the id. + * @data: Pointer to the data to write to the entry. The data is aligned + * correctly in the buffer for writing to the hardware. + * @data_sz_in_bytes: The size of the entry in device sized bytes for Thor2. + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_if_tbl_set(struct tfc *tfcp, u16 fid, + const struct tfc_if_tbl_info *tbl_info, + const u8 *data, u8 data_sz_in_bytes); + +/** + * Get a TFC if table entry + * + * @tfcp: Pointer to TFC handle + * @fid: Function ID to be used + * @tbl_info: All the information related to the requested index table entry + * (subtype/dir) including the id. + * @data: Pointer to the data to read from the entry. + * @data_sz_in_bytes: The size of the entry in device sized bytes for Thor2. + * Input is the size of the buffer, output is the actual size. 
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_if_tbl_get(struct tfc *tfcp, u16 fid, + const struct tfc_if_tbl_info *tbl_info, + u8 *data, u8 *data_sz_in_bytes); +#endif /* _TFC_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_act.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_act.c new file mode 100644 index 000000000000..22823edce02b --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_act.c @@ -0,0 +1,756 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include + +#include "tfc.h" +#include "cfa_bld_mpc_field_ids.h" +#include "cfa_bld_mpcops.h" +#include "tfo.h" +#include "tfc_em.h" +#include "tfc_cpm.h" +#include "tfc_msg.h" +#include "tfc_priv.h" +#include "cfa_types.h" +#include "cfa_mm.h" +#include "tfc_action_handle.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_mpc.h" +#include "bnxt_tfc.h" +#include "sys_util.h" + +/* The read/write granularity is 32B + */ +#define TFC_ACT_RW_GRANULARITY 32 + +#define TFC_ACT_CACHE_OPT_EN 0 + +int tfc_act_alloc(struct tfc *tfcp, u8 tsid, struct tfc_cmm_info *cmm_info, u16 num_contig_rec) +{ + struct cfa_mm_alloc_parms aparms; + struct tfc_cpm *cpm_lkup = NULL; + struct tfc_cpm *cpm_act = NULL; + struct tfc_ts_mem_cfg mem_cfg; + bool is_bs_owner, is_shared; + struct bnxt *bp = tfcp->bp; + struct tfc_ts_pool_info pi; + struct tfc_cmm *cmm; + u32 entry_offset; + u16 max_pools; + u16 pool_id; + bool valid; + int rc; + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, &max_pools); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: %d\n", __func__, rc); + return -EINVAL; + } + + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid(%d) not allocated\n", __func__, tsid); + return -EINVAL; + } + + if (!max_pools) { + netdev_dbg(bp->dev, "%s: tsid(%d) Max pools must be greater than 0 %d\n", + __func__, tsid, max_pools); + return -EINVAL; 
+ } + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + + /* Get CPM instances */ + rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, cmm_info->dir, &cpm_lkup, &cpm_act); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get CPM instances: %d\n", + __func__, rc); + return -EINVAL; + } + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + cmm_info->dir, + CFA_REGION_TYPE_ACT, + &is_bs_owner, + &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + /* if no pool available locally or all pools full */ + rc = tfc_cpm_get_avail_pool(cpm_act, &pool_id); + if (rc) { + /* Allocate a pool */ + struct cfa_mm_query_parms qparms; + struct cfa_mm_open_parms oparms; + u16 fid; + + /* There is only 1 pool for a non-shared table scope + * and it is full. + */ + if (!is_shared) { + netdev_dbg(bp->dev, "%s: no records remain\n", __func__); + return -ENOMEM; + } + rc = tfc_get_fid(tfcp, &fid); + if (rc) + return rc; + + rc = tfc_tbl_scope_pool_alloc(tfcp, + fid, + tsid, + CFA_REGION_TYPE_ACT, + cmm_info->dir, + NULL, + &pool_id); + if (rc) { + netdev_dbg(bp->dev, "%s: table scope alloc HWRM failed: %d\n", + __func__, rc); + return -EINVAL; + } + + /* Create pool CMM instance */ + qparms.max_records = mem_cfg.rec_cnt; + qparms.max_contig_records = roundup_pow_of_two(pi.act_max_contig_rec); + rc = cfa_mm_query(&qparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_query() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + cmm = kzalloc(qparms.db_size, GFP_KERNEL); + if (!cmm) + return -ENOMEM; + + oparms.db_mem_size = qparms.db_size; + oparms.max_contig_records = roundup_pow_of_two(qparms.max_contig_records); + oparms.max_records = qparms.max_records / max_pools; + rc = cfa_mm_open(cmm, &oparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_open() failed: %d\n", + __func__, rc); + 
kfree(cmm); + return -EINVAL; + } + + /* Store CMM instance in the CPM */ + rc = tfc_cpm_set_cmm_inst(cpm_act, pool_id, cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_set_cmm_inst() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + + /* store updated pool info */ + tfo_ts_set_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi); + } else { + /* Get the pool instance and allocate an act rec index from the pool */ + rc = tfc_cpm_get_cmm_inst(cpm_act, pool_id, &cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_get_cmm_inst() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + } + + aparms.num_contig_records = roundup_pow_of_two(num_contig_rec); + rc = cfa_mm_alloc(cmm, &aparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_alloc() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + + /* Update CPM info so it will determine best pool to use next alloc */ + rc = tfc_cpm_set_usage(pi.act_cpm, pool_id, aparms.used_count, aparms.all_used); + if (rc) { + netdev_dbg(bp->dev, "%s: EM insert tfc_cpm_set_usage() failed: %d\n", + __func__, rc); + } + + CREATE_OFFSET(&entry_offset, pi.act_pool_sz_exp, pool_id, aparms.record_offset); + + /* Create Action handle */ + cmm_info->act_handle = tfc_create_action_handle(tsid, num_contig_rec, entry_offset); + return rc; +} + +int tfc_act_set(struct tfc *tfcp, const struct tfc_cmm_info *cmm_info, const u8 *data, + u16 data_sz_words) +{ + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_WRITE_CMD_MAX_FLD]; + struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_WRITE_CMP_MAX_FLD]; + u8 tx_msg[TFC_MPC_MAX_TX_BYTES], rx_msg[TFC_MPC_MAX_RX_BYTES]; + u32 i, buff_len, entry_offset, record_size; + u32 mpc_opaque = TFC_MPC_OPAQUE_VAL; + struct bnxt_mpc_mbuf mpc_msg_in; + struct bnxt_mpc_mbuf mpc_msg_out; + struct cfa_bld_mpcinfo *mpc_info; + struct bnxt *bp = tfcp->bp; + bool is_shared, valid; + int rc; + u8 tsid; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + + /* Check that MPC APIs are bound */ + 
if (!mpc_info->mpcops) { + netdev_dbg(bp->dev, "%s: MPC not initialized\n", + __func__); + return -EINVAL; + } + + tfc_get_fields_from_action_handle(&cmm_info->act_handle, + &tsid, + &record_size, + &entry_offset); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: rc:%d\n", __func__, rc); + return -EINVAL; + } + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", __func__, tsid); + return -EINVAL; + } + + /* Create MPC EM insert command using builder */ + for (i = 0; i < CFA_BLD_MPC_WRITE_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_OPAQUE_FLD].val = 0xAA; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_TYPE_FLD].val = CFA_BLD_MPC_HW_TABLE_TYPE_ACTION; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_SCOPE_FLD].val = tsid; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_DATA_SIZE_FLD].val = data_sz_words; +#if TFC_ACT_CACHE_OPT_EN + fields_cmd[CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_CACHE_OPTION_FLD].val = 0x01; +#endif + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_WRITE_CMD_TABLE_INDEX_FLD].val = entry_offset; + + buff_len = TFC_MPC_MAX_TX_BYTES; + + rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_write(tx_msg, + &buff_len, + data, + fields_cmd); + if (rc) { + netdev_dbg(bp->dev, "%s: write build failed: %d\n", + __func__, rc); + goto cleanup; + } + +#ifdef TFC_ACT_MSG_DEBUG + 
netdev_dbg(bp->dev, "Tx Msg: size:%d\n", buff_len); + bnxt_tfc_buf_dump(bp, NULL, (uint8_t *)tx_msg, buff_len, 4, 4); +#endif + + /* Send MPC */ + mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ? + RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_SHORT; + mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES; + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &mpc_opaque); + if (rc) { + netdev_dbg(bp->dev, "%s: write MPC send failed: %d\n", + __func__, rc); + goto cleanup; + } + +#ifdef TFC_ACT_MSG_DEBUG + netdev_dbg(bp->dev, "Rx Msg: size:%d\n", mpc_msg_out.msg_size); + bnxt_tfc_buf_dump(bp, NULL, (uint8_t *)rx_msg, buff_len, 4, 4); +#endif + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_WRITE_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_WRITE_CMP_STATUS_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_write(rx_msg, + mpc_msg_out.msg_size, + fields_cmp); + if (rc) { + netdev_dbg(bp->dev, "%s: write parse failed: %d\n", + __func__, rc); + goto cleanup; + } + + if (fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: failed with status code:%d\n", + __func__, + (u32)fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val); + netdev_dbg(bp->dev, "Hash MSB:0x%0x\n", + (u32)fields_cmp[CFA_BLD_MPC_WRITE_CMP_HASH_MSB_FLD].val); + rc = ((int)fields_cmp[CFA_BLD_MPC_WRITE_CMP_STATUS_FLD].val) * -1; + goto cleanup; + } + return 0; + + cleanup: + return rc; +} + +static int tfc_act_get_only(struct tfc *tfcp, const struct tfc_cmm_info *cmm_info, u8 *data, + u16 *data_sz_words) +{ + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CMD_MAX_FLD] = { {0} }; + struct 
cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CMP_MAX_FLD] = { {0} }; + u8 tx_msg[TFC_MPC_MAX_TX_BYTES] = { 0 }; + u8 rx_msg[TFC_MPC_MAX_RX_BYTES] = { 0 }; + u32 entry_offset, record_size, buff_len; + u32 mpc_opaque = TFC_MPC_OPAQUE_VAL; + struct cfa_bld_mpcinfo *mpc_info; + struct bnxt_mpc_mbuf mpc_msg_out; + struct bnxt_mpc_mbuf mpc_msg_in; + struct bnxt *bp = tfcp->bp; + u8 discard_data[128], tsid; + bool is_shared, valid; + u64 host_address; + int i, rc; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + + tfc_get_fields_from_action_handle(&cmm_info->act_handle, &tsid, + &record_size, &entry_offset); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: rc:%d\n", __func__, rc); + return -EINVAL; + } + + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", __func__, tsid); + return -EINVAL; + } + + /* Check that data pointer is word aligned */ + if (((u64)data) & 0x3ULL) { + netdev_dbg(bp->dev, "%s: data pointer not word aligned\n", + __func__); + return -EINVAL; + } + + host_address = (phys_addr_t)virt_to_phys(data); + + /* Check that MPC APIs are bound */ + if (!mpc_info->mpcops) { + netdev_dbg(bp->dev, "%s: MPC not initialized\n", + __func__); + return -EINVAL; + } + + /* Create MPC EM insert command using builder */ + for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].val = 0xAA; + + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val = + CFA_BLD_MPC_HW_TABLE_TYPE_ACTION; + + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].val = tsid; + + fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].field_id = + 
CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].val = *data_sz_words; + +#if TFC_ACT_CACHE_OPT_EN + fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].field_id = + CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].val = 0x0; +#endif + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].val = entry_offset; + + fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].field_id = + CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].val = host_address; + + buff_len = TFC_MPC_MAX_TX_BYTES; + + rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read(tx_msg, + &buff_len, + fields_cmd); + if (rc) { + netdev_dbg(bp->dev, "%s: read build failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Send MPC */ + mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ? + RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_SHORT; + mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES; + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &mpc_opaque); + if (rc) { + netdev_dbg(bp->dev, "%s: read MPC send failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_READ_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_READ_CMP_STATUS_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read(rx_msg, + mpc_msg_out.msg_size, + discard_data, + *data_sz_words * TFC_MPC_BYTES_PER_WORD, + fields_cmp); + if (rc) { + netdev_dbg(bp->dev, "%s: Action read parse failed: %d\n", + __func__, rc); + goto cleanup; + 
} + + if (fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: Action read failed with status code:%d\n", + __func__, + (u32)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val); + rc = ((int)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val) * -1; + goto cleanup; + } + return 0; + +cleanup: + return rc; +} + +static int tfc_act_get_clear(struct tfc *tfcp, + const struct tfc_cmm_info *cmm_info, + u8 *data, + u16 *data_sz_words, + u8 clr_offset, + u8 clr_size) +{ + int rc = 0; + u8 tx_msg[TFC_MPC_MAX_TX_BYTES] = { 0 }; + u8 rx_msg[TFC_MPC_MAX_RX_BYTES] = { 0 }; + u32 msg_count = BNXT_MPC_COMP_MSG_COUNT; + int i; + u32 buff_len; + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD] = { {0} }; + struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD] = { {0} }; + u32 entry_offset; + u64 host_address; + struct bnxt_mpc_mbuf mpc_msg_in; + struct bnxt_mpc_mbuf mpc_msg_out; + u32 record_size; + u8 tsid; + bool is_shared; + struct cfa_bld_mpcinfo *mpc_info; + u8 discard_data[128]; + bool valid; + u16 mask = 0; + struct bnxt *bp = tfcp->bp; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + + tfc_get_fields_from_action_handle(&cmm_info->act_handle, + &tsid, + &record_size, + &entry_offset); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc != 0) { + netdev_dbg(bp->dev, "%s: failed to get tsid: %d\n", + __func__, rc); + return -EINVAL; + } + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", + __func__, tsid); + return -EINVAL; + } + + /* Check that data pointer is word aligned */ + if (((uint64_t)data) & 0x3ULL) { + netdev_dbg(bp->dev, "%s: data pointer not word aligned\n", + __func__); + return -EINVAL; + } + + host_address = (phys_addr_t)virt_to_phys(data); + + /* Check that MPC APIs are bound */ + if (!mpc_info->mpcops) { + netdev_dbg(bp->dev, "%s: MPC not initialized\n", + __func__); + return -EINVAL; + } + + /* Create MPC EM insert command using builder */ + for (i 
= 0; i < CFA_BLD_MPC_READ_CLR_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_OPAQUE_FLD].val = 0xAA; + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_TYPE_FLD].val = + CFA_BLD_MPC_HW_TABLE_TYPE_ACTION; + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_SCOPE_FLD].val = tsid; + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_DATA_SIZE_FLD].val = *data_sz_words; + +#if TFC_ACT_CACHE_OPT_EN + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CACHE_OPTION_FLD].val = 0x0; +#endif + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_TABLE_INDEX_FLD].val = entry_offset; + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_HOST_ADDRESS_FLD].val = host_address; + + for (i = clr_offset; i < clr_size; i++) + mask |= (1 << i); + + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD; + fields_cmd[CFA_BLD_MPC_READ_CLR_CMD_CLEAR_MASK_FLD].val = mask; + + buff_len = TFC_MPC_MAX_TX_BYTES; + + rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read_clr(tx_msg, + &buff_len, + fields_cmd); + + if (rc) { + netdev_dbg(bp->dev, "%s: read clear build failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Send MPC */ + mpc_msg_in.chnl_id = (cmm_info->dir == CFA_DIR_TX ? 
+ RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_SHORT; + mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES; + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &msg_count); + + if (rc) { + netdev_dbg(bp->dev, "%s: read clear MPC send failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_READ_CLR_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read_clr(rx_msg, + mpc_msg_out.msg_size, + discard_data, + *data_sz_words * + TFC_MPC_BYTES_PER_WORD, + fields_cmp); + + if (rc) { + netdev_dbg(bp->dev, "%s: Action read clear parse failed: %d\n", + __func__, rc); + goto cleanup; + } + + if (fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: Action read clear failed with status code:%d\n", + __func__, + (uint32_t)fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val); + rc = ((int)fields_cmp[CFA_BLD_MPC_READ_CLR_CMP_STATUS_FLD].val) * -1; + goto cleanup; + } + + return 0; + +cleanup: + + return rc; +} + +int tfc_act_get(struct tfc *tfcp, + const struct tfc_cmm_info *cmm_info, + struct tfc_cmm_clr *clr, + u8 *data, u16 *data_sz_words) +{ + struct bnxt *bp = tfcp->bp; + /* It's not an error to pass clr as a Null pointer, just means that read + * and clear is not being requested. Also allow the user to manage + * clear via the clr flag. 
+ */ + if (clr && clr->clr) { + /* Clear offset and size have to be two bytes aligned */ + if (clr->offset_in_byte % 2 || clr->sz_in_byte % 2) { + netdev_dbg(bp->dev, "%s: clr offset(%d) or size(%d) is not two bytes aligned.\n", + __func__, clr->offset_in_byte, clr->sz_in_byte); + return -EINVAL; + } + + return tfc_act_get_clear(tfcp, cmm_info, + data, data_sz_words, + clr->offset_in_byte / 2, + clr->sz_in_byte / 2); + } else { + return tfc_act_get_only(tfcp, cmm_info, + data, data_sz_words); + } +} + +int tfc_act_free(struct tfc *tfcp, + const struct tfc_cmm_info *cmm_info) +{ + u32 pool_id = 0, record_size, record_offset; + struct cfa_mm_free_parms fparms; + struct tfc_cpm *cpm_lkup = NULL; + struct tfc_cpm *cpm_act = NULL; + struct tfc_ts_mem_cfg mem_cfg; + struct tfc_ts_pool_info pi; + struct bnxt *bp = tfcp->bp; + bool is_shared, valid; + struct tfc_cmm *cmm; + bool is_bs_owner; + u8 tsid; + int rc; + + /* Get fields from MPC Action handle */ + tfc_get_fields_from_action_handle(&cmm_info->act_handle, &tsid, + &record_size, &record_offset); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: rc:%d\n", __func__, rc); + return -EINVAL; + } + + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", __func__, tsid); + return -EINVAL; + } + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, cmm_info->dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + + pool_id = TFC_ACTION_GET_POOL_ID(record_offset, pi.act_pool_sz_exp); + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + cmm_info->dir, + CFA_REGION_TYPE_ACT, + &is_bs_owner, + &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", + __func__, rc); + return -EINVAL; + } + /* Get CPM instance for this table scope */ + rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, cmm_info->dir, &cpm_lkup, &cpm_act); + if (rc) { + netdev_dbg(bp->dev, 
"%s: failed to get CPM instance: %d\n", + __func__, rc); + return -EINVAL; + } + + rc = tfc_cpm_get_cmm_inst(cpm_act, pool_id, &cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get record: %d\n", __func__, rc); + return -EINVAL; + } + + fparms.record_offset = record_offset; + fparms.num_contig_records = roundup_pow_of_two(record_size); + rc = cfa_mm_free(cmm, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to free CMM instance: %d\n", __func__, rc); + return -EINVAL; + } + + rc = tfc_cpm_set_usage(cpm_act, pool_id, 0, false); + if (rc) + netdev_dbg(bp->dev, "%s: failed to set usage: %d\n", __func__, rc); + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_action_handle.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_action_handle.h new file mode 100644 index 000000000000..9145b77e65fc --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_action_handle.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TFC_ACTION_HANDLE_H_ +#define _TFC_ACTION_HANDLE_H_ + +#define TFC_POOL_TSID_ACTION_HANDLE_MASK 0x0000003F000000000ULL +#define TFC_POOL_TSID_ACTION_HANDLE_SFT 36 +#define TFC_RECORD_SIZE_ACTION_HANDLE_MASK 0x00000000F00000000ULL +#define TFC_RECORD_SIZE_ACTION_HANDLE_SFT 32 +#define TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK 0x00000000007FFFFFFULL +#define TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT 0 + +#define TFC_ACTION_HANDLE_MASK ( \ + TFC_POOL_TSID_ACTION_HANDLE_MASK | \ + TFC_RECORD_SIZE_ACTION_HANDLE_MASK | \ + TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK) + +static inline void tfc_get_fields_from_action_handle(const u64 *act_handle, u8 *tsid, + u32 *record_size, u32 *action_offset) +{ + *tsid = (u8)((*act_handle & TFC_POOL_TSID_ACTION_HANDLE_MASK) >> + TFC_POOL_TSID_ACTION_HANDLE_SFT); + *record_size = + (u32)((*act_handle & TFC_RECORD_SIZE_ACTION_HANDLE_MASK) >> + TFC_RECORD_SIZE_ACTION_HANDLE_SFT); + *action_offset = + (u32)((*act_handle & TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK) >> + TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT); +} + +static inline u64 tfc_create_action_handle(u8 tsid, u32 record_size, u32 action_offset) +{ + u64 act_handle = 0ULL; + + act_handle |= + ((((u64)tsid) << TFC_POOL_TSID_ACTION_HANDLE_SFT) & + TFC_POOL_TSID_ACTION_HANDLE_MASK); + act_handle |= + ((((u64)record_size) << TFC_RECORD_SIZE_ACTION_HANDLE_SFT) & + TFC_RECORD_SIZE_ACTION_HANDLE_MASK); + act_handle |= + ((((u64)action_offset) << TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT) & + TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK); + + return act_handle; +} + +#define TFC_ACTION_GET_POOL_ID(action_offset, pool_sz_exp) \ + ((action_offset) >> (pool_sz_exp)) + +#define TFC_GET_32B_OFFSET_ACT_HANDLE(act_32byte_offset, act_handle) \ + { \ + (act_32byte_offset) = (u32)((*(act_handle) & \ + TFC_EM_REC_OFFSET_ACTION_HANDLE_MASK) >> \ + TFC_EM_REC_OFFSET_ACTION_HANDLE_SFT); \ + } + +#define TFC_GET_8B_OFFSET(act_8byte_offset, act_32byte_offset) \ + { (act_8byte_offset) = ((act_32byte_offset) << 2); } + +#endif /* 
_TFC_ACTION_HANDLE_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.c new file mode 100644 index 000000000000..09259791eb75 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "tfc.h" +#include "tfc_cpm.h" + +/* Per pool entry + */ +struct cpm_pool_entry { + bool valid; + struct tfc_cmm *cmm; + u32 used_count; + bool all_used; + struct cpm_pool_use *pool_use; +}; + +/* Pool use list entry + */ +struct cpm_pool_use { + u16 pool_id; + struct cpm_pool_use *prev; + struct cpm_pool_use *next; +}; + +/* tfc_cpm + * + * This is the main CPM data struct + */ +struct tfc_cpm { + struct cpm_pool_entry *pools; + u16 available_pool_id; /* pool with highest use count, i.e. most used entries */ + bool pool_valid; /* pool has free entries */ + u32 pool_size; /* number of entries in each pool */ + u32 max_pools; /* maximum number of pools */ + u32 next_index; /* search index */ + struct cpm_pool_use *pool_use_list; /* Ordered list of pool usage */ +}; + +#define CPM_DEBUG 0 + +#if (CPM_DEBUG == 1) +static void show_list(char *str, struct tfc_cpm *cpm) +{ + struct cpm_pool_use *pu = cpm->pool_use_list; + + netdev_dbg(NULL, "%s - ", str); + while (!pu) { + netdev_dbg(NULL, + "PU(%p) id:%d(u:%d au:%d) p:0x%p n:0x%p\n", + pu, pu->pool_id, + cpm->pools[pu->pool_id].used_count, + cpm->pools[pu->pool_id].all_used, + pu->prev, pu->next); + + pu = pu->next; + } +} +#endif + +static int cpm_insert_pool_id(struct tfc_cpm *cpm, u16 pool_id) +{ + struct cpm_pool_entry *pool = &cpm->pools[pool_id]; + struct cpm_pool_use *pool_use = cpm->pool_use_list; + struct cpm_pool_use *new_pool_use; + struct cpm_pool_use *prev = NULL; + + if (!pool->valid) { + netdev_dbg(NULL, "%s: Pool ID:0x%x is invalid\n", 
__func__, pool_id); + return -EINVAL; + } + + /* Find where in insert new entry */ + while (pool_use) { + if (cpm->pools[pool_use->pool_id].valid && + cpm->pools[pool_use->pool_id].used_count > pool->used_count) { + pool_use = pool_use->next; + prev = pool_use; + } else { + break; + } + } + + /* Alloc new entry */ + new_pool_use = vzalloc(sizeof(*new_pool_use)); + new_pool_use->pool_id = pool_id; + new_pool_use->prev = NULL; + new_pool_use->next = NULL; + pool->pool_use = new_pool_use; + + if (!pool_use) { /* Empty list */ + cpm->pool_use_list = new_pool_use; + } else if (!prev) { /* Start of list */ + cpm->pool_use_list = new_pool_use; + new_pool_use->next = pool_use; + pool_use->prev = new_pool_use; + } else { /* Within list */ + prev->next = new_pool_use; + new_pool_use->next = pool_use; + new_pool_use->prev = prev; + } + + cpm->available_pool_id = cpm->pool_use_list->pool_id; + cpm->pool_valid = true; +#if (CPM_DEBUG == 1) + show_list("Insert", cpm); +#endif + return 0; +} + +static int cpm_sort_pool_id(struct tfc_cpm *cpm, u16 pool_id) +{ + struct cpm_pool_entry *pool = &cpm->pools[pool_id]; + struct cpm_pool_use *pool_use = pool->pool_use; + struct cpm_pool_use *prev, *next; + + /* Does entry need to move up, down or stay where it is? 
+ * + * The list is ordered by: + * Head: - Most used, but not full + * - ....next most used but not full + * - least used + * Tail: - All used + */ + while (1) { + if (pool_use->prev && + cpm->pools[pool_use->prev->pool_id].valid && !pool->all_used && + (cpm->pools[pool_use->prev->pool_id].all_used || + cpm->pools[pool_use->prev->pool_id].used_count < pool->used_count)) { + /* Move up */ + prev = pool_use->prev; + pool_use->prev->next = pool_use->next; + /* May be at the end of the list */ + if (pool_use->next) + pool_use->next->prev = pool_use->prev; + pool_use->next = pool_use->prev; + + if (pool_use->prev->prev) { + pool_use->prev->prev->next = pool_use; + pool_use->prev = pool_use->prev->prev; + } else { + /* Moved to head of the list */ + pool_use->prev->prev = pool_use; + pool_use->prev = NULL; + cpm->pool_use_list = pool_use; + } + + prev->prev = pool_use; + } else if (pool_use->next && cpm->pools[pool_use->next->pool_id].valid && + (pool->all_used || (!cpm->pools[pool_use->next->pool_id].all_used && + (cpm->pools[pool_use->next->pool_id].used_count > pool->used_count)))) { + /* Move down */ + next = pool_use->next; + pool_use->next->prev = pool_use->prev; + if (pool_use->prev) /* May be at the start of the list */ + pool_use->prev->next = pool_use->next; + else + cpm->pool_use_list = pool_use->next; + + pool_use->prev = pool_use->next; + + if (pool_use->next->next) { + pool_use->next->next->prev = pool_use; + pool_use->next = pool_use->next->next; + } else { + /* Moved to end of the list */ + pool_use->next->next = pool_use; + pool_use->next = NULL; + } + + next->next = pool_use; + } else { + /* Nothing to do */ + break; + } +#if (CPM_DEBUG == 1) + show_list("Sort", cpm); +#endif + } + + if (cpm->pools[cpm->pool_use_list->pool_id].all_used) { + cpm->available_pool_id = TFC_CPM_INVALID_POOL_ID; + cpm->pool_valid = false; + } else { + cpm->available_pool_id = cpm->pool_use_list->pool_id; + cpm->pool_valid = true; + } + + return 0; +} + +int 
tfc_cpm_open(struct tfc_cpm **cpm, u32 max_pools) +{ + /* Allocate CPM struct */ + *cpm = vzalloc(sizeof(**cpm)); + if (!*cpm) { + netdev_dbg(NULL, "%s: cpm alloc error %d\n", __func__, -ENOMEM); + *cpm = NULL; + return -ENOMEM; + } + + /* Allocate CPM pools array */ + (*cpm)->pools = vzalloc(sizeof(*(*cpm)->pools) * max_pools); + if (!(*cpm)->pools) { + netdev_dbg(NULL, "%s: pools alloc error %d\n", __func__, -ENOMEM); + vfree(*cpm); + *cpm = NULL; + + return -ENOMEM; + } + + /* Init pool entries by setting all fields to zero */ + memset((*cpm)->pools, 0, sizeof(struct cpm_pool_entry) * max_pools); + + /* Init remaining CPM fields */ + (*cpm)->pool_valid = false; + (*cpm)->available_pool_id = 0; + (*cpm)->max_pools = max_pools; + (*cpm)->pool_use_list = NULL; + + return 0; +} + +int tfc_cpm_close(struct tfc_cpm *cpm) +{ + struct cpm_pool_use *cpm_current; + struct cpm_pool_use *next; + + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + cpm_current = cpm->pool_use_list; + while (cpm_current) { + next = cpm_current->next; + vfree(cpm_current); + cpm_current = next; + } + + vfree(cpm->pools); + vfree(cpm); + + return 0; +} + +int tfc_cpm_set_pool_size(struct tfc_cpm *cpm, u32 pool_sz_in_records) +{ + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + cpm->pool_size = pool_sz_in_records; + return 0; +} + +int tfc_cpm_get_pool_size(struct tfc_cpm *cpm, u32 *pool_sz_in_records) +{ + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + *pool_sz_in_records = cpm->pool_size; + return 0; +} + +int tfc_cpm_set_cmm_inst(struct tfc_cpm *cpm, u16 pool_id, struct tfc_cmm *cmm) +{ + struct cpm_pool_entry *pool; + + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + pool = &cpm->pools[pool_id]; + + if (pool->valid && cmm) { + netdev_dbg(NULL, "%s: Pool ID:0x%x is already in use\n", __func__, pool_id); + return -EBUSY; + } + + 
pool->cmm = cmm; + pool->used_count = 0; + pool->all_used = false; + pool->pool_use = NULL; + + if (!cmm) { + pool->valid = false; + } else { + pool->valid = true; + cpm_insert_pool_id(cpm, pool_id); + } + + return 0; +} + +int tfc_cpm_get_cmm_inst(struct tfc_cpm *cpm, u16 pool_id, struct tfc_cmm **cmm) +{ + struct cpm_pool_entry *pool; + + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + pool = &cpm->pools[pool_id]; + + if (!pool->valid) { + netdev_dbg(NULL, "%s: Pool ID:0x%x is not valid\n", __func__, pool_id); + return -EINVAL; + } + + *cmm = pool->cmm; + return 0; +} + +int tfc_cpm_get_avail_pool(struct tfc_cpm *cpm, u16 *pool_id) +{ + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + if (!cpm->pool_valid) + return -EINVAL; + + *pool_id = cpm->available_pool_id; + + return 0; +} + +int tfc_cpm_set_usage(struct tfc_cpm *cpm, u16 pool_id, u32 used_count, bool all_used) +{ + struct cpm_pool_entry *pool; + + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", __func__); + return -EINVAL; + } + + pool = &cpm->pools[pool_id]; + + if (!pool->valid) { + netdev_dbg(NULL, "%s: Pool ID:0x%x is invalid\n", __func__, pool_id); + return -EINVAL; + } + + if (used_count > cpm->pool_size) { + netdev_dbg(NULL, "%s: Number of entries(%d) exceeds pool_size (%d)\n", + __func__, used_count, cpm->pool_size); + return -EINVAL; + } + + pool->all_used = all_used; + pool->used_count = used_count; + + /* Update ordered list of pool_ids */ + cpm_sort_pool_id(cpm, pool_id); + + return 0; +} + +int tfc_cpm_srchm_by_configured_pool(struct tfc_cpm *cpm, enum cfa_srch_mode srch_mode, + u16 *pool_id, struct tfc_cmm **cmm) +{ + u32 i; + + if (!cpm) { + netdev_dbg(NULL, "%s: CPM is NULL\n", + __func__); + return -EINVAL; + } + + if (!pool_id) { + netdev_dbg(NULL, "%s: pool_id ptr is NULL\n", + __func__); + return -EINVAL; + } + + if (!cmm) { + netdev_dbg(NULL, "%s: cmm ptr is NULL\n", + __func__); + return -EINVAL; + } 
+ *pool_id = TFC_CPM_INVALID_POOL_ID; + *cmm = NULL; + + if (srch_mode == CFA_SRCH_MODE_FIRST) + cpm->next_index = 0; + + for (i = cpm->next_index; i < cpm->max_pools; i++) { + if (cpm->pools[i].cmm) { + *pool_id = i; + *cmm = cpm->pools[i].cmm; + cpm->next_index = i + 1; + return 0; + } + } + cpm->next_index = cpm->max_pools; + return -ENOENT; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.h new file mode 100644 index 000000000000..26ffb41d8af1 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_cpm.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#ifndef _TFC_CPM_H_ +#define _TFC_CPM_H_ + +/* Set to 1 to force using just TS 0 + */ +#define TFC_FORCE_POOL_0 1 + +/* Temp to enable build. Remove when tfc_cmm is added + */ +struct tfc_cmm { + int a; +}; + +struct tfc_cpm; + +#define TFC_CPM_INVALID_POOL_ID 0xFFFF + +/** + * int tfc_cpm_open + * + * Initializes pre-allocated CPM structure. The cpm_db_size argument is + * validated against the max_pools argument. + * + * @cpm: Pointer to pointer of the allocated CPM data structure. The open will + * perform the alloc and return a pointer to the allocated memory. + * @max_pools: Maximum number of pools + * + * Returns: + * 0 - Success + * -EINVAL - cpm_db_size is not correct + * -ENOMEM - Failed to allocate memory for CPM data structures. + */ +int tfc_cpm_open(struct tfc_cpm **cpm, u32 max_pools); + +/** + * int tfc_cpm_close + * + * Deinitialize data structures. Note this does not free the memory. + * + * @cpm: Pointer to the CPM instance to free. + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_close(struct tfc_cpm *cpm); + +/** + * int tfc_cpm_set_pool_size + * + * Sets number of entries for pools in a given region. 
+ * + * @cpm: Pointer to the CPM instance + * @pool_sz_in_records: Max number of entries for each pool must be a power of 2. + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_set_pool_size(struct tfc_cpm *cpm, u32 pool_sz_in_records); + +/** + * int tfc_cpm_get_pool_size + * + * Returns the number of entries for pools in a given region. + * + * @cpm: Pointer to the CPM instance + * @pool_sz_in_records: Max number of entries for each pool + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_get_pool_size(struct tfc_cpm *cpm, u32 *pool_sz_in_records); + +/** + * int tfc_cpm_set_cmm_inst + * + * Add CMM instance. + * + * @cpm: Pointer to the CPM instance + * @pool_id: Pool ID to use + * @valid: Is entry valid + * @cmm: Pointer to the CMM instance + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_set_cmm_inst(struct tfc_cpm *cpm, u16 pool_id, struct tfc_cmm *cmm); + +/** + * int tfc_cpm_get_cmm_inst + * + * Get CMM instance. 
+ * + * @cpm: Pointer to the CPM instance + * @pool_id: Pool ID to use + * @valid: Is entry valid + * @cmm: Pointer to the CMM instance + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_get_cmm_inst(struct tfc_cpm *cpm, u16 pool_id, struct tfc_cmm **cmm); + +/** + * int tfc_cpm_get_avail_pool + * + * Returns the pool_id to use for the next EM insert + * + * @cpm: Pointer to the CPM instance + * @pool_id: Pool ID to use for EM insert + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_get_avail_pool(struct tfc_cpm *cpm, u16 *pool_id); + +/** + * int tfc_cpm_set_usage + * + * Set the usage_count and all_used fields for the specified pool_id + * + * @cpm: Pointer to the CPM instance + * @pool_id: Pool ID to update + * @used_count: Number of entries used within specified pool + * @all_used: Set if all pool entries are used + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_set_usage(struct tfc_cpm *cpm, u16 pool_id, u32 used_count, bool all_used); + +/** + * int tfc_cpm_srchm_by_configured_pool + * + * Get the next configured pool + * + * @cpm: Pointer to the CPM instance + * + * @srch_mode: Valid pool id + * + * @pool_id: Pointer to a valid pool id + * + * @cmm: Pointer to the associated CMM instance + * + * Returns: + * 0 - Success + * -EINVAL - Invalid argument + */ +int tfc_cpm_srchm_by_configured_pool(struct tfc_cpm *cpm, enum cfa_srch_mode srch_mode, + u16 *pool_id, struct tfc_cmm **cmm); + +#endif /* _TFC_CPM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_debug.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_debug.h new file mode 100644 index 000000000000..003095585b4d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_debug.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TFC_DEBUG_H_ +#define _TFC_DEBUG_H_ + +/* #define EM_DEBUG */ +/* #define ACT_DEBUG */ + +int tfc_mpc_table_write_zero(struct tfc *tfcp, u8 tsid, enum cfa_dir dir, + u32 type, u32 offset, u8 words, u8 *data); +int tfc_act_show(struct seq_file *m, struct tfc *tfcp, u8 tsid, enum cfa_dir dir); +int tfc_em_show(struct seq_file *m, struct tfc *tfcp, u8 tsid, enum cfa_dir dir); +int tfc_mpc_table_invalidate(struct tfc *tfcp, u8 tsid, enum cfa_dir dir, + u32 type, u32 offset, u32 words); +#endif diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.c new file mode 100644 index 000000000000..49dbbbceac3e --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.c @@ -0,0 +1,854 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_mpc.h" +#include "bnxt_tfc.h" + +#include "tfc.h" +#include "tfo.h" +#include "tfc_em.h" +#include "tfc_cpm.h" +#include "tfc_msg.h" +#include "tfc_priv.h" +#include "cfa_types.h" +#include "cfa_mm.h" +#include "cfa_bld_mpc_field_ids.h" +#include "cfa_bld_mpcops.h" +#include "tfc_flow_handle.h" +#include "tfc_util.h" +#include "sys_util.h" + +#include "tfc_debug.h" + +#define TFC_EM_DYNAMIC_BUCKET_RECORD_SIZE 1 + +int tfc_em_insert(struct tfc *tfcp, u8 tsid, struct tfc_em_insert_parms *parms) +{ + struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD]; + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD]; + u32 entry_offset, i, num_contig_records, buff_len; + u32 mpc_opaque = TFC_MPC_OPAQUE_VAL; + struct cfa_mm_alloc_parms aparms; + struct cfa_bld_mpcinfo *mpc_info; + struct bnxt_mpc_mbuf mpc_msg_out; + struct bnxt_mpc_mbuf mpc_msg_in; + struct cfa_mm_free_parms fparms; + struct tfc_cpm *cpm_lkup = NULL; + struct tfc_cpm *cpm_act = NULL; + struct tfc_ts_mem_cfg mem_cfg; + bool 
is_bs_owner, is_shared; + struct tfc_ts_pool_info pi; + struct bnxt *bp = tfcp->bp; + u16 pool_id, max_pools; + u8 *tx_msg, *rx_msg; + struct tfc_cmm *cmm; + int cleanup_rc; + bool valid; + int rc; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, &max_pools); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: %d\n", + __func__, rc); + return -EINVAL; + } + + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", __func__, tsid); + return -EINVAL; + } + + if (max_pools == 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) Max pools must be greater than 0 %d\n", + __func__, tsid, max_pools); + return -EINVAL; + } + + /* Check that MPC APIs are bound */ + if (!mpc_info->mpcops) { + netdev_dbg(bp->dev, "%s: MPC not initialized\n", + __func__); + return -EINVAL; + } + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + parms->dir, + CFA_REGION_TYPE_LKUP, + &is_bs_owner, + &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + /* Get CPM instances */ + rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, parms->dir, &cpm_lkup, &cpm_act); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get CPM instances: %d\n", + __func__, rc); + return -EINVAL; + } + + num_contig_records = roundup_pow_of_two(parms->lkup_key_sz_words); + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, parms->dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + + /* if no pool available locally or all pools full */ + rc = tfc_cpm_get_avail_pool(cpm_lkup, &pool_id); + if (rc) { + /* Allocate a pool */ + struct cfa_mm_query_parms qparms; + struct cfa_mm_open_parms oparms; + u16 fid; + + /* There is only 1 pool for a non-shared table scope and it is full. 
*/ + if (!is_shared) { + netdev_dbg(bp->dev, "%s: no records remain\n", __func__); + return -ENOMEM; + } + + rc = tfc_get_fid(tfcp, &fid); + if (rc) + return rc; + + rc = tfc_tbl_scope_pool_alloc(tfcp, + fid, + tsid, + CFA_REGION_TYPE_LKUP, + parms->dir, + NULL, + &pool_id); + + if (rc) { + netdev_dbg(bp->dev, "%s: table scope alloc pool failed: %d\n", + __func__, rc); + return -EINVAL; + } + + /* Create pool CMM instance. + * rec_cnt is the total number of records including static buckets + */ + qparms.max_records = (mem_cfg.rec_cnt - mem_cfg.lkup_rec_start_offset) / max_pools; + qparms.max_contig_records = roundup_pow_of_two(pi.lkup_max_contig_rec); + rc = cfa_mm_query(&qparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_query() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + + cmm = kzalloc(qparms.db_size, GFP_KERNEL); + if (!cmm) + return -ENOMEM; + + oparms.db_mem_size = qparms.db_size; + oparms.max_contig_records = roundup_pow_of_two(qparms.max_contig_records); + oparms.max_records = qparms.max_records; + rc = cfa_mm_open(cmm, &oparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_open() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + + /* Store CMM instance in the CPM */ + rc = tfc_cpm_set_cmm_inst(cpm_lkup, pool_id, cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_set_cmm_inst() failed: %d\n", + __func__, rc); + kfree(cmm); + return -EINVAL; + } + + /* Store the updated pool information */ + tfo_ts_set_pool_info(tfcp->tfo, tsid, parms->dir, &pi); + } else { + /* Get the pool instance and allocate an lkup rec index from the pool */ + rc = tfc_cpm_get_cmm_inst(cpm_lkup, pool_id, &cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_get_cmm_inst() failed: %d\n", + __func__, rc); + return -EINVAL; + } + } + + aparms.num_contig_records = num_contig_records; + rc = cfa_mm_alloc(cmm, &aparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_alloc() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + 
CREATE_OFFSET(&entry_offset, pi.lkup_pool_sz_exp, pool_id, aparms.record_offset); + + /* Create MPC EM insert command using builder */ + for (i = 0; i < CFA_BLD_MPC_EM_INSERT_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_OPAQUE_FLD].val = 0xAA; + + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_SCOPE_FLD].val = tsid; + + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD; + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_DATA_SIZE_FLD].val = parms->lkup_key_sz_words; + + /* LREC address */ + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_TABLE_INDEX_FLD].val = entry_offset + + mem_cfg.lkup_rec_start_offset; + + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD; + fields_cmd[CFA_BLD_MPC_EM_INSERT_CMD_REPLACE_FLD].val = 0x0; + + buff_len = TFC_MPC_MAX_TX_BYTES; + + netdev_dbg(bp->dev, "Lkup key data: size;%d entry_offset:%d\n", + (parms->lkup_key_sz_words * 32), + entry_offset + mem_cfg.lkup_rec_start_offset); + bnxt_tfc_buf_dump(bp, "lkup key", (uint8_t *)parms->lkup_key_data, + (parms->lkup_key_sz_words * 32), 4, 4); + + tx_msg = kzalloc(TFC_MPC_MAX_TX_BYTES, GFP_KERNEL); + rx_msg = kzalloc(TFC_MPC_MAX_RX_BYTES, GFP_KERNEL); + + if (!tx_msg || !rx_msg) { + netdev_err(bp->dev, "%s: tx_msg[%p], rx_msg[%p]\n", + __func__, tx_msg, rx_msg); + rc = -ENOMEM; + goto cleanup; + } + + rc = mpc_info->mpcops->cfa_bld_mpc_build_em_insert(tx_msg, + &buff_len, + parms->lkup_key_data, + fields_cmd); + if (rc) { + netdev_dbg(bp->dev, "%s: EM insert build failed: %d\n", + __func__, rc); + goto cleanup; + } + + netdev_dbg(bp->dev, 
"Tx Msg: size:%d\n", buff_len); + bnxt_tfc_buf_dump(bp, "EM insert", (uint8_t *)tx_msg, buff_len, 4, 4); + + /* Send MPC */ + mpc_msg_in.chnl_id = (parms->dir == CFA_DIR_TX ? + RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_in.msg_size = buff_len - TFC_MPC_HEADER_SIZE_BYTES; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_LONG; + mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES - TFC_MPC_HEADER_SIZE_BYTES; + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &mpc_opaque); + if (rc) { + netdev_dbg(bp->dev, "%s: EM insert send failed: %d\n", + __func__, rc); + goto cleanup; + } + + netdev_dbg(bp->dev, "Rx Msg: size:%d\n", mpc_msg_out.msg_size); + bnxt_tfc_buf_dump(bp, "EM insert", (uint8_t *)rx_msg, buff_len, 4, 4); + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_EM_INSERT_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD; + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_BKT_NUM_FLD; + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_NUM_ENTRIES_FLD; + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD; + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_CHAIN_UPD_FLD; + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD].field_id = + CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_em_insert(rx_msg, + mpc_msg_out.msg_size, + fields_cmp); + if (rc) { + netdev_dbg(bp->dev, "%s: EM insert parse failed: %d\n", + __func__, rc); + goto cleanup; + } + + netdev_dbg(bp->dev, "Hash MSB:0x%0x\n", + (u32)fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_HASH_MSB_FLD].val); + + if 
(fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: MPC failed with status code:%d\n", + __func__, + (u32)fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val); + rc = ((int)fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_STATUS_FLD].val) * -1; + goto cleanup; + } + + /* Update CPM info so it will determine best pool to use next alloc */ + rc = tfc_cpm_set_usage(cpm_lkup, pool_id, aparms.used_count, aparms.all_used); + if (rc) { + netdev_dbg(bp->dev, "%s: EM insert tfc_cpm_set_usage() failed: %d\n", + __func__, rc); + goto cleanup; + } + + *parms->flow_handle = tfc_create_flow_handle(tsid, + num_contig_records, /* Based on key size */ + entry_offset, + fields_cmp[CFA_BLD_MPC_EM_INSERT_CMP_TABLE_INDEX3_FLD].val); + + kfree(tx_msg); + kfree(rx_msg); + return 0; + +cleanup: + /* Preserve the rc from the actual error rather than + * an error during cleanup. + */ + /* Free allocated resources */ + fparms.record_offset = aparms.record_offset; + fparms.num_contig_records = num_contig_records; + cleanup_rc = cfa_mm_free(cmm, &fparms); + if (cleanup_rc) + netdev_dbg(bp->dev, "%s: failed to free entry: %d\n", + __func__, rc); + + cleanup_rc = tfc_cpm_set_usage(cpm_lkup, pool_id, fparms.used_count, false); + if (cleanup_rc) + netdev_dbg(bp->dev, "%s: failed to set usage: %d\n", + __func__, rc); + + kfree(tx_msg); + kfree(rx_msg); + return rc; +} + +int tfc_em_delete_raw(struct tfc *tfcp, u8 tsid, enum cfa_dir dir, u32 offset, u32 static_bucket) +{ + struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD]; + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD]; + u32 mpc_opaque = TFC_MPC_OPAQUE_VAL; + struct cfa_bld_mpcinfo *mpc_info; + struct bnxt_mpc_mbuf mpc_msg_out; + struct bnxt_mpc_mbuf mpc_msg_in; + u8 tx_msg[TFC_MPC_MAX_TX_BYTES]; + u8 rx_msg[TFC_MPC_MAX_RX_BYTES]; + struct bnxt *bp = tfcp->bp; + u32 buff_len; + int i, rc; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + if (!mpc_info->mpcops) { + 
netdev_dbg(bp->dev, "%s: MPC not initialized\n", __func__); + return -EINVAL; + } + + /* Create MPC EM delete command using builder */ + for (i = 0; i < CFA_BLD_MPC_EM_DELETE_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_OPAQUE_FLD].val = 0xAA; + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_SCOPE_FLD].val = tsid; + + /* LREC address to delete */ + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX_FLD].val = offset; + + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].field_id = + CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD; + fields_cmd[CFA_BLD_MPC_EM_DELETE_CMD_TABLE_INDEX2_FLD].val = static_bucket; + + /* Create MPC EM delete command using builder */ + buff_len = TFC_MPC_MAX_TX_BYTES; + + rc = mpc_info->mpcops->cfa_bld_mpc_build_em_delete(tx_msg, + &buff_len, + fields_cmd); + if (rc) { + netdev_dbg(bp->dev, "%s: delete mpc build failed: %d\n", __func__, -rc); + return -EINVAL; + } + + netdev_dbg(bp->dev, "Tx Msg: size:%d\n", buff_len); + bnxt_tfc_buf_dump(bp, "EM delete", (uint8_t *)tx_msg, buff_len, 4, 4); + + /* Send MPC */ + mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ? 
+ RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_in.msg_size = 16; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_LONG; + mpc_msg_out.msg_data = &rx_msg[TFC_MPC_HEADER_SIZE_BYTES]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES; + + netdev_dbg(bp->dev, "Tx Msg: size:%d\n", mpc_msg_out.msg_size); + bnxt_tfc_buf_dump(bp, "EM Delete", (uint8_t *)tx_msg, buff_len, 4, 4); + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &mpc_opaque); + if (rc) { + netdev_dbg(bp->dev, "%s: delete MPC send failed: %d\n", __func__, rc); + return -EINVAL; + } + + netdev_dbg(bp->dev, "Rx Msg: size:%d\n", mpc_msg_out.msg_size); + bnxt_tfc_buf_dump(bp, "EM delete", (uint8_t *)rx_msg, buff_len, 4, 4); + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_EM_DELETE_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_em_delete(rx_msg, + mpc_msg_out.msg_size, + fields_cmp); + if (rc) { + netdev_dbg(bp->dev, "%s: delete parse failed: %d\n", __func__, rc); + return -EINVAL; + } + + if (fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: MPC failed with status code:%d\n", __func__, + (u32)fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val); + rc = ((int)fields_cmp[CFA_BLD_MPC_EM_DELETE_CMP_STATUS_FLD].val) * -1; + return rc; + } + + return rc; +} + +int tfc_em_delete(struct tfc *tfcp, struct tfc_em_delete_parms *parms) +{ + u32 static_bucket, record_offset, record_size, pool_id; + bool is_bs_owner, is_shared, valid; + struct cfa_mm_free_parms fparms; + struct tfc_cpm *cpm_lkup = NULL; + struct tfc_cpm *cpm_act = NULL; + struct tfc_ts_mem_cfg mem_cfg; + struct tfc_ts_pool_info pi; + struct bnxt *bp = tfcp->bp; + struct tfc_cmm *cmm; + u8 tsid; + int rc; + + /* Get fields from MPC 
Flow handle */ + tfc_get_fields_from_flow_handle(&parms->flow_handle, + &tsid, + &record_size, + &record_offset, + &static_bucket); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: %d\n", __func__, rc); + return -EINVAL; + } + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", __func__, tsid); + return -EINVAL; + } + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, parms->dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + + pool_id = TFC_FLOW_GET_POOL_ID(record_offset, pi.lkup_pool_sz_exp); + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + parms->dir, + CFA_REGION_TYPE_LKUP, + &is_bs_owner, + &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", __func__, rc); + return -EINVAL; + } + + /* Get CPM instance for this table scope */ + rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, parms->dir, &cpm_lkup, &cpm_act); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get CMM instance: %d\n", __func__, rc); + return -EINVAL; + } + + rc = tfc_em_delete_raw(tfcp, + tsid, + parms->dir, + record_offset + + mem_cfg.lkup_rec_start_offset, + static_bucket); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to delete em raw record, offset %u: %d\n", + __func__, record_offset + mem_cfg.lkup_rec_start_offset, rc); + return -EINVAL; + } + + rc = tfc_cpm_get_cmm_inst(cpm_lkup, pool_id, &cmm); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get CMM instance: %d\n", __func__, rc); + return -EINVAL; + } + + fparms.record_offset = record_offset; + fparms.num_contig_records = roundup_pow_of_two(record_size); + + rc = cfa_mm_free(cmm, &fparms); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to free CMM instance: %d\n", __func__, rc); + return -EINVAL; + } + + rc = tfc_cpm_set_usage(cpm_lkup, pool_id, fparms.used_count, false); + if (rc) + netdev_dbg(bp->dev, "%s: failed to set usage: %d\n", __func__, 
rc); + + return rc; +} + +static void bucket_decode(u32 *bucket_ptr, + struct bucket_info_t *bucket_info) +{ + int offset = 0; + int i; + + bucket_info->valid = false; + bucket_info->chain = tfc_getbits(bucket_ptr, 254, 1); + bucket_info->chain_ptr = tfc_getbits(bucket_ptr, 228, 26); + + if (bucket_info->chain || + bucket_info->chain_ptr) + bucket_info->valid = true; + + for (i = 0; i < TFC_BUCKET_ENTRIES; i++) { + bucket_info->entries[i].entry_ptr = tfc_getbits(bucket_ptr, offset, 26); + offset += 26; + bucket_info->entries[i].hash_msb = tfc_getbits(bucket_ptr, offset, 12); + offset += 12; + if (bucket_info->entries[i].hash_msb || + bucket_info->entries[i].entry_ptr) { + bucket_info->valid = true; + } + } +} + +static int tfc_mpc_table_read(struct tfc *tfcp, + u8 tsid, + enum cfa_dir dir, + u32 type, + u32 offset, + u8 words, + u8 *data, + u8 debug) +{ + struct cfa_mpc_data_obj fields_cmd[CFA_BLD_MPC_READ_CMD_MAX_FLD]; + struct cfa_mpc_data_obj fields_cmp[CFA_BLD_MPC_READ_CMP_MAX_FLD]; + u32 mpc_opaque = TFC_MPC_OPAQUE_VAL; + struct bnxt_mpc_mbuf mpc_msg_out; + struct cfa_bld_mpcinfo *mpc_info; + struct bnxt_mpc_mbuf mpc_msg_in; + u8 tx_msg[TFC_MPC_MAX_TX_BYTES]; + u8 rx_msg[TFC_MPC_MAX_RX_BYTES]; + struct bnxt *bp = tfcp->bp; + phys_addr_t host_address; + bool is_shared, valid; + u8 discard_data[128]; + u32 buff_len; + u32 set, way; + int i, rc; + + tfo_mpcinfo_get(tfcp->tfo, &mpc_info); + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc) { + netdev_dbg(bp->dev, "%s: failed to get tsid: %d\n", + __func__, -rc); + return -EINVAL; + } + if (!valid) { + netdev_dbg(bp->dev, "%s: tsid not allocated %d\n", + __func__, tsid); + return -EINVAL; + } + + /* Check that data pointer is word aligned */ + if (((u64)data) & 0x1fULL) { + netdev_dbg(bp->dev, "%s: Table read data pointer not word aligned\n", + __func__); + return -EINVAL; + } + + host_address = (phys_addr_t)virt_to_phys(data); + + /* Check that MPC APIs are bound */ + if 
(!mpc_info->mpcops) { + netdev_dbg(bp->dev, "%s: MPC not initialized\n", + __func__); + return -EINVAL; + } + + set = offset & 0x7ff; + way = (offset >> 12) & 0xf; + + if (debug) + netdev_dbg(bp->dev, + "%s: Debug read table type:%s %d words32B at way:%d set:%d debug:%d words32B\n", + __func__, + (type == 0 ? "Lookup" : "Action"), + words, + way, + set, + debug); + else + netdev_dbg(bp->dev, + "%s: Reading table type:%s %d words32B at offset %d words32B\n", + __func__, + (type == 0 ? "Lookup" : "Action"), + words, + offset); + + /* Create MPC EM cache read */ + for (i = 0; i < CFA_BLD_MPC_READ_CMD_MAX_FLD; i++) + fields_cmd[i].field_id = INVALID_U16; + + fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_OPAQUE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_OPAQUE_FLD].val = 0xAA; + + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_TYPE_FLD].val = (type == 0 ? + CFA_BLD_MPC_HW_TABLE_TYPE_LOOKUP : CFA_BLD_MPC_HW_TABLE_TYPE_ACTION); + + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_SCOPE_FLD].val = + (debug ? way : tsid); + + fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].field_id = + CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_DATA_SIZE_FLD].val = words; + + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].field_id = + CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_TABLE_INDEX_FLD].val = + (debug ? 
set : offset); + + fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].field_id = + CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_HOST_ADDRESS_FLD].val = (u64)host_address; + + if (debug) { + fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].field_id = + CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD; + fields_cmd[CFA_BLD_MPC_READ_CMD_CACHE_OPTION_FLD].val = debug; + } + + buff_len = TFC_MPC_MAX_TX_BYTES; + + rc = mpc_info->mpcops->cfa_bld_mpc_build_cache_read(tx_msg, + &buff_len, + fields_cmd); + if (rc) { + netdev_dbg(bp->dev, "%s: Action read build failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Send MPC */ + mpc_msg_in.chnl_id = (dir == CFA_DIR_TX ? + RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA : + RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA); + mpc_msg_in.msg_data = &tx_msg[16]; + mpc_msg_in.msg_size = 16; + mpc_msg_out.cmp_type = MPC_CMP_TYPE_MID_PATH_SHORT; + mpc_msg_out.msg_data = &rx_msg[16]; + mpc_msg_out.msg_size = TFC_MPC_MAX_RX_BYTES; + + rc = bnxt_mpc_send(tfcp->bp, + &mpc_msg_in, + &mpc_msg_out, + &mpc_opaque); + if (rc) { + netdev_dbg(bp->dev, "%s: Table read MPC send failed: %d\n", + __func__, rc); + goto cleanup; + } + + /* Process response */ + for (i = 0; i < CFA_BLD_MPC_READ_CMP_MAX_FLD; i++) + fields_cmp[i].field_id = INVALID_U16; + + fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].field_id = + CFA_BLD_MPC_READ_CMP_STATUS_FLD; + + rc = mpc_info->mpcops->cfa_bld_mpc_parse_cache_read(rx_msg, + mpc_msg_out.msg_size, + discard_data, + words * TFC_MPC_BYTES_PER_WORD, + fields_cmp); + if (rc) { + netdev_dbg(bp->dev, "%s: Table read parse failed: %d\n", + __func__, rc); + goto cleanup; + } + + if (fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val != CFA_BLD_MPC_OK) { + netdev_dbg(bp->dev, "%s: Table read failed with status code:%d\n", + __func__, + (u32)fields_cmp[CFA_BLD_MPC_READ_CMP_STATUS_FLD].val); + rc = -1; + goto cleanup; + } + + return 0; + + cleanup: + + return rc; +} + +int tfc_em_delete_entries_by_pool_id(struct tfc *tfcp, + u8 
tsid, + enum cfa_dir dir, + u16 pool_id, + u8 debug, + u8 *data) +{ + struct tfc_ts_mem_cfg mem_cfg; + struct bucket_info_t *bucket; + struct tfc_ts_pool_info pi; + struct bnxt *bp = tfcp->bp; + bool is_bs_owner; + u32 offset; + int rc; + int i; + int j; + + /* Get memory info */ + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, + tsid, + dir, + CFA_REGION_TYPE_LKUP, + &is_bs_owner, + &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + bucket = kmalloc(sizeof(*bucket), GFP_KERNEL); + if (!bucket) + return -EINVAL; + + /* Read static bucket entries */ + for (offset = 0; offset < mem_cfg.lkup_rec_start_offset; ) { + /* Read static bucket region of lookup table. + * A static bucket is 32B in size and must be 32B aligned. + * A table read can read up to 4 * 32B so in the interest + * of efficiency the max read size will be used. 
+ */ + rc = tfc_mpc_table_read(tfcp, + tsid, + dir, + CFA_REGION_TYPE_LKUP, + offset, + TFC_MPC_MAX_TABLE_READ_WORDS, + data, + debug); + if (rc) { + netdev_dbg(bp->dev, + "%s: tfc_mpc_table_read() failed for offset:%d: %d\n", + __func__, offset, rc); + kfree(bucket); + return -EINVAL; + } + + for (i = 0; (i < TFC_MPC_MAX_TABLE_READ_WORDS) && + (offset < mem_cfg.lkup_rec_start_offset); i++) { + /* Walk static bucket entry pointers */ + bucket_decode((u32 *)&data[i * TFC_MPC_BYTES_PER_WORD], + bucket); + + for (j = 0; j < TFC_BUCKET_ENTRIES; j++) { + if (bucket->entries[j].entry_ptr != 0 && + pool_id == (bucket->entries[j].entry_ptr >> + pi.lkup_pool_sz_exp)) { + /* Delete EM entry */ + rc = tfc_em_delete_raw(tfcp, + tsid, + dir, + bucket->entries[j].entry_ptr, + offset); + if (rc) { + netdev_dbg(bp->dev, + "%s: EM delete failed offset:0x%08x %d\n", + __func__, + offset, + rc); + kfree(bucket); + return -1; + } + } + } + + offset++; + } + } + + kfree(bucket); + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.h new file mode 100644 index 000000000000..c3b90da33a5a --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_em.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TFC_EM_H_ +#define _TFC_EM_H_ + +/* Derived from CAS document */ +#define TFC_MPC_MAX_TX_BYTES 188 +#define TFC_MPC_MAX_RX_BYTES 188 + +#define TFC_MPC_HEADER_SIZE_BYTES 16 + +#define TFC_MPC_BYTES_PER_WORD 32 +#define TFC_MPC_MAX_TABLE_READ_WORDS 4 +#define TFC_MPC_MAX_TABLE_READ_BYTES \ + (TFC_MPC_BYTES_PER_WORD * TFC_MPC_MAX_TABLE_READ_WORDS) + +#define TFC_BUCKET_ENTRIES 6 + +/* MPC opaque currently unused */ +#define TFC_MPC_OPAQUE_VAL 0 + +#define TFC_MOD_STRING_LENGTH 512 +#define TFC_STAT_STRING_LENGTH 128 +#define TFC_ENC_STRING_LENGTH 256 + +struct act_full_info_t { + bool drop; + uint8_t vlan_del_rep; + uint8_t dest_op; + uint16_t vnic_vport; + uint8_t decap_func; + uint16_t mirror; + uint16_t meter_ptr; + uint8_t stat0_ctr_type; + bool stat0_ing_egr; + uint32_t stat0_ptr; + uint8_t stat1_ctr_type; + bool stat1_ing_egr; + uint32_t stat1_ptr; + uint32_t mod_ptr; + uint32_t enc_ptr; + uint32_t src_ptr; + char mod_str[512]; + char stat0_str[128]; + char stat1_str[128]; + char enc_str[256]; +}; + +struct act_mcg_info_t { + uint8_t src_ko_en; + uint32_t nxt_ptr; + uint8_t act_hint0; + uint32_t act_rec_ptr0; + uint8_t act_hint1; + uint32_t act_rec_ptr1; + uint8_t act_hint2; + uint32_t act_rec_ptr2; + uint8_t act_hint3; + uint32_t act_rec_ptr3; + uint8_t act_hint4; + uint32_t act_rec_ptr4; + uint8_t act_hint5; + uint32_t act_rec_ptr5; + uint8_t act_hint6; + uint32_t act_rec_ptr6; + uint8_t act_hint7; + uint32_t act_rec_ptr7; +}; + +struct act_info_t { + bool valid; + uint8_t vector; + union { + struct act_full_info_t full; + struct act_mcg_info_t mcg; + }; +}; + +struct em_info_t { + bool valid; + u8 rec_size; + u16 epoch0; + u16 epoch1; + u8 opcode; + u8 strength; + u8 act_hint; + u32 act_rec_ptr; /* Not FAST */ + u32 destination; /* Just FAST */ + u8 tcp_direction; /* Just CT */ + u8 tcp_update_en; + u8 tcp_win; + u32 tcp_msb_loc; + u32 tcp_msb_opp; + u8 tcp_msb_opp_init; + u8 state; + u8 timer_value; + u16 ring_table_idx; /* Not CT and not 
RECYCLE */ + u8 act_rec_size; + u8 paths_m1; + u8 fc_op; + u8 fc_type; + u32 fc_ptr; + u8 recycle_dest; /* Just Recycle */ + u8 prof_func; + u8 meta_prof; + u32 metadata; + u8 range_profile; + u16 range_index; + u8 *key; + struct act_info_t act_info; +}; + +struct sb_entry_t { + u16 hash_msb; + u32 entry_ptr; +}; + +struct bucket_info_t { + bool valid; + bool chain; + u32 chain_ptr; + struct sb_entry_t entries[TFC_BUCKET_ENTRIES]; + struct em_info_t em_info[TFC_BUCKET_ENTRIES]; +}; + +#define CALC_NUM_RECORDS_IN_POOL(a, b, c) + +/* Calculates number of 32Byte records from total size in 32bit words */ +#define CALC_NUM_RECORDS(result, key_sz_words) \ + (*(result) = (((key_sz_words) + 7) / 8)) + +/* Calculates the entry offset */ +#define CREATE_OFFSET(result, pool_sz_exp, pool_id, record_offset) \ + (*(result) = (((pool_id) << (pool_sz_exp)) | (record_offset))) + +int tfc_em_delete_raw(struct tfc *tfcp, + u8 tsid, + enum cfa_dir dir, + u32 offset, + u32 static_bucket); + +int tfc_em_delete_entries_by_pool_id(struct tfc *tfcp, + u8 tsid, + enum cfa_dir dir, + u16 pool_id, + u8 debug, + u8 *data); +#endif /* _TFC_EM_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_flow_handle.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_flow_handle.h new file mode 100644 index 000000000000..be6af5015228 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_flow_handle.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#ifndef _TFC_FLOW_HANDLE_H_ +#define _TFC_FLOW_HANDLE_H_ + +#define TFC_POOL_TSID_FLOW_HANDLE_MASK 0x0F80000000000000ULL +#define TFC_POOL_TSID_FLOW_HANDLE_SFT 55 +#define TFC_RECORD_SIZE_FLOW_HANDLE_MASK 0x0070000000000000ULL +#define TFC_RECORD_SIZE_FLOW_HANDLE_SFT 52 +#define TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK 0x000FFFFFFC000000ULL +#define TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT 26 +#define TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK 0x0000000003FFFFFFULL +#define TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT 0 + +#define TFC_FLOW_HANDLE_MASK ( \ + TFC_POOL_TSID_FLOW_HANDLE_MASK | \ + TFC_RECORD_SIZE_FLOW_HANDLE_MASK | \ + TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK | \ + TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK) + +static inline void tfc_get_fields_from_flow_handle(u64 *flow_handle, u8 *tsid, + u32 *record_size, u32 *em_record_offset, + u32 *static_bucket_offset) +{ + *tsid = (u8)((*flow_handle & TFC_POOL_TSID_FLOW_HANDLE_MASK) >> + TFC_POOL_TSID_FLOW_HANDLE_SFT); + *record_size = + (u32)((*flow_handle & TFC_RECORD_SIZE_FLOW_HANDLE_MASK) >> + TFC_RECORD_SIZE_FLOW_HANDLE_SFT); + *em_record_offset = + (u32)((*flow_handle & TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK) >> + TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT); + *static_bucket_offset = + (u32)((*flow_handle & TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK) >> + TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT); +} + +static inline u64 tfc_create_flow_handle(u32 tsid, u32 record_size, u32 em_record_offset, + u32 static_bucket_offset) +{ + u64 flow_handle = 0ULL; + + flow_handle |= + ((((u64)tsid) << TFC_POOL_TSID_FLOW_HANDLE_SFT) & + TFC_POOL_TSID_FLOW_HANDLE_MASK); + flow_handle |= + ((((u64)record_size) << TFC_RECORD_SIZE_FLOW_HANDLE_SFT) & + TFC_RECORD_SIZE_FLOW_HANDLE_MASK); + flow_handle |= + ((((u64)em_record_offset) << TFC_EM_REC_OFFSET_FLOW_HANDLE_SFT) & + TFC_EM_REC_OFFSET_FLOW_HANDLE_MASK); + flow_handle |= + (((static_bucket_offset) << TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_SFT) & + TFC_STATIC_BUCKET_OFFSET_FLOW_HANDLE_MASK); + + return 
flow_handle; +} + +#define TFC_FLOW_GET_POOL_ID(em_record_offset, pool_sz_exp) \ + ((em_record_offset) >> (pool_sz_exp)) + +#endif /* _TFC_FLOW_HANDLE_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_global_id.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_global_id.c new file mode 100644 index 000000000000..0d931c79f7d8 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_global_id.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. + */ + +#include +#include "tfc.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tfc.h" +#include "tfc_msg.h" + +int tfc_global_id_alloc(struct tfc *tfcp, u16 fid, enum tfc_domain_id domain_id, + u16 req_cnt, const struct tfc_global_id_req *req, + struct tfc_global_id *rsp, u16 *rsp_cnt, + bool *first) +{ + struct bnxt *bp = tfcp->bp; + int rc; + u16 sid; + + if (!req) { + netdev_dbg(bp->dev, "%s: global_id req is NULL\n", __func__); + return -EINVAL; + } + + if (!rsp) { + netdev_dbg(bp->dev, "%s: global_id rsp is NULL\n", __func__); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_global_id_alloc(tfcp, fid, sid, domain_id, req_cnt, + req, rsp, rsp_cnt, first); + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_ident.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_ident.c new file mode 100644 index 000000000000..b7f64ea59728 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_ident.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include "tfc.h" + +#include "tfc_msg.h" +#include "cfa_types.h" +#include "tfo.h" +#include "tfc_util.h" +#include "bnxt_compat.h" +#include "bnxt.h" + +int tfc_identifier_alloc(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + struct tfc_identifier_info *ident_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!ident_info) { + netdev_dbg(bp->dev, "%s: Invalid ident_info pointer\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_identifier_alloc(tfcp, ident_info->dir, + ident_info->rsubtype, tt, + fid, sid, &ident_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed %s:%s, rc:%d\n", + __func__, tfc_dir_2_str(ident_info->dir), + tfc_ident_2_str(ident_info->rsubtype), rc); + + return rc; +} + +int tfc_identifier_free(struct tfc *tfcp, u16 fid, + const struct tfc_identifier_info *ident_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!ident_info) { + netdev_dbg(bp->dev, "%s: Invalid ident_info pointer\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_identifier_free(tfcp, ident_info->dir, + ident_info->rsubtype, + fid, sid, ident_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed %s:%s:%d, rc:%d\n", + __func__, tfc_dir_2_str(ident_info->dir), + tfc_ident_2_str(ident_info->rsubtype), ident_info->id, rc); + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_idx_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_idx_tbl.c new file mode 100644 index 000000000000..70942662dfed --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_idx_tbl.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights 
reserved. + */ + +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tfc.h" +#include "tfc_msg.h" +#include "tfc_util.h" + +static int tfc_idx_tbl_alloc_check(struct tfc *tfcp, u16 fid, + enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info) +{ + struct bnxt *bp = tfcp->bp; + + if (!bp) + return -EINVAL; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tt >= CFA_TRACK_TYPE_MAX) { + netdev_dbg(bp->dev, "%s: Invalid track type: %d\n", __func__, tt); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid idx tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + return 0; +} + +int tfc_idx_tbl_alloc(struct tfc *tfcp, u16 fid, + enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (tfc_idx_tbl_alloc_check(tfcp, fid, tt, tbl_info)) + return -EINVAL; + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_idx_tbl_alloc(tfcp, fid, sid, tt, tbl_info->dir, + tbl_info->rsubtype, &tbl_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), tfc_idx_tbl_2_str(tbl_info->rsubtype), + rc); + + return rc; +} + +static int tfc_idx_tbl_alloc_set_check(struct tfc *tfcp, u16 fid, + enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + + if (!bp) + return -EINVAL; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: 
tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (!data) { + netdev_dbg(bp->dev, "%s: Invalid data pointer\n", __func__); + return -EINVAL; + } + + if (tt >= CFA_TRACK_TYPE_MAX) { + netdev_dbg(bp->dev, "%s: Invalid track type: %d\n", __func__, tt); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid idx tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + if (data_sz_in_bytes == 0) { + netdev_dbg(bp->dev, "%s: Data size must be greater than zero\n", + __func__); + return -EINVAL; + } + + return 0; +} + +int tfc_idx_tbl_alloc_set(struct tfc *tfcp, u16 fid, + enum cfa_track_type tt, + struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (tfc_idx_tbl_alloc_set_check(tfcp, fid, tt, tbl_info, data, data_sz_in_bytes)) + return -EINVAL; + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_idx_tbl_alloc_set(tfcp, fid, sid, tt, tbl_info->dir, + tbl_info->rsubtype, data, + data_sz_in_bytes, &tbl_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), tfc_idx_tbl_2_str(tbl_info->rsubtype), + rc); + + return rc; +} + +static int tfc_idx_tbl_set_check(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + + if (!bp) + return -EINVAL; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tbl_info->dir >= 
CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid idx tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + return 0; +} + +int tfc_idx_tbl_set(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info, + const u32 *data, u8 data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (tfc_idx_tbl_set_check(tfcp, fid, tbl_info, data, data_sz_in_bytes)) + return -EINVAL; + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_idx_tbl_set(tfcp, fid, sid, tbl_info->dir, + tbl_info->rsubtype, tbl_info->id, + data, data_sz_in_bytes); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), tfc_idx_tbl_2_str(tbl_info->rsubtype), + tbl_info->id, rc); + + return rc; +} + +static int tfc_idx_tbl_get_check(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info, + u32 *data, u8 *data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + + if (!bp) + return -EINVAL; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid idx tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + return 0; +} + +int tfc_idx_tbl_get(struct tfc *tfcp, u16 
fid, + const struct tfc_idx_tbl_info *tbl_info, + u32 *data, u8 *data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (tfc_idx_tbl_get_check(tfcp, fid, tbl_info, data, data_sz_in_bytes)) + return -EINVAL; + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_idx_tbl_get(tfcp, fid, sid, tbl_info->dir, + tbl_info->rsubtype, tbl_info->id, + data, data_sz_in_bytes); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), + tfc_idx_tbl_2_str(tbl_info->rsubtype), tbl_info->id, rc); + return rc; +} + +static int tfc_idx_tbl_free_check(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info) +{ + struct bnxt *bp = tfcp->bp; + + if (!bp) + return -EINVAL; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IDX_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid idx tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + return 0; +} + +int tfc_idx_tbl_free(struct tfc *tfcp, u16 fid, + const struct tfc_idx_tbl_info *tbl_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (tfc_idx_tbl_free_check(tfcp, fid, tbl_info)) + return -EINVAL; + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_idx_tbl_free(tfcp, fid, sid, tbl_info->dir, + tbl_info->rsubtype, tbl_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), + 
tfc_idx_tbl_2_str(tbl_info->rsubtype), tbl_info->id, rc); + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_if_tbl.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_if_tbl.c new file mode 100644 index 000000000000..2e8a50951dc6 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_if_tbl.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tfc.h" +#include "tfc_msg.h" +#include "tfc_util.h" + +int tfc_if_tbl_set(struct tfc *tfcp, u16 fid, + const struct tfc_if_tbl_info *tbl_info, + const u8 *data, u8 data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tbl_info) { + netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IF_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid if tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_if_tbl_set(tfcp, fid, sid, tbl_info->dir, + tbl_info->rsubtype, tbl_info->id, + data_sz_in_bytes, data); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), + tfc_if_tbl_2_str(tbl_info->rsubtype), tbl_info->id, rc); + + return rc; +} + +int tfc_if_tbl_get(struct tfc *tfcp, u16 fid, + const struct tfc_if_tbl_info *tbl_info, + u8 *data, u8 *data_sz_in_bytes) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tbl_info) { + 
netdev_dbg(bp->dev, "%s: tbl_info is NULL\n", __func__); + return -EINVAL; + } + + if (tbl_info->dir >= CFA_DIR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid cfa dir: %d\n", __func__, tbl_info->dir); + return -EINVAL; + } + + if (tbl_info->rsubtype >= CFA_RSUBTYPE_IF_TBL_MAX) { + netdev_dbg(bp->dev, "%s: Invalid if tbl subtype: %d\n", __func__, + tbl_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_if_tbl_get(tfcp, fid, sid, tbl_info->dir, + tbl_info->rsubtype, tbl_info->id, + data_sz_in_bytes, data); + if (rc) + netdev_dbg(bp->dev, "%s: hwrm failed: %s:%s %d %d\n", __func__, + tfc_dir_2_str(tbl_info->dir), + tfc_if_tbl_2_str(tbl_info->rsubtype), tbl_info->id, rc); + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_init.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_init.c new file mode 100644 index 000000000000..78addc4fdedb --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_init.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include "tfc.h" +#include "tfo.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_mpc.h" +#include "cfa_bld_mpcops.h" +#include "tfc_priv.h" + +/* The tfc_open and tfc_close APIs may only be used for setting TFC software + * state. They are never used to modify the HW state. That is, they are not + * allowed to send HWRM messages. 
+ */ + +int tfc_open(struct tfc *tfcp) +{ + struct bnxt *bp = tfcp->bp; + bool is_pf; + int rc; + + /* Initialize the TF object */ + if (tfcp->tfo) { + netdev_dbg(bp->dev, "%s: tfc_opened already.\n", __func__); + return -EINVAL; + } + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) + return rc; + + tfo_open(&tfcp->tfo, is_pf); + + return 0; +} + +int tfc_close(struct tfc *tfcp) +{ + struct bnxt *bp = tfcp->bp; + bool valid; + u16 sid; + u8 tsid; + int rc = 0; + + /* Nullify the TF object */ + if (tfcp->tfo) { + if (tfo_sid_get(tfcp->tfo, &sid) == 0) { + /* If no error, then there is a valid SID which means + * that the FID is still associated with the SID. + */ + netdev_dbg(bp->dev, + "%s: There is still a session associated with this object.\n", + __func__); + } + + for (tsid = 0; tsid < TFC_TBL_SCOPE_MAX; tsid++) { + rc = tfo_ts_get(tfcp->tfo, tsid, NULL, NULL, &valid, NULL); + if (!rc && valid) { + netdev_dbg(bp->dev, + "%s: There is a tsid %d still associated\n", + __func__, tsid); + } + } + tfo_close(&tfcp->tfo); + } + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_mpc_table.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_mpc_table.c new file mode 100644 index 000000000000..0900d0947a73 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_mpc_table.c @@ -0,0 +1,888 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ +#include +#include + +#include "tfc.h" +#include "tfo.h" +#include "tfc_em.h" +#include "tfc_debug.h" +#include "cfa_types.h" + +#include "sys_util.h" +#include "tfc_util.h" + +#define TFC_ACTION_SIZE_BYTES 32 +#define TFC_BUCKET_SIZE_BYTES 32 + +#define TFC_STRING_LENGTH_32 32 +#define TFC_STRING_LENGTH_64 64 +#define TFC_STRING_LENGTH_96 96 +#define TFC_STRING_LENGTH_256 256 + +static const char * const opcode_string[] = { + "NORMAL", + "NORMAL_RFS", + "FAST", + "FAST_RFS", + "CT_MISS_DEF", + "INVALID", + "CT_HIT_DEF", + "INVALID", + "RECYCLE" +}; + +static void act_decode(uint32_t *act_ptr, + uint64_t base, + struct act_info_t *act_info); +static void act_show(struct seq_file *m, + struct act_info_t *act_info, + uint32_t offset); +static void stat_decode(char *str, + uint8_t stat_num, + uint8_t stat1_ctr_type, + uint32_t *stat_ptr); + +static uint64_t get_address(struct tfc_ts_mem_cfg *mem, uint32_t offset) +{ + uint32_t page = offset / mem->pg_tbl[0].pg_size; + uint32_t adj_offset = offset % mem->pg_tbl[0].pg_size; + int level = 0; + uint64_t addr; + + /* + * Use the level according to the num_level of page table + */ + level = mem->num_lvl - 1; + + addr = (uint64_t)mem->pg_tbl[level].pg_va_tbl[page] + adj_offset; + + return addr; +} + +static void act_process(struct seq_file *m, + uint32_t act_rec_ptr, + struct em_info_t *em_info, + struct tfc_ts_mem_cfg *act_mem_cfg) +{ + uint8_t *act_ptr; + uint64_t base; + uint32_t act_offset = act_rec_ptr << 5; + + base = get_address(act_mem_cfg, 0); + act_ptr = (uint8_t *)get_address(act_mem_cfg, act_offset); + act_decode((uint32_t *)act_ptr, base, &em_info->act_info); +} + +static void em_decode(struct seq_file *m, + uint32_t *em_ptr, + struct em_info_t *em_info, + struct tfc_ts_mem_cfg *act_mem_cfg) +{ + em_info->key = (uint8_t *)em_ptr; + + em_ptr += (128 / 8) / 4; /* For EM records the LREC follows 128 bits of key */ + em_info->valid = tfc_getbits(em_ptr, 127, 1); + em_info->rec_size = tfc_getbits(em_ptr, 125, 
2); + em_info->epoch0 = tfc_getbits(em_ptr, 113, 12); + em_info->epoch1 = tfc_getbits(em_ptr, 107, 6); + em_info->opcode = tfc_getbits(em_ptr, 103, 4); + em_info->strength = tfc_getbits(em_ptr, 101, 2); + em_info->act_hint = tfc_getbits(em_ptr, 99, 2); + + if (em_info->opcode != 2 && em_info->opcode != 3) { + /* All but FAST */ + em_info->act_rec_ptr = tfc_getbits(em_ptr, 73, 26); + act_process(m, em_info->act_rec_ptr, em_info, act_mem_cfg); + } else { + /* Just FAST */ + em_info->destination = tfc_getbits(em_ptr, 73, 17); + } + + if (em_info->opcode == 4 || em_info->opcode == 6) { + /* CT only */ + em_info->tcp_direction = tfc_getbits(em_ptr, 72, 1); + em_info->tcp_update_en = tfc_getbits(em_ptr, 71, 1); + em_info->tcp_win = tfc_getbits(em_ptr, 66, 5); + em_info->tcp_msb_loc = tfc_getbits(em_ptr, 48, 18); + em_info->tcp_msb_opp = tfc_getbits(em_ptr, 30, 18); + em_info->tcp_msb_opp_init = tfc_getbits(em_ptr, 29, 1); + em_info->state = tfc_getbits(em_ptr, 24, 5); + em_info->timer_value = tfc_getbits(em_ptr, 20, 4); + } else if (em_info->opcode != 8) { + /* Not CT and nor RECYCLE */ + em_info->ring_table_idx = tfc_getbits(em_ptr, 64, 9); + em_info->act_rec_size = tfc_getbits(em_ptr, 59, 5); + em_info->paths_m1 = tfc_getbits(em_ptr, 55, 4); + em_info->fc_op = tfc_getbits(em_ptr, 54, 1); + em_info->fc_type = tfc_getbits(em_ptr, 52, 2); + em_info->fc_ptr = tfc_getbits(em_ptr, 24, 28); + } else { + em_info->recycle_dest = tfc_getbits(em_ptr, 72, 1); /* Just Recycle */ + em_info->prof_func = tfc_getbits(em_ptr, 64, 8); + em_info->meta_prof = tfc_getbits(em_ptr, 61, 3); + em_info->metadata = tfc_getbits(em_ptr, 29, 32); + } + + em_info->range_profile = tfc_getbits(em_ptr, 16, 4); + em_info->range_index = tfc_getbits(em_ptr, 0, 16); +} + +static void em_show(struct seq_file *m, struct em_info_t *em_info) +{ + int i; + char *line1 = NULL; + char *line2 = NULL; + char *line3 = NULL; + char *line4 = NULL; + char tmp1[TFC_STRING_LENGTH_64]; + char tmp2[TFC_STRING_LENGTH_64]; + 
char tmp3[TFC_STRING_LENGTH_64]; + char tmp4[TFC_STRING_LENGTH_64]; + + line1 = kmalloc(TFC_STRING_LENGTH_256, GFP_KERNEL); + line2 = kmalloc(TFC_STRING_LENGTH_256, GFP_KERNEL); + line3 = kmalloc(TFC_STRING_LENGTH_256, GFP_KERNEL); + line4 = kmalloc(TFC_STRING_LENGTH_256, GFP_KERNEL); + if (!line1 || !line2 || !line3 || !line4) { + kfree(line1); + kfree(line2); + kfree(line3); + kfree(line4); + seq_printf(m, "%s: Failed to allocate temp buffer\n", + __func__); + return; + } + + seq_printf(m, ":LREC: opcode:%s\n", opcode_string[em_info->opcode]); + + snprintf(line1, TFC_STRING_LENGTH_256, "+-+--+-Epoch-+--+--+--+"); + snprintf(line2, TFC_STRING_LENGTH_256, " V|rs| 0 1 |Op|St|ah|"); + snprintf(line3, TFC_STRING_LENGTH_256, "+-+--+----+--+--+--+--+"); + snprintf(line4, TFC_STRING_LENGTH_256, " %1d %2d %4d %2d %2d %2d %2d ", + em_info->valid, + em_info->rec_size, + em_info->epoch0, + em_info->epoch1, + em_info->opcode, + em_info->strength, + em_info->act_hint); + + if (em_info->opcode != 2 && em_info->opcode != 3) { + /* All but FAST */ + snprintf(tmp1, TFC_STRING_LENGTH_64, "-Act Rec--+"); + snprintf(tmp2, TFC_STRING_LENGTH_64, " Ptr |"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "----------+"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "0x%08x ", + em_info->act_rec_ptr); + } else { + /* Just FAST */ + snprintf(tmp1, TFC_STRING_LENGTH_64, "-------+"); + snprintf(tmp2, TFC_STRING_LENGTH_64, " Dest |"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "-------+"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "0x05%x ", + em_info->destination); + } + + strcat(line1, tmp1); + strcat(line2, tmp2); + strcat(line3, tmp3); + strcat(line4, tmp4); + + if (em_info->opcode == 4 || em_info->opcode == 6) { + /* CT only */ + snprintf(tmp1, TFC_STRING_LENGTH_64, "--+--+-------------TCP-------+--+---+"); + snprintf(tmp2, TFC_STRING_LENGTH_64, "Dr|ue| Win| lc | op |oi|st|tmr|"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "--+--+----+-------+-------+--+--+---+"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "%2d %2d 
%4d %0x5x %0x5x %2d %2d %3d ", + em_info->tcp_direction, + em_info->tcp_update_en, + em_info->tcp_win, + em_info->tcp_msb_loc, + em_info->tcp_msb_opp, + em_info->tcp_msb_opp_init, + em_info->state, + em_info->timer_value); + } else if (em_info->opcode != 8) { + /* Not CT and nor RECYCLE */ + snprintf(tmp1, TFC_STRING_LENGTH_64, "--+--+--+-------FC-------+"); + snprintf(tmp2, TFC_STRING_LENGTH_64, "RI|as|pm|op|tp| Ptr |"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "--+--+--+--+--+----------+"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "%2d %2d %2d %2d %2d 0x%08x ", + em_info->ring_table_idx, + em_info->act_rec_size, + em_info->paths_m1, + em_info->fc_op, + em_info->fc_type, + em_info->fc_ptr); + } else { + snprintf(tmp1, TFC_STRING_LENGTH_64, "--+--+--+---------+"); + snprintf(tmp2, TFC_STRING_LENGTH_64, "RD|pf|mp| cMData |"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "--+--+--+---------+"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "%2d 0x%2x %2d %08x ", + em_info->recycle_dest, + em_info->prof_func, + em_info->meta_prof, + em_info->metadata); + } + + strcat(line1, tmp1); + strcat(line2, tmp2); + strcat(line3, tmp3); + strcat(line4, tmp4); + + snprintf(tmp1, TFC_STRING_LENGTH_64, "-----Range-+\n"); + snprintf(tmp2, TFC_STRING_LENGTH_64, "Prof| Idx |\n"); + snprintf(tmp3, TFC_STRING_LENGTH_64, "----+------+\n"); + snprintf(tmp4, TFC_STRING_LENGTH_64, "0x%02x 0x%04x\n", + em_info->range_profile, + em_info->range_index); + + strcat(line1, tmp1); + strcat(line2, tmp2); + strcat(line3, tmp3); + strcat(line4, tmp4); + + seq_printf(m, "%s%s%s%s", + line1, + line2, + line3, + line4); + + seq_puts(m, "Key:"); + for (i = 0; i < ((em_info->rec_size + 1) * 32); i++) { + if (i % 32 == 0) + seq_printf(m, "\n%04d: ", i); + seq_printf(m, "%02x", em_info->key[i]); + } + i = ((em_info->rec_size + 1) * 32); + seq_printf(m, "\nKey Reversed:\n%04d: ", i - 32); + do { + i--; + seq_printf(m, "%02x", em_info->key[i]); + if (i != 0 && i % 32 == 0) + seq_printf(m, "\n%04d: ", i - 32); + } while (i > 
0); + seq_puts(m, "\n"); + + if (em_info->opcode != 2 && em_info->opcode != 3) + act_show(m, &em_info->act_info, em_info->act_rec_ptr << 5); + + kfree(line1); + kfree(line2); + kfree(line3); + kfree(line4); +} + +struct mod_field_s { + uint8_t num_bits; + const char *name; +}; + +struct mod_data_s { + uint8_t num_fields; + const char *name; + struct mod_field_s field[4]; +}; + +struct mod_data_s mod_data[] = { + {1, "Replace", {{16, "DPort"}}}, + {1, "Replace", {{16, "SPort"}}}, + {1, "Replace", {{32, "IPv4 DIP"}}}, + {1, "Replace", {{32, "IPv4 SIP"}}}, + {1, "Replace", {{128, "IPv6 DIP"}}}, + {1, "Replace", {{128, "IPv6 SIP"}}}, + {1, "Replace", {{48, "SMAC"}}}, + {1, "Replace", {{48, "DMAC"}}}, + {2, "Update Field", {{16, "uf_vec"}, {32, "uf_data"}}}, + {3, "Tunnel Modify", {{16, "tun_mv"}, {16, "tun_ex_prot"}, {16, "tun_new_prot"}}}, + {4, "TTL Update", {{5, "alt_pfid"}, {12, "alt_vid"}, {10, "rsvd"}, {5, "ttl_op"}}}, + {4, "Replace/Add Outer VLAN", {{16, "tpid"}, {3, "pri"}, {1, "de"}, {12, "vid"}}}, + {4, "Replace/Add Inner", {{16, "tpid"}, {3, "pri"}, {1, "de"}, {12, "vid"}}}, + {0, "Remove outer VLAN", {{0, NULL}}}, + {0, "Remove inner VLAN", {{0, NULL}}}, + {4, "Metadata Update", {{2, "md_op"}, {4, "md_prof"}, {10, "rsvd"}, {32, "md_data"}}}, +}; + +static void mod_decode(uint32_t *data, char *mod_str) +{ + int i; + int j; + int k; + uint16_t mod_vector; + int32_t row_offset = 64; + int32_t read_offset; + int32_t row = 0; + uint32_t val[8]; + char str[256]; + int16_t vect; + uint16_t bit = 0x8000; + + row_offset -= 16; + read_offset = row_offset; + mod_vector = tfc_getbits(data, read_offset, 16); + snprintf(mod_str, + TFC_MOD_STRING_LENGTH, + "\nModify Record: Vector:0x%08x\n", mod_vector); + + for (vect = 15; vect >= 0; vect--) { + if (mod_vector & bit) { + snprintf(str, TFC_STRING_LENGTH_256, "%s: ", mod_data[vect].name); + strcat(mod_str, str); + + for (i = 0; i < mod_data[vect].num_fields; i++) { + row_offset -= mod_data[vect].field[i].num_bits; + if 
(row_offset < 0) { + row++; + row_offset = 64 + row_offset; + } + read_offset = row_offset + (row * 64); + + for (j = 0; j < mod_data[vect].field[i].num_bits / 32; j++) { + val[j] = tfc_getbits(data, read_offset, 32); + read_offset -= 32; + } + + if (mod_data[vect].field[i].num_bits % 32) { + val[j] = tfc_getbits(data, + read_offset, + (mod_data[vect].field[i].num_bits % 32)); + j++; + } + + snprintf(str, + TFC_STRING_LENGTH_256, + "%s:0x", + mod_data[vect].field[i].name); + strcat(mod_str, str); + + switch (mod_data[vect].field[i].num_bits) { + case 128: + for (k = 0; k < 8; k++) { + snprintf(str, + TFC_STRING_LENGTH_256, + "%08x", + val[k]); + strcat(mod_str, str); + } + break; + case 48: + snprintf(str, TFC_STRING_LENGTH_256, "%08x", val[0]); + strcat(mod_str, str); + snprintf(str, + TFC_STRING_LENGTH_256, + "%04x", + (val[1] & 0xffff)); + strcat(mod_str, str); + break; + case 32: + snprintf(str, TFC_STRING_LENGTH_256, "%08x ", val[0]); + strcat(mod_str, str); + break; + case 16: + snprintf(str, TFC_STRING_LENGTH_256, "%04x ", val[0]); + strcat(mod_str, str); + break; + default: + snprintf(str, TFC_STRING_LENGTH_256, "%04x ", + (val[0] & + ((1 << mod_data[vect].field[i].num_bits) - 1))); + strcat(mod_str, str); + break; + } + } + + snprintf(str, TFC_STRING_LENGTH_256, "\n"); + strcat(mod_str, str); + } + + bit = bit >> 1; + } + + snprintf(str, TFC_STRING_LENGTH_256, "\n"); + strcat(mod_str, str); +} + +static void enc_decode(uint32_t *data, char *enc_str) +{ + uint16_t vector; + char str[64]; + uint32_t val[16]; + uint32_t offset = 0; + uint8_t vtag; + uint8_t l2; + uint8_t l3; + uint8_t l4; + uint8_t tunnel; + + vector = tfc_getbits(data, offset, 16); + offset += 16; + + vtag = ((vector >> 2) & 0xf); + l2 = ((vector >> 6) & 0x1); + l3 = ((vector >> 7) & 0x7); + l4 = ((vector >> 10) & 0x7); + tunnel = ((vector >> 13) & 0x7); + + snprintf(enc_str, + TFC_ENC_STRING_LENGTH, + "Encap Record: vector:0x%04x\n", vector); + + snprintf(str, TFC_STRING_LENGTH_64, + 
"Valid:%d EC:%d VTAG:0x%01x L2:%d L3:0x%01x L4:0x%01x Tunnel:0x%01x\n", + (vector & 0x1), + ((vector >> 1) & 0x1), + vtag, + l2, + l3, + l4, + tunnel); + + strcat(enc_str, str); + + if (l2) { /* L2 */ + snprintf(str, TFC_STRING_LENGTH_64, "L2:\n"); + strcat(enc_str, str); + + val[0] = tfc_getbits(data, offset, 32); + offset += 32; + val[1] = tfc_getbits(data, offset, 16); + offset += 16; + + snprintf(str, TFC_STRING_LENGTH_64, "DMAC:0x%08x%04x\n", val[0], val[1]); + strcat(enc_str, str); + } + + if (l3) { /* L3 */ + snprintf(str, TFC_STRING_LENGTH_64, "L3:\n"); + strcat(enc_str, str); + } + + if (l4) { /* L4 */ + snprintf(str, TFC_STRING_LENGTH_64, "L4:\n"); + strcat(enc_str, str); + } + + if (tunnel) { /* Tunnel */ + snprintf(str, TFC_STRING_LENGTH_64, "Tunnel:\n"); + strcat(enc_str, str); + } +} + +static void act_decode(uint32_t *act_ptr, + uint64_t base, + struct act_info_t *act_info) +{ + act_info->valid = false; + act_info->vector = tfc_getbits(act_ptr, 0, 3); + + if (act_info->vector == 1 || + act_info->vector == 4) + act_info->valid = true; + + switch (act_info->vector) { + case 1: + act_info->full.drop = tfc_getbits(act_ptr, 3, 1); + act_info->full.vlan_del_rep = tfc_getbits(act_ptr, 4, 2); + act_info->full.vnic_vport = tfc_getbits(act_ptr, 6, 11); + act_info->full.dest_op = tfc_getbits(act_ptr, 17, 2); + act_info->full.decap_func = tfc_getbits(act_ptr, 19, 5); + act_info->full.mirror = tfc_getbits(act_ptr, 24, 5); + act_info->full.meter_ptr = tfc_getbits(act_ptr, 29, 10); + act_info->full.stat0_ptr = tfc_getbits(act_ptr, 39, 28); + act_info->full.stat0_ing_egr = tfc_getbits(act_ptr, 67, 1); + act_info->full.stat0_ctr_type = tfc_getbits(act_ptr, 68, 2); + act_info->full.stat1_ptr = tfc_getbits(act_ptr, 70, 28); + act_info->full.stat1_ing_egr = tfc_getbits(act_ptr, 98, 1); + act_info->full.stat1_ctr_type = tfc_getbits(act_ptr, 99, 2); + act_info->full.mod_ptr = tfc_getbits(act_ptr, 101, 28); + act_info->full.enc_ptr = tfc_getbits(act_ptr, 129, 28); + 
act_info->full.src_ptr = tfc_getbits(act_ptr, 157, 28); + + if (act_info->full.mod_ptr) + mod_decode((uint32_t *)(base + (act_info->full.mod_ptr << 3)), + act_info->full.mod_str); + if (act_info->full.stat0_ptr) + stat_decode(act_info->full.stat0_str, + 0, + act_info->full.stat0_ctr_type, + (uint32_t *)(base + (act_info->full.stat0_ptr << 3))); + if (act_info->full.stat1_ptr) + stat_decode(act_info->full.stat1_str, + 1, + act_info->full.stat1_ctr_type, + (uint32_t *)(base + (act_info->full.stat1_ptr << 3))); + if (act_info->full.enc_ptr) + enc_decode((uint32_t *)(base + (act_info->full.enc_ptr << 3)), + act_info->full.enc_str); + break; + case 4: + act_info->mcg.nxt_ptr = tfc_getbits(act_ptr, 6, 26); + act_info->mcg.act_hint0 = tfc_getbits(act_ptr, 32, 2); + act_info->mcg.act_rec_ptr0 = tfc_getbits(act_ptr, 34, 26); + act_info->mcg.act_hint1 = tfc_getbits(act_ptr, 60, 2); + act_info->mcg.act_rec_ptr1 = tfc_getbits(act_ptr, 62, 26); + act_info->mcg.act_hint2 = tfc_getbits(act_ptr, 88, 2); + act_info->mcg.act_rec_ptr2 = tfc_getbits(act_ptr, 90, 26); + act_info->mcg.act_hint3 = tfc_getbits(act_ptr, 116, 2); + act_info->mcg.act_rec_ptr3 = tfc_getbits(act_ptr, 118, 26); + act_info->mcg.act_hint4 = tfc_getbits(act_ptr, 144, 2); + act_info->mcg.act_rec_ptr4 = tfc_getbits(act_ptr, 146, 26); + act_info->mcg.act_hint5 = tfc_getbits(act_ptr, 172, 2); + act_info->mcg.act_rec_ptr5 = tfc_getbits(act_ptr, 174, 26); + act_info->mcg.act_hint6 = tfc_getbits(act_ptr, 200, 2); + act_info->mcg.act_rec_ptr6 = tfc_getbits(act_ptr, 202, 26); + act_info->mcg.act_hint7 = tfc_getbits(act_ptr, 228, 2); + act_info->mcg.act_rec_ptr7 = tfc_getbits(act_ptr, 230, 26); + break; + } +} + +static void act_show(struct seq_file *m, struct act_info_t *act_info, uint32_t offset) +{ + if (act_info->valid) { + switch (act_info->vector) { + case 1: + seq_puts(m, "Full Action Record\n"); + seq_puts(m, 
"+----------+--+-+--+--+-----+--+-+------+----Stat0-------+------Stat1-----+----------+----------+----------+\n"); + seq_puts(m, "| Index |V |d|dr|do|vn/p |df|m| mtp |ct|ie| ptr |ct|ie| ptr | mptr | eptr | sptr |\n"); + seq_puts(m, "+----------+--+-+--+--+-----+--+-+------+--+--+----------+--+--+----------+----------+----------+----------+\n"); + + seq_printf(m, " 0x%08x %2d %d %2d %2d 0x%03x %2d %d 0x%04x %2d %2d 0x%08x %2d %2d 0x%08x 0x%08x 0x%08x 0x%08x\n", + offset, + act_info->vector, + act_info->full.drop, + act_info->full.vlan_del_rep, + act_info->full.dest_op, + act_info->full.vnic_vport, + act_info->full.decap_func, + act_info->full.mirror, + act_info->full.meter_ptr, + act_info->full.stat0_ctr_type, + act_info->full.stat0_ing_egr, + act_info->full.stat0_ptr, + act_info->full.stat1_ctr_type, + act_info->full.stat1_ing_egr, + act_info->full.stat1_ptr, + act_info->full.mod_ptr, + act_info->full.enc_ptr, + act_info->full.src_ptr); + if (act_info->full.mod_ptr) + seq_printf(m, "%s", act_info->full.mod_str); + if (act_info->full.stat0_ptr) + seq_printf(m, "%s", act_info->full.stat0_str); + if (act_info->full.stat1_ptr) + seq_printf(m, "%s", act_info->full.stat1_str); + if (act_info->full.enc_ptr) + seq_printf(m, "%s", act_info->full.enc_str); + + break; + case 4: + seq_puts(m, "Multicast Group Record\n"); + seq_puts(m, "+----------+--+----------+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+\n"); + seq_puts(m, "| Index |V | NxtPtr | ActRPtr0 |ah| ActRPtr1 |ah| ActRPtr2 |ah| ActRPtr3 |ah| ActRPtr4 |ah| ActRPtr5 |ah| ActRPtr6 |ah| ActRPtr7 |ah|\n"); + seq_puts(m, "+----------+--+----------+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+----------+--+\n"); + + seq_printf(m, " 0x%08x %2d 0x%08x 0x%08x %2d 0x%08x %2d 0x%08x %2d 0x%08x %2d 0x%08x %2d 0x%08x %2d 0x%08x %2d 0x%08x %2d\n", + offset, + act_info->vector, + act_info->mcg.nxt_ptr, + 
act_info->mcg.act_rec_ptr0, + act_info->mcg.act_hint0, + act_info->mcg.act_rec_ptr1, + act_info->mcg.act_hint1, + act_info->mcg.act_rec_ptr2, + act_info->mcg.act_hint2, + act_info->mcg.act_rec_ptr3, + act_info->mcg.act_hint3, + act_info->mcg.act_rec_ptr4, + act_info->mcg.act_hint4, + act_info->mcg.act_rec_ptr5, + act_info->mcg.act_hint5, + act_info->mcg.act_rec_ptr6, + act_info->mcg.act_hint6, + act_info->mcg.act_rec_ptr7, + act_info->mcg.act_hint7); + break; + } + } +} + +struct stat_fields_s { + uint64_t pkt_cnt; + uint64_t byte_cnt; + union { + struct { + uint32_t timestamp; + uint16_t tcp_flags; + } c_24b; + struct { + uint64_t meter_pkt_cnt; + uint64_t meter_byte_cnt; + } c_32b; + struct { + uint64_t timestamp : 32; + uint64_t tcp_flags : 16; + uint64_t meter_pkt_cnt : 38; + uint64_t meter_byte_cnt : 42; + } c_32b_all; + } t; +}; + +#define STATS_COMMON_FMT \ + "Stats:%d Pkt count:%016lld Byte count:%016lld\n" +#define STATS_METER_FMT \ + "\tMeter pkt count:%016lld Meter byte count:%016lld\n" +#define STATS_TCP_FLAGS_FMT \ + "\tTCP flags:0x%04x timestamp:0x%08x\n" + +enum stat_type { + /** Set to statistic to Foward packet count(64b)/Foward byte + * count(64b) + */ + CFA_BLD_STAT_COUNTER_SIZE_16B = 0, + /** Set to statistic to Forward packet count(64b)/Forward byte + * count(64b)/ TCP Flags(16b)/Timestamp(32b) + */ + CFA_BLD_STAT_COUNTER_SIZE_24B = 1, + /** Set to statistic to Forward packet count(64b)/Forward byte + * count(64b)/Meter(drop or red) packet count(64b)/Meter(drop + * or red) byte count(64b) + */ + CFA_BLD_STAT_COUNTER_SIZE_32B = 2, + /** Set to statistic to Forward packet count(64b)/Forward byte + * count(64b)/Meter(drop or red) packet count(38b)/Meter(drop + * or red) byte count(42b)/TCP Flags(16b)/Timestamp(32b) + */ + CFA_BLD_STAT_COUNTER_SIZE_32B_ALL = 3, +}; + +static void stat_decode(char *str, + uint8_t stat_num, + uint8_t stat_ctr_type, + uint32_t *stat_ptr) +{ + struct stat_fields_s *stats = (struct stat_fields_s *)stat_ptr; + uint64_t 
meter_pkt_cnt; + uint64_t meter_byte_cnt; + uint32_t timestamp; + char tmp0[96]; + + /* Common fields */ + snprintf(str, + TFC_STAT_STRING_LENGTH, + STATS_COMMON_FMT, + stat_num, stats->pkt_cnt, stats->byte_cnt); + + switch (stat_ctr_type) { + case CFA_BLD_STAT_COUNTER_SIZE_16B: + /* Nothing further to do */ + break; + case CFA_BLD_STAT_COUNTER_SIZE_24B: + timestamp = stats->t.c_24b.timestamp; + snprintf(tmp0, + TFC_STRING_LENGTH_96, + STATS_TCP_FLAGS_FMT, + stats->t.c_24b.tcp_flags, + timestamp); + strcat(str, tmp0); + break; + case CFA_BLD_STAT_COUNTER_SIZE_32B: + snprintf(tmp0, + TFC_STRING_LENGTH_96, + STATS_METER_FMT, + stats->t.c_32b.meter_pkt_cnt, + stats->t.c_32b.meter_byte_cnt); + strcat(str, tmp0); + break; + case CFA_BLD_STAT_COUNTER_SIZE_32B_ALL: + meter_pkt_cnt = stats->t.c_32b_all.meter_pkt_cnt; + meter_byte_cnt = stats->t.c_32b_all.meter_byte_cnt; + timestamp = stats->t.c_32b_all.timestamp; + snprintf(tmp0, + TFC_STRING_LENGTH_96, + STATS_METER_FMT STATS_TCP_FLAGS_FMT, + meter_pkt_cnt, + meter_byte_cnt, + stats->t.c_32b_all.tcp_flags, + timestamp); + strcat(str, tmp0); + break; + default: + /* Should never happen since type is 2 bits in size */ + snprintf(tmp0, + TFC_STRING_LENGTH_96, + "Unknown counter type %d\n", stat_ctr_type); + strcat(str, tmp0); + break; + } +} + +static void bucket_decode(struct seq_file *m, + uint32_t *bucket_ptr, + struct bucket_info_t *bucket_info, + struct tfc_ts_mem_cfg *lkup_mem_cfg, + struct tfc_ts_mem_cfg *act_mem_cfg) +{ + int i; + int offset = 0; + uint8_t *em_ptr; + + bucket_info->valid = false; + bucket_info->chain = tfc_getbits(bucket_ptr, 254, 1); + bucket_info->chain_ptr = tfc_getbits(bucket_ptr, 228, 26); + + if (bucket_info->chain || + bucket_info->chain_ptr) + bucket_info->valid = true; + + for (i = 0; i < TFC_BUCKET_ENTRIES; i++) { + bucket_info->entries[i].entry_ptr = tfc_getbits(bucket_ptr, offset, 26); + offset += 26; + bucket_info->entries[i].hash_msb = tfc_getbits(bucket_ptr, offset, 12); + offset += 
12; + + if (bucket_info->entries[i].hash_msb || + bucket_info->entries[i].entry_ptr) { + bucket_info->valid = true; + + em_ptr = (uint8_t *)get_address(lkup_mem_cfg, + bucket_info->entries[i].entry_ptr * 32); + em_decode(m, (uint32_t *)em_ptr, &bucket_info->em_info[i], act_mem_cfg); + } + } +} + +static void bucket_show(struct seq_file *m, struct bucket_info_t *bucket_info, uint32_t offset) +{ + int i; + + if (bucket_info->valid) { + seq_printf(m, "Static Bucket:0x%08x\n", offset); + seq_puts(m, "+-+ +---------+ +----------------------------------- Entries --------------------------------------------------------------+\n"); + seq_puts(m, " C CPtr 0 1 2 3 4 5\n"); + seq_puts(m, "+-+ +---------+ +-----+---------+ +-----+---------+ +-----+---------+ +-----+---------+ +-----+---------+ +------+---------+\n"); + seq_printf(m, " %d 0x%07x", + bucket_info->chain, + bucket_info->chain_ptr); + for (i = 0; i < TFC_BUCKET_ENTRIES; i++) { + seq_printf(m, " 0x%03x 0x%07x", + bucket_info->entries[i].hash_msb, + bucket_info->entries[i].entry_ptr); + } + seq_puts(m, "\n"); + + /* + * Now display each valid EM entry from the bucket + */ + for (i = 0; i < TFC_BUCKET_ENTRIES; i++) { + if (bucket_info->entries[i].entry_ptr != 0) { + if (bucket_info->em_info[i].valid) + em_show(m, &bucket_info->em_info[i]); + else + seq_puts(m, "<<< Invalid LREC >>>\n"); + } + } + + seq_puts(m, "\n"); + } +} + +int tfc_em_show(struct seq_file *m, struct tfc *tfcp, uint8_t tsid, enum cfa_dir dir) +{ + int rc = 0; + bool is_shared; + bool is_bs_owner; + struct tfc_ts_mem_cfg *lkup_mem_cfg; + struct tfc_ts_mem_cfg *act_mem_cfg; + uint32_t bucket_row; + uint32_t bucket_count; + uint8_t *bucket_ptr; + struct bucket_info_t *bucket_info; + uint32_t bucket_offset = 0; + bool valid; + + rc = tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, &valid, NULL); + if (rc != 0) { + seq_printf(m, "%s: failed to get tsid: %d\n", + __func__, rc); + return -EINVAL; + } + if (!valid) { + seq_printf(m, "%s: tsid not allocated 
%d\n", + __func__, tsid); + return -EINVAL; + } + + lkup_mem_cfg = kzalloc(sizeof(*lkup_mem_cfg), GFP_KERNEL); + if (!lkup_mem_cfg) + return -ENOMEM; + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + dir, + CFA_REGION_TYPE_LKUP, + &is_bs_owner, + lkup_mem_cfg); /* Gets rec_cnt */ + if (rc != 0) { + seq_printf(m, "%s: tfo_ts_get_mem_cfg() failed for LKUP: %d\n", + __func__, rc); + kfree(lkup_mem_cfg); + return -EINVAL; + } + + act_mem_cfg = kzalloc(sizeof(*act_mem_cfg), GFP_KERNEL); + if (!act_mem_cfg) { + kfree(lkup_mem_cfg); + return -ENOMEM; + } + + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, + dir, + CFA_REGION_TYPE_ACT, + &is_bs_owner, + act_mem_cfg); /* Gets rec_cnt */ + if (rc != 0) { + seq_printf(m, "%s: tfo_ts_get_mem_cfg() failed for ACT: %d\n", + __func__, rc); + kfree(lkup_mem_cfg); + kfree(act_mem_cfg); + return -EINVAL; + } + + bucket_count = lkup_mem_cfg->lkup_rec_start_offset; + + seq_puts(m, " Lookup Table\n"); + seq_printf(m, " Static bucket count:%d\n", bucket_count); + + bucket_info = kzalloc(sizeof(*bucket_info), GFP_KERNEL); + if (!bucket_info) { + seq_printf(m, "%s: Failed to allocate bucket info struct\n", + __func__); + kfree(lkup_mem_cfg); + kfree(act_mem_cfg); + return -ENOMEM; + } + + /* + * Go through the static buckets looking for valid entries. + * If a valid entry is found then display it and also display + * the EM entries it points to. 
+ */ + for (bucket_row = 0; bucket_row < bucket_count; ) { + bucket_ptr = (uint8_t *)get_address(lkup_mem_cfg, bucket_offset); + bucket_decode(m, (uint32_t *)bucket_ptr, bucket_info, lkup_mem_cfg, act_mem_cfg); + + if (bucket_info->valid) + bucket_show(m, bucket_info, bucket_offset); + + bucket_offset += TFC_BUCKET_SIZE_BYTES; + bucket_row++; + } + + kfree(bucket_info); + kfree(lkup_mem_cfg); + kfree(act_mem_cfg); + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.c new file mode 100644 index 000000000000..0c736b31d522 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.c @@ -0,0 +1,1416 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include "tfc_msg.h" +#include "bnxt_hsi.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tfo.h" + +/* Logging defines */ +#define TFC_RM_MSG_DEBUG 0 + +#define CFA_INVALID_FID 0xFFFF + +/* This is the MAX data we can transport across regular HWRM */ +#define TFC_PCI_BUF_SIZE_MAX 80 + +struct tfc_msg_dma_buf { + void *va_addr; + dma_addr_t pa_addr; +}; + +static int tfc_msg_set_fid(struct bnxt *bp, u16 req_fid, u16 *msg_fid) +{ + /* Set request FID to 0xffff in case the request FID is the same as the + * target FID (bp->fw_fid). If we're on a TVF or if this is a PF, then + * set the FID to the requested FID. + * + * The firmware validates the FID and accepts/rejects the request based + * on these rules: + * + * 1. (request_fid == 0xffff), final_fid = target_fid, accept + * 2. IS_PF(request_fid): + * reject, Only (1) above is allowed + * 3. IS_PF(target_fid) && IS_VF(request_fid): + * if(target_fid == parent_of(request_fid)) accept, else reject + * 4. 
IS_VF(target_fid) && IS_VF(request_fid): + * if(parent_of(target_fid) == parent_of(request_fid)) accept, else reject + * + * Note: for cases 2..4, final_fid = request_fid + */ + if (bp->pf.fw_fid == req_fid) + *msg_fid = CFA_INVALID_FID; + else if (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) + *msg_fid = cpu_to_le16(req_fid); + else + return -EINVAL; + return 0; +} + +/* If data bigger than TFC_PCI_BUF_SIZE_MAX then use DMA method */ +int +tfc_msg_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable, u32 *max_lkup_rec_cnt, + u32 *max_act_rec_cnt, u8 *max_lkup_static_buckets_exp) +{ + struct hwrm_tfc_tbl_scope_qcaps_output *resp; + struct hwrm_tfc_tbl_scope_qcaps_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + if (!tbl_scope_capable) { + netdev_dbg(bp->dev, "%s: Invalid tbl_scope_capable pointer\n", __func__); + return -EINVAL; + } + + *tbl_scope_capable = false; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (resp->tbl_scope_capable) { + *tbl_scope_capable = true; + if (max_lkup_rec_cnt) + *max_lkup_rec_cnt = + le32_to_cpu(resp->max_lkup_rec_cnt); + if (max_act_rec_cnt) + *max_act_rec_cnt = + le32_to_cpu(resp->max_act_rec_cnt); + if (max_lkup_static_buckets_exp) + *max_lkup_static_buckets_exp = + resp->max_lkup_static_buckets_exp; + } + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tbl_scope_id_alloc(struct tfc *tfcp, u16 fid, bool shared, enum cfa_app_type app_type, + u8 *tsid, bool *first) +{ + struct hwrm_tfc_tbl_scope_id_alloc_output *resp; + struct hwrm_tfc_tbl_scope_id_alloc_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + if (!tsid) { + netdev_dbg(bp->dev, "%s: Invalid tsid pointer\n", __func__); + return -EINVAL; + } + + rc = 
hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_ID_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->app_type = app_type; + req->shared = shared; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *tsid = resp->tsid; + if (first) { + if (resp->first) + *first = true; + else + *first = false; + } + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: tsid %d first %d Success\n", __func__, + *tsid, *first); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +#define RE_LKUP 0 +#define RE_ACT 1 +#define TE_LKUP 2 +#define TE_ACT 3 + +/* Given the direction and the region return the backing store cfg instance */ +static int tfc_tbl_scope_region_dir_to_inst(struct bnxt *bp, + enum cfa_region_type region, + enum cfa_dir dir, u16 *instance) +{ + if (!instance) { + netdev_dbg(bp->dev, "%s: Invalid tfcp pointer\n", __func__); + return -EINVAL; + } + switch (region) { + case CFA_REGION_TYPE_LKUP: + if (dir == CFA_DIR_RX) + *instance = RE_LKUP; + else + *instance = TE_LKUP; + break; + case CFA_REGION_TYPE_ACT: + if (dir == CFA_DIR_RX) + *instance = RE_ACT; + else + *instance = TE_ACT; + break; + default: + netdev_dbg(bp->dev, "%s: Invalid region\n", __func__); + return -EINVAL; + } + return 0; +} + +/* Given the page_sz_bytes and pbl_level, encode the pg_sz_pbl_level */ +static int tfc_tbl_scope_pg_sz_pbl_level_encode(struct bnxt *bp, + u32 page_sz_in_bytes, + u8 pbl_level, + u8 *page_sz_pbl_level) +{ + u8 page_sz; + + switch (page_sz_in_bytes) { + case 0x1000: /* 4K */ + page_sz = FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K; + break; + case 0x2000: /* 8K */ + page_sz = FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K; + break; + case 0x10000: /* 64K */ + page_sz = FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K; + break; + case 0x200000: /* 2M */ + page_sz = 
FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M; + break; + case 0x40000000: /* 1G */ + page_sz = FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G; + break; + default: + netdev_dbg(bp->dev, "%s: Unsupported page size (0x%x)\n", __func__, + page_sz_in_bytes); + return -EINVAL; + } + /* Page size value is already shifted */ + *page_sz_pbl_level = page_sz; + if (pbl_level > 2) { + netdev_dbg(bp->dev, "%s: Invalid pbl_level(%d)\n", __func__, pbl_level); + return -EINVAL; + } + *page_sz_pbl_level |= pbl_level; + return 0; +} + +int +tfc_msg_backing_store_cfg_v2(struct tfc *tfcp, u8 tsid, enum cfa_dir dir, + enum cfa_region_type region, u64 base_addr, + u8 pbl_level, u32 pbl_page_sz_in_bytes, + u32 rec_cnt, u8 static_bkt_cnt_exp, + bool cfg_done) +{ + struct hwrm_func_backing_store_cfg_v2_input *req; + struct ts_split_entries *ts_sp; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_CFG_V2); + if (rc) + return rc; + + ts_sp = (struct ts_split_entries *)&req->split_entry_0; + ts_sp->tsid = tsid; + ts_sp->lkup_static_bkt_cnt_exp[dir] = static_bkt_cnt_exp; + ts_sp->region_num_entries = rec_cnt; + if (cfg_done) + req->flags |= FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE; + + rc = tfc_tbl_scope_region_dir_to_inst(bp, region, dir, &req->instance); + if (rc) + return rc; + + req->page_dir = cpu_to_le64(base_addr); + req->num_entries = cpu_to_le32(rec_cnt); + + req->type = FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE; + + rc = tfc_tbl_scope_pg_sz_pbl_level_encode(bp, pbl_page_sz_in_bytes, + pbl_level, &req->page_size_pbl_level); + if (rc) + return rc; + + /* send the request */ + rc = hwrm_req_send(bp, req); + + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tbl_scope_deconfig(struct tfc *tfcp, u8 tsid) +{ + struct hwrm_tfc_tbl_scope_deconfig_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_DECONFIG); + if (rc) + return rc; + 
+ req->tsid = tsid; + rc = hwrm_req_send(bp, req); + + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tbl_scope_fid_add(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt) +{ + struct hwrm_tfc_tbl_scope_fid_add_output *resp; + struct hwrm_tfc_tbl_scope_fid_add_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_FID_ADD); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->tsid = tsid; + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (fid_cnt) + *fid_cnt = le16_to_cpu(resp->fid_cnt); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tbl_scope_fid_rem(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt) +{ + struct hwrm_tfc_tbl_scope_fid_rem_output *resp; + struct hwrm_tfc_tbl_scope_fid_rem_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_FID_REM); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->tsid = tsid; + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (fid_cnt) + *fid_cnt = le16_to_cpu(resp->fid_cnt); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_idx_tbl_alloc(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_track_type tt, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, u16 *id) + +{ + struct hwrm_tfc_idx_tbl_alloc_output *resp; + struct hwrm_tfc_idx_tbl_alloc_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, 
HWRM_TFC_IDX_TBL_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_ALLOC_REQ_FLAGS_DIR; + + if (tt == CFA_TRACK_TYPE_FID) + req->track_type = TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID; + else + req->track_type = TFC_IDX_TBL_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->subtype = cpu_to_le16(subtype); + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *id = le16_to_cpu(resp->idx_tbl_id); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: idx_tbl_id %d Success\n", __func__, *id); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_idx_tbl_alloc_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_track_type tt, + enum cfa_dir dir, enum cfa_resource_subtype_idx_tbl subtype, + const u32 *dev_data, u8 data_size, u16 *id) + +{ + struct hwrm_tfc_idx_tbl_alloc_set_output *resp; + struct hwrm_tfc_idx_tbl_alloc_set_input *req; + struct tfc_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfcp->bp; + u8 *data = NULL; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDX_TBL_ALLOC_SET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; + + if (tt == CFA_TRACK_TYPE_FID) + req->track_type = TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID; + else + req->track_type = TFC_IDX_TBL_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_SID; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->subtype = 
cpu_to_le16(subtype); + req->data_size = cpu_to_le16(data_size); + + if (req->data_size >= sizeof(req->dev_data)) { + /* Prepare DMA buffer */ + req->flags |= TFC_IDX_TBL_SET_REQ_FLAGS_DMA; + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, req->data_size, + &buf.pa_addr, GFP_KERNEL); + + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + data = buf.va_addr; + req->dma_addr = cpu_to_le64(buf.pa_addr); + } else { + data = &req->dev_data[0]; + } + + memcpy(&data[0], &dev_data[0], req->data_size); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *id = le16_to_cpu(resp->idx_tbl_id); + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, req->data_size, buf.va_addr, buf.pa_addr); + + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: idx_tbl_id %d Success\n", __func__, *id); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_idx_tbl_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, u16 id, + const u32 *dev_data, u8 data_size) +{ + struct hwrm_tfc_idx_tbl_set_input *req; + struct tfc_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfcp->bp; + int dma_size = 0, rc; + u8 *data = NULL; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDX_TBL_SET); + if (rc) + return rc; + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_SET_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_SET_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_SET_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_SET_REQ_FLAGS_DIR; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->idx_tbl_id = cpu_to_le16(id); + req->subtype = cpu_to_le16(subtype); + req->data_size = cpu_to_le16(data_size); + + if (req->data_size >= sizeof(req->dev_data)) { + /* Prepare DMA buffer */ + req->flags |= TFC_IDX_TBL_SET_REQ_FLAGS_DMA; + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + buf.va_addr = 
dma_alloc_coherent(&bp->pdev->dev, req->data_size, + &buf.pa_addr, GFP_KERNEL); + + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + data = buf.va_addr; + req->dma_addr = cpu_to_le64(buf.pa_addr); + } else { + data = &req->dev_data[0]; + } + + memcpy(&data[0], &dev_data[0], req->data_size); + rc = hwrm_req_send(bp, req); + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size, buf.va_addr, buf.pa_addr); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_idx_tbl_get(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, u16 id, + u32 *dev_data, u8 *data_size) +{ + struct hwrm_tfc_idx_tbl_get_output *resp; + struct hwrm_tfc_idx_tbl_get_input *req; + struct tfc_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfcp->bp; + int dma_size, rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDX_TBL_GET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + /* Prepare DMA buffer */ + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + dma_size = sizeof(*data_size); + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size, + &buf.pa_addr, GFP_KERNEL); + + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_GET_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_GET_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_GET_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_GET_REQ_FLAGS_DIR; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->idx_tbl_id = cpu_to_le16(id); + req->subtype = cpu_to_le16(subtype); + req->buffer_size = cpu_to_le16(*data_size); + req->dma_addr = cpu_to_le64(buf.pa_addr); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + memcpy(dev_data, buf.va_addr, resp->data_size); + *data_size = le16_to_cpu(resp->data_size); + +cleanup: + if (buf.va_addr) + 
dma_free_coherent(&bp->pdev->dev, dma_size, + buf.va_addr, buf.pa_addr); + + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: data_size %d Success\n", __func__, *data_size); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_idx_tbl_free(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, u16 id) +{ + struct hwrm_tfc_idx_tbl_free_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDX_TBL_FREE); + if (rc) + return rc; + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_FREE_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_FREE_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_FREE_REQ_FLAGS_DIR; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + return rc; + + req->sid = cpu_to_le16(sid); + req->idx_tbl_id = cpu_to_le16(id); + req->subtype = cpu_to_le16(subtype); + + rc = hwrm_req_send(bp, req); + + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int tfc_msg_global_id_alloc(struct tfc *tfcp, u16 fid, u16 sid, + enum tfc_domain_id domain_id, u16 req_cnt, + const struct tfc_global_id_req *glb_id_req, + struct tfc_global_id *rsp, u16 *rsp_cnt, + bool *first) +{ + struct hwrm_tfc_global_id_alloc_output *resp; + struct hwrm_tfc_global_id_alloc_input *req; + struct tfc_global_id_hwrm_req *req_data; + struct tfc_global_id_hwrm_rsp *rsp_data; + struct tfc_msg_dma_buf req_buf = { 0 }; + struct tfc_msg_dma_buf rsp_buf = { 0 }; + int i = 0, rc, resp_cnt = 0; + struct bnxt *bp = tfcp->bp; + int dma_size_req = 0; + int dma_size_rsp = 0; + + rc = hwrm_req_init(bp, req, HWRM_TFC_GLOBAL_ID_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + /* Prepare DMA buffers */ + dma_size_req = req_cnt * sizeof(struct tfc_global_id_req); + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + req_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, 
dma_size_req, + &req_buf.pa_addr, GFP_KERNEL); + + if (!req_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + for (i = 0; i < req_cnt; i++) + resp_cnt += glb_id_req->cnt; + + dma_size_rsp = resp_cnt * sizeof(struct tfc_global_id); + rsp_buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, dma_size_rsp, + &rsp_buf.pa_addr, GFP_KERNEL); + + if (!rsp_buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + /* Populate the request */ + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->global_id = cpu_to_le16(domain_id); + req->req_cnt = req_cnt; + req->req_addr = cpu_to_le64(req_buf.pa_addr); + req->resc_addr = cpu_to_le64(rsp_buf.pa_addr); + req_data = (struct tfc_global_id_hwrm_req *)req_buf.va_addr; + for (i = 0; i < req_cnt; i++) { + req_data[i].rtype = cpu_to_le16(glb_id_req[i].rtype); + req_data[i].dir = cpu_to_le16(glb_id_req[i].dir); + req_data[i].subtype = cpu_to_le16(glb_id_req[i].rsubtype); + req_data[i].cnt = cpu_to_le16(glb_id_req[i].cnt); + } + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (first) { + if (resp->first) + *first = true; + else + *first = false; + } + + /* Process the response + * Should always get expected number of entries + */ + if (le32_to_cpu(resp->rsp_cnt) != *rsp_cnt) { + rc = -EINVAL; + netdev_dbg(bp->dev, "Alloc message size error, rc:%d\n", rc); + goto cleanup; + } + + rsp_data = (struct tfc_global_id_hwrm_rsp *)rsp_buf.va_addr; + for (i = 0; i < resp->rsp_cnt; i++) { + rsp[i].rtype = le32_to_cpu(rsp_data[i].rtype); + rsp[i].dir = le32_to_cpu(rsp_data[i].dir); + rsp[i].rsubtype = le32_to_cpu(rsp_data[i].subtype); + rsp[i].id = le32_to_cpu(rsp_data[i].id); + } + +cleanup: + if (req_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_req, + req_buf.va_addr, req_buf.pa_addr); + + if (rsp_buf.va_addr) + dma_free_coherent(&bp->pdev->dev, dma_size_rsp, + rsp_buf.va_addr, rsp_buf.pa_addr); + + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: 
first %d Success\n", __func__, *first); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tbl_scope_config_get(struct tfc *tfcp, u8 tsid, bool *configured) +{ + struct hwrm_tfc_tbl_scope_config_get_output *resp; + struct hwrm_tfc_tbl_scope_config_get_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TBL_SCOPE_CONFIG_GET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->tsid = tsid; + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *configured = le16_to_cpu(resp->configured) ? true : false; + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: configured %d Success\n", __func__, *configured); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_session_id_alloc(struct tfc *tfcp, u16 fid, u16 *sid) +{ + struct hwrm_tfc_session_id_alloc_output *resp; + struct hwrm_tfc_session_id_alloc_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_SESSION_ID_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *sid = le16_to_cpu(resp->sid); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: sid %d Success\n", __func__, *sid); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_session_fid_add(struct tfc *tfcp, u16 fid, u16 sid, u16 *fid_cnt) +{ + struct hwrm_tfc_session_fid_add_output *resp; + struct hwrm_tfc_session_fid_add_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_SESSION_FID_ADD); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = 
cpu_to_le16(sid); + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (fid_cnt) + *fid_cnt = le16_to_cpu(resp->fid_cnt); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_session_fid_rem(struct tfc *tfcp, u16 fid, u16 sid, u16 *fid_cnt) +{ + struct hwrm_tfc_session_fid_rem_output *resp; + struct hwrm_tfc_session_fid_rem_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_SESSION_FID_REM); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + if (fid_cnt) + *fid_cnt = le16_to_cpu(resp->fid_cnt); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +static int tfc_msg_set_tt(struct bnxt *bp, enum cfa_track_type tt, u8 *ptt) +{ + switch (tt) { + case CFA_TRACK_TYPE_SID: + *ptt = TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID; + break; + case CFA_TRACK_TYPE_FID: + *ptt = TFC_IDENT_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID; + break; + default: + netdev_dbg(bp->dev, "%s: Invalid tt[%u]\n", __func__, tt); + return -EINVAL; + } + + return 0; +} + +int tfc_msg_identifier_alloc(struct tfc *tfcp, enum cfa_dir dir, + enum cfa_resource_subtype_ident subtype, + enum cfa_track_type tt, u16 fid, u16 sid, u16 *ident_id) +{ + struct hwrm_tfc_ident_alloc_output *resp; + struct hwrm_tfc_ident_alloc_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDENT_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->flags = (dir == CFA_DIR_TX ? 
+ TFC_IDENT_ALLOC_REQ_FLAGS_DIR_TX : + TFC_IDENT_ALLOC_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_tt(bp, tt, &req->track_type); + if (rc) + goto cleanup; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->subtype = subtype; + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *ident_id = le16_to_cpu(resp->ident_id); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: ident_id %d Success\n", __func__, *ident_id); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int tfc_msg_identifier_free(struct tfc *tfcp, enum cfa_dir dir, + enum cfa_resource_subtype_ident subtype, + u16 fid, u16 sid, u16 ident_id) +{ + struct hwrm_tfc_ident_free_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IDENT_FREE); + if (rc) + return rc; + + req->flags = (dir == CFA_DIR_TX ? + TFC_IDENT_FREE_REQ_FLAGS_DIR_TX : + TFC_IDENT_FREE_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + return rc; + + req->sid = cpu_to_le16(sid); + req->subtype = subtype; + req->ident_id = ident_id; + + /* send the request */ + rc = hwrm_req_send(bp, req); + + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + return rc; +} + +int +tfc_msg_tcam_alloc(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + enum cfa_track_type tt, u16 pri, u16 key_sz_bytes, + u16 *tcam_id) +{ + struct hwrm_tfc_tcam_alloc_output *resp; + struct hwrm_tfc_tcam_alloc_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TCAM_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->flags = (dir == CFA_DIR_TX ? + TFC_TCAM_ALLOC_REQ_FLAGS_DIR_TX : + TFC_TCAM_ALLOC_REQ_FLAGS_DIR_RX); + + req->track_type = (tt == CFA_TRACK_TYPE_FID ? 
+ TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_FID : + TFC_TCAM_ALLOC_REQ_TRACK_TYPE_TRACK_TYPE_SID); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->subtype = cpu_to_le16(subtype); + req->priority = cpu_to_le16(pri); + req->key_size = cpu_to_le16(key_sz_bytes); + + /* send the request */ + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *tcam_id = resp->idx; + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: tcam_id %d Success\n", __func__, *tcam_id); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tcam_alloc_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + enum cfa_track_type tt, u16 *tcam_id, u16 pri, + const u8 *key, u8 key_size, const u8 *mask, + const u8 *remap, u8 remap_size) +{ + struct hwrm_tfc_tcam_alloc_set_output *resp; + struct hwrm_tfc_tcam_alloc_set_input *req; + struct tfc_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfcp->bp; + int data_size = 0, rc; + u8 *data = NULL; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TCAM_ALLOC_SET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; + else + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; + + req->track_type = (tt == CFA_TRACK_TYPE_FID ? 
+ TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_FID : + TFC_TCAM_ALLOC_SET_REQ_TRACK_TYPE_TRACK_TYPE_SID); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->subtype = cpu_to_le16(subtype); + req->key_size = cpu_to_le16(key_size); + req->priority = cpu_to_le16(pri); + req->result_size = cpu_to_le16(remap_size); + data_size = 2 * req->key_size + req->result_size; + + if (data_size > TFC_PCI_BUF_SIZE_MAX) { + /* Prepare DMA buffer */ + req->flags |= TFC_TCAM_ALLOC_SET_REQ_FLAGS_DMA; + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, data_size, + &buf.pa_addr, GFP_KERNEL); + + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + data = buf.va_addr; + req->dma_addr = cpu_to_le64(buf.pa_addr); + } else { + data = &req->dev_data[0]; + } + + memcpy(&data[0], &key, key_size * sizeof(u32)); + memcpy(&data[key_size], &mask, key_size * sizeof(u32)); + memcpy(&data[key_size * 2], &remap, remap_size * sizeof(u32)); + + rc = hwrm_req_send(bp, req); + if (rc) + goto cleanup; + + *tcam_id = resp->tcam_id; + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, data_size, + buf.va_addr, buf.pa_addr); + + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: tcam_id %d Success\n", __func__, *tcam_id); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tcam_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + u16 tcam_id, const u8 *key, u8 key_size, + const u8 *mask, const u8 *remap, + u8 remap_size) +{ + struct hwrm_tfc_tcam_set_input *req; + struct tfc_msg_dma_buf buf = { 0 }; + struct bnxt *bp = tfcp->bp; + int data_size = 0, rc; + u8 *data = NULL; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TCAM_SET); + if (rc) + return rc; + + if (dir == CFA_DIR_RX) + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_RX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; 
+ else + req->flags |= TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR_TX & + TFC_IDX_TBL_ALLOC_SET_REQ_FLAGS_DIR; + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->tcam_id = cpu_to_le16(tcam_id); + req->subtype = cpu_to_le16(subtype); + req->key_size = cpu_to_le16(key_size); + req->result_size = cpu_to_le16(remap_size); + data_size = 2 * req->key_size + req->result_size; + + if (data_size > TFC_PCI_BUF_SIZE_MAX) { + req->flags |= TF_TCAM_SET_REQ_FLAGS_DMA; + hwrm_req_alloc_flags(bp, req, GFP_KERNEL | __GFP_ZERO); + buf.va_addr = dma_alloc_coherent(&bp->pdev->dev, data_size, + &buf.pa_addr, GFP_KERNEL); + + if (!buf.va_addr) { + rc = -ENOMEM; + goto cleanup; + } + + data = buf.va_addr; + req->dma_addr = cpu_to_le64(buf.pa_addr); + } else { + data = &req->dev_data[0]; + } + + memcpy(&data[0], key, key_size); + memcpy(&data[key_size], mask, key_size); + memcpy(&data[key_size * 2], remap, remap_size); + + rc = hwrm_req_send(bp, req); + +cleanup: + if (buf.va_addr) + dma_free_coherent(&bp->pdev->dev, data_size, + buf.va_addr, buf.pa_addr); + + if (!rc) + netdev_dbg(bp->dev, "%s: Success\n", __func__); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tcam_get(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + u16 tcam_id, u8 *key, u8 *key_size, + u8 *mask, u8 *remap, u8 *remap_size) +{ + struct hwrm_tfc_tcam_get_output *resp; + struct hwrm_tfc_tcam_get_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TCAM_GET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->flags = (dir == CFA_DIR_TX ? 
+ TFC_TCAM_GET_REQ_FLAGS_DIR_TX : + TFC_TCAM_GET_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + goto cleanup; + + req->sid = cpu_to_le16(sid); + req->tcam_id = cpu_to_le16(tcam_id); + req->subtype = cpu_to_le16(subtype); + + rc = hwrm_req_send(bp, req); + if (rc || + *key_size < le16_to_cpu(resp->key_size) || + *remap_size < le16_to_cpu(resp->result_size)) { + netdev_dbg(bp->dev, "Key buffer is too small, rc:%d\n", -EINVAL); + goto cleanup; + } + + *key_size = resp->key_size; + *remap_size = resp->result_size; + memcpy(key, &resp->dev_data[0], resp->key_size); + memcpy(mask, &resp->dev_data[resp->key_size], resp->key_size); + memcpy(remap, &resp->dev_data[resp->key_size * 2], resp->result_size); + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: key_size %d remap_size %d Success\n", + __func__, *key_size, *remap_size); + else + rc = -EINVAL; + + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} + +int +tfc_msg_tcam_free(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, u16 tcam_id) +{ + struct hwrm_tfc_tcam_free_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_TCAM_FREE); + if (rc) + return rc; + + req->flags = (dir == CFA_DIR_TX ? 
+ TFC_TCAM_FREE_REQ_FLAGS_DIR_TX : + TFC_TCAM_FREE_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + return rc; + + req->sid = cpu_to_le16(sid); + req->tcam_id = cpu_to_le16(tcam_id); + req->subtype = cpu_to_le16(subtype); + + rc = hwrm_req_send(bp, req); + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + return rc; +} + +int +tfc_msg_if_tbl_set(struct tfc *tfcp, u16 fid, u16 sid, + enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype, + u16 index, u8 data_size, const u8 *data) +{ + struct hwrm_tfc_if_tbl_set_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IF_TBL_SET); + if (rc) + return rc; + + req->flags = (dir == CFA_DIR_TX ? + TFC_IF_TBL_SET_REQ_FLAGS_DIR_TX : + TFC_IF_TBL_SET_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) + return rc; + req->sid = cpu_to_le16(sid); + + req->index = cpu_to_le16(index); + req->subtype = cpu_to_le16(subtype); + req->data_size = data_size; + memcpy(req->data, data, data_size); + + rc = hwrm_req_send(bp, req); + if (rc) + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + return rc; +} + +int +tfc_msg_if_tbl_get(struct tfc *tfcp, u16 fid, u16 sid, + enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype, + u16 index, u8 *data_size, u8 *data) +{ + struct hwrm_tfc_if_tbl_get_output *resp; + struct hwrm_tfc_if_tbl_get_input *req; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TFC_IF_TBL_GET); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + + req->flags = (dir == CFA_DIR_TX ? 
+ TFC_IF_TBL_GET_REQ_FLAGS_DIR_TX : + TFC_IF_TBL_GET_REQ_FLAGS_DIR_RX); + + rc = tfc_msg_set_fid(bp, fid, &req->fid); + if (rc) { + netdev_dbg(bp->dev, "%s: set fid Failed: %d\n", __func__, rc); + goto cleanup; + } + + req->sid = cpu_to_le16(sid); + + req->index = cpu_to_le16(index); + req->subtype = cpu_to_le16(subtype); + req->data_size = cpu_to_le16(*data_size); + + rc = hwrm_req_send(bp, req); + if (rc) { + netdev_dbg(bp->dev, "%s: hwrm req send Failed: %d\n", __func__, rc); + goto cleanup; + } + + if (*data_size < le16_to_cpu(resp->data_size)) { + netdev_dbg(bp->dev, + "Table buffer is too small %d limit %d\n", + *data_size, resp->data_size); + rc = -EINVAL; + goto cleanup; + } + + memcpy(data, resp->data, *data_size); + *data_size = resp->data_size; + +cleanup: + hwrm_req_drop(bp, req); + + if (!rc) + netdev_dbg(bp->dev, "%s: data_size %d Success\n", __func__, *data_size); + else + netdev_dbg(bp->dev, "%s: Failed: %d\n", __func__, rc); + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.h new file mode 100644 index 000000000000..bae626e4038f --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_msg.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ + +#include "tfc.h" +#include "tfo.h" + +/* HWRM Direct messages */ + +int +tfc_msg_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable, u32 *max_lkup_rec_cnt, + u32 *max_act_rec_cnt, u8 *max_lkup_static_buckets_exp); + +int tfc_msg_tbl_scope_id_alloc(struct tfc *tfcp, u16 fid, bool shared, + enum cfa_app_type app_type, u8 *tsid, + bool *first); + +int +tfc_msg_backing_store_cfg_v2(struct tfc *tfcp, u8 tsid, enum cfa_dir dir, + enum cfa_region_type region, u64 base_addr, + u8 pbl_level, u32 pbl_page_sz, + u32 rec_cnt, u8 static_bkt_cnt_exp, + bool cfg_done); + +int +tfc_msg_tbl_scope_deconfig(struct tfc *tfcp, u8 tsid); + +int +tfc_msg_tbl_scope_fid_add(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt); + +int +tfc_msg_tbl_scope_fid_rem(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt); + +int +tfc_msg_idx_tbl_alloc(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_track_type tt, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl rsubtype, u16 *id); + +int +tfc_msg_idx_tbl_alloc_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_track_type tt, + enum cfa_dir dir, enum cfa_resource_subtype_idx_tbl subtype, + const u32 *dev_data, u8 data_size, u16 *id); + +int +tfc_msg_idx_tbl_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, + u16 id, const u32 *dev_data, u8 data_size); + +int +tfc_msg_idx_tbl_get(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, + u16 id, u32 *dev_data, u8 *data_size); + +int +tfc_msg_idx_tbl_free(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_idx_tbl subtype, u16 id); + +int tfc_msg_global_id_alloc(struct tfc *tfcp, u16 fid, u16 sid, + enum tfc_domain_id domain_id, u16 req_cnt, + const struct tfc_global_id_req *glb_id_req, + struct tfc_global_id *rsp, u16 *rsp_cnt, + bool *first); +int +tfc_msg_session_id_alloc(struct tfc *tfcp, u16 fid, u16 *tsid); + +int +tfc_msg_session_fid_add(struct tfc *tfcp, u16 
fid, u16 sid, u16 *fid_cnt); + +int +tfc_msg_session_fid_rem(struct tfc *tfcp, u16 fid, u16 sid, u16 *fid_cnt); + +int tfc_msg_identifier_alloc(struct tfc *tfcp, enum cfa_dir dir, + enum cfa_resource_subtype_ident subtype, + enum cfa_track_type tt, u16 fid, u16 sid, + u16 *ident_id); + +int tfc_msg_identifier_free(struct tfc *tfcp, enum cfa_dir dir, + enum cfa_resource_subtype_ident subtype, + u16 fid, u16 sid, u16 ident_id); +#ifndef TFC_FORCE_POOL_0 +int +tfc_msg_tbl_scope_pool_alloc(struct tfc *tfcp, + u8 tsid, + enum cfa_dir dir, + enum tfc_ts_table_type type, + u16 *pool_id, + u8 *lkup_pool_sz_exp); + +int +tfc_msg_tbl_scope_pool_free(struct tfc *tfcp, + u8 tsid, + enum cfa_dir dir, + enum tfc_ts_table_type type, + u16 pool_id); +#endif /* !TFC_FORCE_POOL_0 */ + +int +tfc_msg_tbl_scope_config_get(struct tfc *tfcp, u8 tsid, bool *configured); + +int +tfc_msg_tcam_alloc(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + enum cfa_track_type tt, u16 pri, u16 key_sz_words, + u16 *tcam_id); + +int +tfc_msg_tcam_alloc_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + enum cfa_track_type tt, u16 *tcam_id, u16 pri, + const u8 *key, u8 key_size, const u8 *mask, + const u8 *remap, u8 remap_size); + +int +tfc_msg_tcam_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + u16 tcam_id, const u8 *key, u8 key_size, + const u8 *mask, const u8 *remap, + u8 remap_size); + +int +tfc_msg_tcam_get(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, + u16 tcam_id, u8 *key, u8 *key_size, + u8 *mask, u8 *remap, u8 *remap_size); + +int +tfc_msg_tcam_free(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_tcam subtype, u16 tcam_id); +int +tfc_msg_if_tbl_set(struct tfc *tfcp, u16 fid, u16 sid, enum cfa_dir dir, + enum cfa_resource_subtype_if_tbl subtype, + u16 index, u8 
data_size, const u8 *data); +int +tfc_msg_if_tbl_get(struct tfc *tfcp, u16 fid, u16 sid, + enum cfa_dir dir, enum cfa_resource_subtype_if_tbl subtype, + u16 index, u8 *data_size, u8 *data); diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.c new file mode 100644 index 000000000000..444592318b4d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "tfc.h" +#include "tfc_priv.h" + +int +tfc_get_fid(struct tfc *tfcp, u16 *fw_fid) +{ + struct bnxt *bp = tfcp->bp; + + if (!fw_fid) { + netdev_dbg(bp->dev, "%s: Invalid fw_fid pointer\n", __func__); + return -EINVAL; + } + + *fw_fid = bp->vf.fw_fid; + + return 0; +} + +int +tfc_get_pfid(struct tfc *tfcp, u16 *pfid) +{ + struct bnxt *bp = tfcp->bp; + + if (!pfid) { + netdev_dbg(bp->dev, "%s: Invalid pfid pointer\n", __func__); + return -EINVAL; + } + + if (BNXT_VF(bp)) + *pfid = bp->vf.fw_fid; + else + *pfid = bp->pf.fw_fid; + + return 0; +} + +int +tfc_bp_is_pf(struct tfc *tfcp, bool *is_pf) +{ + struct bnxt *bp = tfcp->bp; + + if (!is_pf) { + netdev_dbg(bp->dev, "%s: invalid is_pf pointer\n", __func__); + return -EINVAL; + } + + if (BNXT_PF(bp)) { + *is_pf = true; + return 0; + } + *is_pf = false; + return 0; +} + +int tfc_bp_vf_max(struct tfc *tfcp, u16 *max_vf) +{ + struct bnxt *bp = tfcp->bp; + + if (!max_vf) { + netdev_dbg(bp->dev, "%s: invalid max_vf pointer\n", __func__); + return -EINVAL; + } + + if (!BNXT_PF(bp)) { + netdev_dbg(bp->dev, "%s: not a PF\n", __func__); + return -EINVAL; + } + + /* If not sriov, no vfs enabled */ + if (bp->pf.max_vfs) + *max_vf = bp->pf.first_vf_id + bp->pf.max_vfs; + else + *max_vf = bp->pf.fw_fid; + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.h 
b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.h new file mode 100644 index 000000000000..2009dcf6000d --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_priv.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#ifndef _TFC_PRIV_H_ +#define _TFC_PRIV_H_ + +#include "tfc.h" + +/** + * Get the FID for this DPDK port/function. + * + * @tfcp: Pointer to TFC handle + * @fw_fid: The function ID + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_get_fid(struct tfc *tfcp, u16 *fw_fid); + +/** + * Get the PFID for this DPDK port/function. + * + * @tfcp: Pointer to TFC handle + * @pfid: The Physical Function ID for this port/function + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_get_pfid(struct tfc *tfcp, u16 *pfid); + +/** + * Is this DPDK port/function a PF? + * + * @tfcp: Pointer to TFC handle + * @is_pf: If true, the DPDK port is a PF (as opposed to a VF) + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_bp_is_pf(struct tfc *tfcp, bool *is_pf); + +/** + * Get the maximum VF for the PF + * + * @tfcp: Pointer to TFC handle + * @max_vf: The maximum VF for the PF (only valid on a PF) + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfc_bp_vf_max(struct tfc *tfcp, u16 *max_vf); + +#endif /* _TFC_PRIV_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_session.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_session.c new file mode 100644 index 000000000000..c471835ffbfa --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_session.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include "tfc.h" + +#include "tfc_msg.h" +#include "cfa_types.h" +#include "tfo.h" +#include "bnxt_compat.h" +#include "bnxt.h" + +int tfc_session_id_alloc(struct tfc *tfcp, u16 fid, u16 *sid) +{ + struct bnxt *bp = tfcp->bp; + u16 current_sid; + int rc; + + if (!sid) { + netdev_dbg(bp->dev, "%s: Invalid sid pointer\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, ¤t_sid); + if (!rc) { + netdev_dbg(bp->dev, "%s: Cannot allocate SID, current session is %u.\n", __func__, + current_sid); + return -EBUSY; + } else if (rc != -ENODATA) { + netdev_dbg(bp->dev, "%s: Getting current sid failed, rc:%d.\n", __func__, -rc); + return rc; + } + /* -ENODATA ==> current SID is invalid */ + + rc = tfc_msg_session_id_alloc(tfcp, fid, sid); + if (rc) { + netdev_dbg(bp->dev, "%s: session id alloc message failed, rc:%d\n", __func__, -rc); + return rc; + } + + rc = tfo_sid_set(tfcp->tfo, *sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to store session id, rc:%d\n", __func__, -rc); + return rc; + } + + return rc; +} + +int tfc_session_id_set(struct tfc *tfcp, u16 sid) +{ + u16 current_sid = INVALID_SID; + struct bnxt *bp = tfcp->bp; + int rc; + + rc = tfo_sid_get(tfcp->tfo, ¤t_sid); + if (!rc) { + /* SID is valid if rc == 0 */ + if (current_sid != sid) { + netdev_dbg(bp->dev, "%s: Cannot update SID %u, current session is %u\n", + __func__, sid, current_sid); + return -EBUSY; + } + } else if (rc != -ENODATA) { + netdev_dbg(bp->dev, "%s: Getting current sid failed, rc:%d.\n", __func__, rc); + return rc; + } + if (current_sid != sid) { + rc = tfo_sid_set(tfcp->tfo, sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to store session id, rc:%d\n", __func__, + rc); + return rc; + } + } + return rc; +} +int tfc_session_fid_add(struct tfc *tfcp, u16 fid, u16 sid, + u16 *fid_cnt) +{ + u16 current_sid = INVALID_SID; + struct bnxt *bp = NULL; + int rc; + + if (!tfcp) { + netdev_dbg(NULL, "%s: Invalid tfcp pointer\n", __func__); + return -EINVAL; + } + + 
bp = tfcp->bp; + if (!fid_cnt) { + netdev_dbg(bp->dev, "%s: Invalid fid_cnt pointer\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, ¤t_sid); + if (!rc) { + /* SID is valid if rc == 0 */ + if (current_sid != sid) { + netdev_dbg(bp->dev, "%s: Cannot add FID to SID %u, current session is %u\n", + __func__, sid, current_sid); + return -EBUSY; + } + } else if (rc != -ENODATA) { + netdev_dbg(bp->dev, "%s: Getting current sid failed, rc:%d.\n", __func__, rc); + return rc; + } + /* -ENODATA ==> current SID is invalid */ + + rc = tfc_msg_session_fid_add(tfcp, fid, sid, fid_cnt); + if (rc) { + netdev_dbg(bp->dev, "%s: session fid add message failed, rc:%d\n", __func__, rc); + return rc; + } + + if (current_sid != sid) { + rc = tfo_sid_set(tfcp->tfo, sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to store session id, rc:%d\n", __func__, + rc); + return rc; + } + } + + return rc; +} + +int tfc_session_fid_rem(struct tfc *tfcp, u16 fid, u16 *fid_cnt) +{ + struct bnxt *bp = NULL; + u16 sid; + int rc; + + if (!tfcp) { + netdev_dbg(NULL, "%s: Invalid tfcp pointer\n", __func__); + return -EINVAL; + } + + bp = tfcp->bp; + if (!fid_cnt) { + netdev_dbg(bp->dev, "%s: Invalid fid_cnt pointer\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: no sid allocated, rc:%d\n", __func__, rc); + return rc; + } + + rc = tfc_msg_session_fid_rem(tfcp, fid, sid, fid_cnt); + if (rc) { + netdev_dbg(bp->dev, "%s: session fid rem message failed, rc:%d\n", __func__, rc); + return rc; + } + + if (bp->pf.fw_fid == fid) { + rc = tfo_sid_set(tfcp->tfo, INVALID_SID); + if (rc) + netdev_dbg(bp->dev, "%s: Failed to reset session id, rc:%d\n", + __func__, rc); + } + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tbl_scope.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tbl_scope.c new file mode 100644 index 000000000000..693dddbbfee4 --- /dev/null +++ 
b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tbl_scope.c @@ -0,0 +1,1838 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include +#include +#include "tfc.h" + +#include "tfc_priv.h" +#include "tfc_msg.h" +#include "tfc_em.h" +#include "cfa_types.h" +#include "cfa_tim.h" +#include "cfa_tpm.h" +#include "tfo.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "tfc_cpm.h" +#include "cfa_mm.h" +#include "cfa_bld_mpc_field_ids.h" +#include "tfc_vf2pf_msg.h" +#include "tfc_util.h" + +/* These values are for Thor2. Take care to adjust them appropriately when + * support for additional HW is added. + */ +#define ENTRIES_PER_BUCKET 6 /* Max number of entries for a single bucket */ +#define LREC_SIZE 16 /* sizes in bytes */ +#define RECORD_SIZE 32 + +/* Page alignments must be some power of 2. These bits define the powers of 2 + * that are valid for page alignments. It is taken from + * cfa_hw_ts_pbl_page_size. + */ +#define VALID_PAGE_ALIGNMENTS 0x40753000 + +#define MAX_PAGE_PTRS(page_size) ((page_size) / sizeof(void *)) + +#define BITS_IN_VAR(x) (sizeof(x) * 8) + +/* Private functions */ + +/* Calculate the smallest power of 2 that is >= x. The return value is the + * exponent of 2. + */ +static inline unsigned int next_pow2(unsigned int x) +{ + /* This algorithm calculates the nearest power of 2 greater than or + * equal to x: + * The function __builtin_clz returns the number of leading 0-bits in + * an unsigned int. + * Subtract this from the number of bits in x to get the power of 2. In + * the examples below, an int is assumed to have 32 bits. + * + * Example 1: + * x == 2 + * __builtin_clz(1) = 31 + * 32 - 31 = 1 + * 2^1 = 2 + * Example 2: + * x = 63 + * __builtin_clz(62) = 26 + * 32 - 26 = 6 + * 2^6 = 64 + */ + return x == 1 ? 1 : (BITS_IN_VAR(x) - __builtin_clz(x - 1)); +} + +/* Calculate the largest power of 2 that is less than x. The return value is + * the exponent of 2. 
+ */ +static inline unsigned int prev_pow2(unsigned int x) +{ + /* This algorithm calculates the nearest power of 2 less than x: + * The function __builtin_clz returns the number of leading 0-bits in + * an unsigned int. + * Subtract this from one less than the number of bits in x to get + * the power of 2. In the examples below, an int is assumed to have + * 32 bits. + * + * Example 1: + * x = 2 + * __builtin_clz(1) = 31 + * 31 - 31 = 0 + * 2^0 = 1 + * Example 2: + * x = 63 + * __builtin_clz(62) = 26 + * 31 - 26 = 5 + * 2^5 = 32 + * Example 3: + * x = 64 + * __builtin_clz(63) = 26 + * 31 - 26 = 5 + * 2^5 = 32 + */ + return x == 1 ? 0 : (BITS_IN_VAR(x) - 1 - __builtin_clz(x - 1)); +} + +static inline u32 roundup32(u32 x, u32 y) +{ + return (((x + y - 1) / y) * y); +} + +static inline u64 roundup64(u64 x, u64 y) +{ + return (((x + y - 1) / y) * y); +} + +/** + * This function calculates how many buckets and records are required for a + * given flow_cnt and factor. + * + * @flow_cnt: The total number of flows for which to compute memory + * @key_sz_in_bytes: The lookup key size in bytes + * @shared: True if the table scope will be shared. Shared table scopes cannot have + * dynamic buckets. + * @factor: This indicates a multiplier factor for determining the static and dynamic + * bucket counts. The larger the factor, the more buckets will be allocated. + * @lkup_rec_cnt: The total number of lookup records to allocate (includes buckets) + * @static_bucket_cnt_exp: The log2 of the number of static buckets to allocate. + * For example if 1024 static buckets, 1024=2^10, + so the value 10 would be returned. + * @dynamic_bucket_cnt: The number of dynamic buckets to allocate + * + * Return 0 if successful, -EINVAL if not. 
+ */ +static int calc_lkup_rec_cnt(struct bnxt *bp, u32 flow_cnt, u16 key_sz_in_bytes, + bool shared, enum tfc_tbl_scope_bucket_factor factor, + u32 *lkup_rec_cnt, u8 *static_bucket_cnt_exp, + u32 *dynamic_bucket_cnt) +{ + unsigned int entry_size; + unsigned int flow_adj; /* flow_cnt adjusted for factor */ + unsigned int key_rec_cnt; + + switch (factor) { + case TFC_TBL_SCOPE_BUCKET_FACTOR_1: + flow_adj = flow_cnt; + break; + case TFC_TBL_SCOPE_BUCKET_FACTOR_2: + flow_adj = flow_cnt * 2; + break; + case TFC_TBL_SCOPE_BUCKET_FACTOR_4: + flow_adj = flow_cnt * 4; + break; + case TFC_TBL_SCOPE_BUCKET_FACTOR_8: + flow_adj = flow_cnt * 8; + break; + case TFC_TBL_SCOPE_BUCKET_FACTOR_16: + flow_adj = flow_cnt * 16; + break; + default: + netdev_dbg(bp->dev, "%s: Invalid factor (%u)\n", __func__, factor); + return -EINVAL; + } + + if (key_sz_in_bytes <= RECORD_SIZE - LREC_SIZE) { + entry_size = 1; + } else if (key_sz_in_bytes <= RECORD_SIZE * 2 - LREC_SIZE) { + entry_size = 2; + } else if (key_sz_in_bytes <= RECORD_SIZE * 3 - LREC_SIZE) { + entry_size = 3; + } else if (key_sz_in_bytes <= RECORD_SIZE * 4 - LREC_SIZE) { + entry_size = 4; + } else { + netdev_dbg(bp->dev, "%s: Key size (%u) cannot be larger than (%u)\n", __func__, + key_sz_in_bytes, RECORD_SIZE * 4 - LREC_SIZE); + return -EINVAL; + } + key_rec_cnt = flow_cnt * entry_size; + +#ifdef DYNAMIC_BUCKETS_SUPPORTED + if (shared) { +#endif + *static_bucket_cnt_exp = + next_pow2(flow_adj / ENTRIES_PER_BUCKET); + *dynamic_bucket_cnt = 0; +#ifdef DYNAMIC_BUCKETS_SUPPORTED + } else { + *static_bucket_cnt_exp = + prev_pow2(flow_cnt / ENTRIES_PER_BUCKET); + *dynamic_bucket_cnt = + (flow_adj - flow_cnt) / ENTRIES_PER_BUCKET; + } +#endif + + *lkup_rec_cnt = key_rec_cnt + (1 << *static_bucket_cnt_exp) + + *dynamic_bucket_cnt; + + return 0; +} + +static int calc_act_rec_cnt(struct bnxt *bp, u32 *act_rec_cnt, u32 flow_cnt, + u16 act_rec_sz_in_bytes) +{ + if (act_rec_sz_in_bytes % RECORD_SIZE) { + netdev_dbg(bp->dev, "%s: Action 
record size (%u) must be a multiple of %u\n", + __func__, act_rec_sz_in_bytes, RECORD_SIZE); + return -EINVAL; + } + + *act_rec_cnt = flow_cnt * (act_rec_sz_in_bytes / RECORD_SIZE); + + return 0; +} + +/* Using a #define for the number of bits since the size of an int can depend + * upon the processor. + */ +#define BITS_IN_UINT (sizeof(unsigned int) * 8) + +static int calc_pool_sz_exp(struct bnxt *bp, u8 *pool_sz_exp, u32 rec_cnt, + u32 max_pools) +{ + unsigned int recs_per_region = rec_cnt / max_pools; + + if (recs_per_region == 0) { + netdev_dbg(bp->dev, "%s: rec_cnt (%u) must be larger than max_pools (%u)\n", + __func__, rec_cnt, max_pools); + return -EINVAL; + } + + *pool_sz_exp = prev_pow2(recs_per_region + 1); + + return 0; +} + +static int calc_rec_start_offset(struct bnxt *bp, u32 *start_offset, u32 bucket_cnt_exp) +{ + *start_offset = 1 << bucket_cnt_exp; + + return 0; +} + +static void free_pg_tbl(struct bnxt *bp, struct tfc_ts_page_tbl *tp) +{ + u32 i; + + for (i = 0; i < tp->pg_count; i++) { + if (!tp->pg_va_tbl[i]) { + netdev_dbg(bp->dev, "No mapping for page: %d table: %16p\n", i, tp); + continue; + } + + dma_free_coherent(&bp->pdev->dev, tp->pg_size, + tp->pg_va_tbl[i], tp->pg_pa_tbl[i]); + tp->pg_va_tbl[i] = NULL; + } + + tp->pg_count = 0; + kfree(tp->pg_va_tbl); + tp->pg_va_tbl = NULL; + kfree(tp->pg_pa_tbl); + tp->pg_pa_tbl = NULL; +} + +static int alloc_pg_tbl(struct bnxt *bp, struct tfc_ts_page_tbl *tp, u32 pg_count, + u32 pg_size) +{ + u32 i; + + tp->pg_va_tbl = + kcalloc(pg_count, sizeof(void *), GFP_KERNEL); + if (!tp->pg_va_tbl) + return -ENOMEM; + + tp->pg_pa_tbl = + kzalloc(pg_count * sizeof(void *), GFP_KERNEL); + if (!tp->pg_pa_tbl) { + kfree(tp->pg_va_tbl); + return -ENOMEM; + } + + tp->pg_count = 0; + tp->pg_size = pg_size; + + for (i = 0; i < pg_count; i++) { + tp->pg_va_tbl[i] = dma_alloc_coherent(&bp->pdev->dev, pg_size, + &tp->pg_pa_tbl[i], GFP_KERNEL); + if (!tp->pg_va_tbl[i]) + goto cleanup; + + tp->pg_count++; + } + + return 0; 
+ +cleanup: + free_pg_tbl(bp, tp); + return -ENOMEM; +} + +static void free_page_table(struct bnxt *bp, struct tfc_ts_mem_cfg *mem_cfg) +{ + struct tfc_ts_page_tbl *tp; + int i; + + for (i = 0; i < mem_cfg->num_lvl; i++) { + tp = &mem_cfg->pg_tbl[i]; + netdev_dbg(bp->dev, "EEM: Freeing page table: lvl %d cnt %u\n", i, tp->pg_count); + + free_pg_tbl(bp, tp); + } + + mem_cfg->l0_addr = NULL; + mem_cfg->l0_dma_addr = 0; + mem_cfg->num_lvl = 0; + mem_cfg->num_data_pages = 0; +} + +static int alloc_page_table(struct bnxt *bp, struct tfc_ts_mem_cfg *mem_cfg, u32 page_size) +{ + struct tfc_ts_page_tbl *tp; + int i, rc; + u32 j; + + for (i = 0; i < mem_cfg->num_lvl; i++) { + tp = &mem_cfg->pg_tbl[i]; + + rc = alloc_pg_tbl(bp, tp, mem_cfg->page_cnt[i], page_size); + if (rc) { + netdev_dbg(bp->dev, "Failed to allocate page table: lvl: %d, rc:%d\n", i, + rc); + goto cleanup; + } + + for (j = 0; j < tp->pg_count; j++) { + netdev_dbg(bp->dev, "EEM: Allocated page table: size %u lvl %d cnt %u", + page_size, i, tp->pg_count); + netdev_dbg(bp->dev, "VA:%p PA:%p\n", + (void *)(uintptr_t)tp->pg_va_tbl[j], + (void *)(uintptr_t)tp->pg_pa_tbl[j]); + } + } + return 0; + +cleanup: + free_page_table(bp, mem_cfg); + return rc; +} + +static u32 page_tbl_pgcnt(u32 num_pages, u32 page_size) +{ + return roundup32(num_pages, MAX_PAGE_PTRS(page_size)) / + MAX_PAGE_PTRS(page_size); + return 0; +} + +static void size_page_tbls(int max_lvl, u64 num_data_pages, + u32 page_size, u32 *page_cnt) +{ + if (max_lvl == TFC_TS_PT_LVL_0) { + page_cnt[TFC_TS_PT_LVL_0] = num_data_pages; + } else if (max_lvl == TFC_TS_PT_LVL_1) { + page_cnt[TFC_TS_PT_LVL_1] = num_data_pages; + page_cnt[TFC_TS_PT_LVL_0] = + page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_1], page_size); + } else if (max_lvl == TFC_TS_PT_LVL_2) { + page_cnt[TFC_TS_PT_LVL_2] = num_data_pages; + page_cnt[TFC_TS_PT_LVL_1] = + page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_2], page_size); + page_cnt[TFC_TS_PT_LVL_0] = + page_tbl_pgcnt(page_cnt[TFC_TS_PT_LVL_1], 
page_size); + } else { + return; + } +} + +static int num_pages_get(struct tfc_ts_mem_cfg *mem_cfg, u32 page_size) +{ + u64 max_page_ptrs = MAX_PAGE_PTRS(page_size); + u64 lvl_data_size = page_size; + int lvl = TFC_TS_PT_LVL_0; + u64 data_size; + + mem_cfg->num_data_pages = 0; + data_size = (u64)mem_cfg->rec_cnt * mem_cfg->entry_size; + + while (lvl_data_size < data_size) { + lvl++; + + if (lvl == TFC_TS_PT_LVL_1) + lvl_data_size = max_page_ptrs * page_size; + else if (lvl == TFC_TS_PT_LVL_2) + lvl_data_size = + max_page_ptrs * max_page_ptrs * page_size; + else + return -ENOMEM; + } + + mem_cfg->num_data_pages = roundup64(data_size, page_size) / page_size; + mem_cfg->num_lvl = lvl + 1; + + return 0; +} + +static void link_page_table(struct tfc_ts_page_tbl *tp, + struct tfc_ts_page_tbl *tp_next, bool set_pte_last) +{ + u64 *pg_pa = tp_next->pg_pa_tbl, *pg_va, valid; + u32 i, j, k = 0; + + for (i = 0; i < tp->pg_count; i++) { + pg_va = tp->pg_va_tbl[i]; + + for (j = 0; j < MAX_PAGE_PTRS(tp->pg_size); j++) { + if (k == tp_next->pg_count - 2 && set_pte_last) + valid = PTU_PTE_NEXT_TO_LAST | PTU_PTE_VALID; + else if (k == tp_next->pg_count - 1 && set_pte_last) + valid = PTU_PTE_LAST | PTU_PTE_VALID; + else + valid = PTU_PTE_VALID; + + pg_va[j] = cpu_to_le64(pg_pa[k] | valid); + if (++k >= tp_next->pg_count) + return; + } + } +} + +static void setup_page_table(struct tfc_ts_mem_cfg *mem_cfg) +{ + struct tfc_ts_page_tbl *tp_next; + struct tfc_ts_page_tbl *tp; + bool set_pte_last = 0; + int i; + + for (i = 0; i < mem_cfg->num_lvl - 1; i++) { + tp = &mem_cfg->pg_tbl[i]; + tp_next = &mem_cfg->pg_tbl[i + 1]; + if (i == mem_cfg->num_lvl - 2) + set_pte_last = 1; + link_page_table(tp, tp_next, set_pte_last); + } + + mem_cfg->l0_addr = mem_cfg->pg_tbl[TFC_TS_PT_LVL_0].pg_va_tbl[0]; + mem_cfg->l0_dma_addr = mem_cfg->pg_tbl[TFC_TS_PT_LVL_0].pg_pa_tbl[0]; +} + +static void unlink_and_free(struct bnxt *bp, struct tfc_ts_mem_cfg *mem_cfg, u32 page_size) +{ + /* tf_em_free_page_table 
*/ + struct tfc_ts_page_tbl *tp; + int i; + + for (i = 0; i < mem_cfg->num_lvl; i++) { + tp = &mem_cfg->pg_tbl[i]; + netdev_dbg(bp->dev, "EEM: Freeing page table: size %u lvl %d cnt %u\n", + page_size, i, tp->pg_count); + + /* tf_em_free_pg_tbl */ + free_pg_tbl(bp, tp); + } + + mem_cfg->l0_addr = NULL; + mem_cfg->l0_dma_addr = 0; + mem_cfg->num_lvl = 0; + mem_cfg->num_data_pages = 0; +} + +static int alloc_link_pbl(struct bnxt *bp, struct tfc_ts_mem_cfg *mem_cfg, u32 page_size) +{ + int rc; + + /* tf_em_size_page_tbl_lvl */ + rc = num_pages_get(mem_cfg, page_size); + if (rc) { + netdev_dbg(bp->dev, "EEM: Failed to size page table levels\n"); + netdev_dbg(bp->dev, "data-sz: %016llu page-sz: %u\n", + (u64)mem_cfg->rec_cnt * mem_cfg->entry_size, page_size); + return rc; + } + + /* tf_em_size_page_tbls */ + size_page_tbls(mem_cfg->num_lvl - 1, mem_cfg->num_data_pages, page_size, + mem_cfg->page_cnt); + + netdev_dbg(bp->dev, "EEM: lvls: %d sz: %016llu pgs: %016llu l0: %u l1: %u l2: %u\n", + mem_cfg->num_lvl, mem_cfg->num_data_pages * page_size, + mem_cfg->num_data_pages, mem_cfg->page_cnt[TFC_TS_PT_LVL_0], + mem_cfg->page_cnt[TFC_TS_PT_LVL_1], + mem_cfg->page_cnt[TFC_TS_PT_LVL_2]); + + /* tf_em_alloc_page_table -> tf_em_alloc_pg_tbl */ + rc = alloc_page_table(bp, mem_cfg, page_size); + if (rc) + goto cleanup; + + /* tf_em_setup_page_table */ + setup_page_table(mem_cfg); + + return 0; + +cleanup: + unlink_and_free(bp, mem_cfg, page_size); + return rc; +} + +/* tbl_scope_pools_create_parms contains the parameters for creating pools. + */ +struct tbl_scope_pools_create_parms { + /* Indicates if the table scope will be shared. */ + bool shared; + /* The number of pools the table scope will be divided into. (set + * to 1 if not shared). + */ + u16 max_pools; + /* The size of each individual lookup record pool expressed as: + * log2(max_records/max_pools). For example if 1024 records and 2 pools + * 1024/2=512=2^9, so the value 9 would be entered. 
+ */ + u8 lkup_pool_sz_exp[CFA_DIR_MAX]; + /* The size of each individual action record pool expressed as: + * log2(max_records/max_pools). For example if 1024 records and 2 pools + * 1024/2=512=2^9, so the value 9 would be entered. + */ + u8 act_pool_sz_exp[CFA_DIR_MAX]; +}; + +/** + * Allocate and store TPM and TIM for shared scope + * + * Dynamically allocate and store TPM instances for shared scope + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope identifier + * @params: Parameters for allocate and store TPM instances for shared scope + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +static int tbl_scope_pools_create(struct tfc *tfcp, u8 tsid, + struct tbl_scope_pools_create_parms *parms) +{ + void *tpms[CFA_DIR_MAX][CFA_REGION_TYPE_MAX]; + enum cfa_region_type region; + struct bnxt *bp = tfcp->bp; + void *tim = NULL, *tpm = NULL; + u32 tpm_db_size; + int dir, rc; + /* Dynamically allocate and store base addresses for TIM, + * TPM instances for the given tsid + */ + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) + return -EINVAL; + + rc = cfa_tpm_query(parms->max_pools, &tpm_db_size); + if (rc) + return -EINVAL; + + memset(tpms, 0, sizeof(void *) * CFA_DIR_MAX * CFA_REGION_TYPE_MAX); + + /* Allocate pool managers */ + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + tpms[dir][region] = kzalloc(tpm_db_size, GFP_KERNEL); + if (!tpms[dir][region]) + goto cleanup; + + rc = cfa_tpm_open(tpms[dir][region], tpm_db_size, parms->max_pools); + if (rc) + goto cleanup; + + rc = cfa_tpm_pool_size_set(tpms[dir][region], + (region == CFA_REGION_TYPE_LKUP ? 
+ parms->lkup_pool_sz_exp[dir] : + parms->act_pool_sz_exp[dir])); + if (rc) + goto cleanup; + + rc = cfa_tim_tpm_inst_set(tim, tsid, region, dir, tpms[dir][region]); + if (rc) + goto cleanup; + } + } + + return 0; + + cleanup: + if (tim) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + /* It is possible that a tpm has been allocated + * but not added to tim. Ensure that those instances are + * cleaned up. + */ + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (!rc && tpm) { + kfree(tpm); + rc = cfa_tim_tpm_inst_set(tim, tsid, region, dir, NULL); + } else { + kfree(tpms[dir][region]); + } + } + } + } + + return rc; +} + +/** + * Free TPM instances for shared scope + * + * @tfcp: Pointer to TFC handle + * @tsid: Table scope identifier + * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +static int tbl_scope_pools_destroy(struct tfc *tfcp, u8 tsid) +{ + enum cfa_region_type region; + struct bnxt *bp = tfcp->bp; + void *tim, *tpm; + int dir, rc; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) + return -EINVAL; + + /* Free TIM, TPM instances for the given tsid. */ + if (tim) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) + return -EINVAL; + + if (tpm) { + rc = cfa_tim_tpm_inst_set(tim, tsid, region, dir, NULL); + kfree(tpm); + } + } + } + } + + return rc; +} + +/** + * Remove all associated pools owned by a function from TPM + * + * @tfcp: Pointer to TFC handle + * @fid: function + * @tsid: Table scope identifier + * @pool_cnt: Pointer to the number of pools still associated with other fids. 
+ * + * Returns + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +static int tbl_scope_tpm_fid_rem(struct tfc *tfcp, u16 fid, u8 tsid, + u16 *pool_cnt) +{ + enum cfa_region_type region; + struct bnxt *bp = tfcp->bp; + u16 pool_id, lfid, max_fid; + bool shared, valid, is_pf; + u16 found_cnt = 0; + enum cfa_dir dir; + void *tim, *tpm; + int rc; + + if (!pool_cnt) { + netdev_dbg(bp->dev, "%s: Invalid pool_cnt pointer\n", __func__); + return -EINVAL; + } + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) + return rc; + + if (!is_pf) { + netdev_dbg(bp->dev, "%s: only valid for PF\n", __func__); + return -EINVAL; + } + rc = tfo_ts_get(tfcp->tfo, tsid, &shared, NULL, &valid, NULL); + if (!valid || !shared) { + netdev_dbg(bp->dev, "%s: tsid(%d) valid(%s) shared(%s)\n", + __func__, tsid, valid ? "TRUE" : "FALSE", + shared ? "TRUE" : "FALSE"); + return -EINVAL; + } + + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TIM\n", __func__); + return -EINVAL; + } + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + /* Get the TPM and then check to see if the fid is associated + * with any of the pools + */ + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TPM for tsid:%d dir:%d\n", + __func__, tsid, dir); + return -EINVAL; + } + rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST, fid, &pool_id); + if (rc) /* FID not used */ + continue; + netdev_dbg(bp->dev, "%s: tsid(%d) fid(%d) region(%s) pool_id(%d)\n", + __func__, tsid, fid, tfc_ts_region_2_str(region, dir), + pool_id); + do { + /* Remove fid from pool */ + rc = cfa_tpm_fid_rem(tpm, pool_id, fid); + if (rc) + netdev_dbg(bp->dev, + "%s: cfa_tpm_fid_rem() failed for fid:%d pool:%d\n", + __func__, fid, pool_id); + + rc = cfa_tpm_srchm_by_fid(tpm, + CFA_SRCH_MODE_NEXT, + fid, &pool_id); + if (!rc) + netdev_dbg(bp->dev, "%s: tsid(%d) fid(%d) region(%s) 
pool_id(%d)\n", + __func__, tsid, fid, + tfc_ts_region_2_str(region, dir), + pool_id); + } while (!rc); + } + } + rc = tfc_bp_vf_max(tfcp, &max_fid); + if (rc) + return rc; + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + /* Get the TPM and then check to see if the fid is associated + * with any of the pools + */ + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TPM for tsid:%d dir:%d\n", + __func__, tsid, dir); + return -EINVAL; + } + for (lfid = BNXT_FIRST_PF_FID; lfid <= max_fid; lfid++) { + rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST, + lfid, &pool_id); + if (rc) /* FID not used */ + continue; + netdev_dbg(bp->dev, "%s: tsid(%d) fid(%d) region(%s) pool_id(%d)\n", + __func__, tsid, lfid, tfc_ts_region_2_str(region, dir), + pool_id); + do { + found_cnt++; + rc = cfa_tpm_srchm_by_fid(tpm, + CFA_SRCH_MODE_NEXT, + lfid, &pool_id); + if (!rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) fid(%d) region(%s) pool_id(%d)\n", + __func__, tsid, lfid, + tfc_ts_region_2_str(region, dir), + pool_id); + } + } while (!rc); + } + } + } + *pool_cnt = found_cnt; + return 0; +} + +/* Public APIs */ +int tfc_tbl_scope_qcaps(struct tfc *tfcp, bool *tbl_scope_capable, + u32 *max_lkup_rec_cnt, + u32 *max_act_rec_cnt, + u8 *max_lkup_static_buckets_exp) +{ + struct bnxt *bp = tfcp->bp; + int rc; + + if (!tbl_scope_capable) { + netdev_dbg(bp->dev, "%s: Invalid tbl_scope_capable pointer\n", __func__); + return -EINVAL; + } + + rc = tfc_msg_tbl_scope_qcaps(tfcp, tbl_scope_capable, max_lkup_rec_cnt, + max_act_rec_cnt, + max_lkup_static_buckets_exp); + if (rc) + netdev_dbg(bp->dev, "%s: table scope qcaps message failed, rc:%d\n", __func__, rc); + + return rc; +} + +int tfc_tbl_scope_size_query(struct tfc *tfcp, + struct tfc_tbl_scope_size_query_parms *parms) +{ + struct bnxt *bp = tfcp->bp; + enum cfa_dir dir; + int rc; + + if (!parms) { + netdev_dbg(bp->dev, "%s: Invalid 
parms pointer\n", __func__); + return -EINVAL; + } + + if (parms->factor > TFC_TBL_SCOPE_BUCKET_FACTOR_MAX) { + netdev_dbg(bp->dev, "%s: Invalid factor %u\n", __func__, parms->factor); + return -EINVAL; + } + + for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) { + rc = calc_lkup_rec_cnt(bp, parms->flow_cnt[dir], + parms->key_sz_in_bytes[dir], + parms->shared, parms->factor, + &parms->lkup_rec_cnt[dir], + &parms->static_bucket_cnt_exp[dir], + &parms->dynamic_bucket_cnt[dir]); + if (rc) + break; + + rc = calc_act_rec_cnt(bp, &parms->act_rec_cnt[dir], + parms->flow_cnt[dir], + parms->act_rec_sz_in_bytes[dir]); + if (rc) + break; + + rc = calc_pool_sz_exp(bp, &parms->lkup_pool_sz_exp[dir], + parms->lkup_rec_cnt[dir] - + (1 << parms->static_bucket_cnt_exp[dir]), + parms->max_pools); + if (rc) + break; + + rc = calc_pool_sz_exp(bp, &parms->act_pool_sz_exp[dir], + parms->act_rec_cnt[dir], + parms->max_pools); + if (rc) + break; + + rc = calc_rec_start_offset(bp, &parms->lkup_rec_start_offset[dir], + parms->static_bucket_cnt_exp[dir]); + if (rc) + break; + } + + return rc; +} + +int tfc_tbl_scope_id_alloc(struct tfc *tfcp, bool shared, + enum cfa_app_type app_type, u8 *tsid, + bool *first) +{ + struct bnxt *bp = tfcp->bp; + bool valid = true; + int rc; + + if (!tsid) { + netdev_dbg(bp->dev, "%s: Invalid tsid pointer\n", __func__); + return -EINVAL; + } + if (!first) { + netdev_dbg(bp->dev, "%s: Invalid first pointer\n", __func__); + return -EINVAL; + } + if (app_type >= CFA_APP_TYPE_INVALID) { + netdev_dbg(bp->dev, "%s: Invalid app type\n", __func__); + return -EINVAL; + } + rc = tfc_msg_tbl_scope_id_alloc(tfcp, bp->pf.fw_fid, shared, app_type, tsid, first); + if (rc) { + netdev_dbg(bp->dev, "%s: table scope ID alloc message failed, rc:%d\n", + __func__, rc); + } else { + /* TODO, update tbl_scope_id_alloc() API to pass in app type */ + rc = tfo_ts_set(tfcp->tfo, *tsid, shared, app_type, valid, 0); + } + return rc; +} + +int tfc_tbl_scope_mem_alloc(struct tfc *tfcp, u16 
fid, u8 tsid, + struct tfc_tbl_scope_mem_alloc_parms *parms) +{ + struct tfc_ts_mem_cfg lkup_mem_cfg[CFA_DIR_MAX]; + struct tfc_ts_mem_cfg act_mem_cfg[CFA_DIR_MAX]; + u64 act_base_addr[2], lkup_base_addr[2]; + u8 act_pbl_level[2], lkup_pbl_level[2]; + bool is_pf, shared = false; + bool valid, cfg_done; + struct bnxt *bp = tfcp->bp; + int dir, rc; + u32 page_sz; + u8 cfg_cnt; + u16 pfid; + + if (!parms) { + netdev_dbg(bp->dev, "%s: Invalid parms pointer\n", __func__); + return -EINVAL; + } + + if (tfo_ts_validate(tfcp->tfo, tsid, &valid) != 0) { + netdev_dbg(bp->dev, "%s: Invalid tsid(%d) object\n", __func__, tsid); + return -EINVAL; + } + + if (parms->local && !valid) { + netdev_dbg(bp->dev, "%s: tsid(%d) not allocated\n", __func__, tsid); + return -EINVAL; + } + + /* Normalize page size to a power of 2 */ + page_sz = 1 << next_pow2(parms->pbl_page_sz_in_bytes); + if (parms->pbl_page_sz_in_bytes != page_sz || + (page_sz & VALID_PAGE_ALIGNMENTS) == 0) { + netdev_dbg(bp->dev, "%s: Invalid page size %d\n", __func__, + parms->pbl_page_sz_in_bytes); + return -EINVAL; + } + + memset(lkup_mem_cfg, 0, sizeof(lkup_mem_cfg)); + memset(act_mem_cfg, 0, sizeof(act_mem_cfg)); + + rc = tfc_get_pfid(tfcp, &pfid); + if (rc) + return rc; + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) + return rc; + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + struct tfc_ts_pool_info pi; + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) + return rc; + + pi.lkup_pool_sz_exp = parms->lkup_pool_sz_exp[dir]; + pi.act_pool_sz_exp = parms->act_pool_sz_exp[dir]; + rc = tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) + return rc; + } + + /* A shared table scope will have more than 1 pool */ + if (parms->max_pools > 1) + shared = true; + + /* If we are running on a PF, we will allocate memory locally */ + if (is_pf) { + struct tbl_scope_pools_create_parms cparms; + + cfg_done = false; + cfg_cnt = 0; + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + lkup_mem_cfg[dir].rec_cnt = 
parms->lkup_rec_cnt[dir]; + lkup_mem_cfg[dir].lkup_rec_start_offset = + 1 << parms->static_bucket_cnt_exp[dir]; + lkup_mem_cfg[dir].entry_size = RECORD_SIZE; + + netdev_dbg(bp->dev, "Alloc lkup table: dir %d\n", dir); + + rc = alloc_link_pbl(bp, &lkup_mem_cfg[dir], + parms->pbl_page_sz_in_bytes); + if (rc) + goto cleanup; + + lkup_base_addr[dir] = lkup_mem_cfg[dir].l0_dma_addr; + lkup_pbl_level[dir] = lkup_mem_cfg[dir].num_lvl - 1; + + rc = tfc_msg_backing_store_cfg_v2(tfcp, tsid, dir, + CFA_REGION_TYPE_LKUP, + lkup_base_addr[dir], + lkup_pbl_level[dir], + parms->pbl_page_sz_in_bytes, + parms->lkup_rec_cnt[dir], + parms->static_bucket_cnt_exp[dir], + cfg_done); + + if (rc) { + netdev_dbg(bp->dev, + "%s: backing store cfg msg failed dir(%s) lkup, rc:%d\n", + __func__, dir == CFA_DIR_RX ? "rx" : "tx", rc); + goto cleanup; + } + + rc = tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_LKUP, + parms->local, &lkup_mem_cfg[dir]); + if (rc) + goto cleanup; + + netdev_dbg(bp->dev, "Alloc action table: dir %d\n", dir); + + act_mem_cfg[dir].rec_cnt = parms->act_rec_cnt[dir]; + act_mem_cfg[dir].entry_size = RECORD_SIZE; + + rc = alloc_link_pbl(bp, &act_mem_cfg[dir], + parms->pbl_page_sz_in_bytes); + if (rc) + goto cleanup; + + act_base_addr[dir] = act_mem_cfg[dir].l0_dma_addr; + act_pbl_level[dir] = act_mem_cfg[dir].num_lvl - 1; + + cfg_done = false; + + if (cfg_cnt) + cfg_done = true; + + rc = tfc_msg_backing_store_cfg_v2(tfcp, tsid, dir, + CFA_REGION_TYPE_ACT, + act_base_addr[dir], + act_pbl_level[dir], + parms->pbl_page_sz_in_bytes, + parms->act_rec_cnt[dir], 0, + cfg_done); + if (rc) { + netdev_dbg(bp->dev, + "%s: bs cfg msg failed dir(%s) action, rc:%d\n", + __func__, dir == CFA_DIR_RX ? 
"rx" : "tx", rc); + goto cleanup; + } + + /* Set shared and valid in local state */ + valid = true; + rc = tfo_ts_set(tfcp->tfo, tsid, shared, CFA_APP_TYPE_TF, valid, + parms->max_pools); + if (rc) + goto cleanup; + + rc = tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_ACT, + parms->local, &act_mem_cfg[dir]); + if (rc) + goto cleanup; + + cfg_cnt++; + } + cparms.shared = shared; + cparms.max_pools = parms->max_pools; + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + cparms.lkup_pool_sz_exp[dir] = parms->lkup_pool_sz_exp[dir]; + cparms.act_pool_sz_exp[dir] = parms->act_pool_sz_exp[dir]; + } + + rc = tbl_scope_pools_create(tfcp, tsid, &cparms); + if (rc) + goto cleanup; + + /* If not shared, allocate the single pool_id in each region + * so that we can save the associated fid for the table scope + */ + if (!shared) { + u16 pool_id; + enum cfa_region_type region; + u16 max_vf; + + rc = tfc_bp_vf_max(tfcp, &max_vf); + if (rc) + return rc; + if (fid > max_vf) { + netdev_dbg(bp->dev, "%s fid out of range %d\n", + __func__, fid); + return -EINVAL; + } + + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + rc = tfc_tbl_scope_pool_alloc(tfcp, + fid, + tsid, + region, + dir, + NULL, + &pool_id); + if (rc) + goto cleanup; + /* only 1 pool available */ + if (pool_id != 0) + goto cleanup; + } + } + } + } else /* this is a VF */ { + /* If first or !shared, send message to PF to allocate the memory */ + if (parms->first || !shared) { + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd req = { { 0 } }; + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp resp = { { 0 } }; + u16 fid; + + rc = tfc_get_fid(tfcp, &fid); + if (rc) + return rc; + + req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD; + req.hdr.fid = fid; + req.tsid = tsid; + req.max_pools = parms->max_pools; + for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) { + req.static_bucket_cnt_exp[dir] = parms->static_bucket_cnt_exp[dir]; + req.dynamic_bucket_cnt[dir] = 
parms->dynamic_bucket_cnt[dir]; + req.lkup_rec_cnt[dir] = parms->lkup_rec_cnt[dir]; + req.lkup_pool_sz_exp[dir] = parms->lkup_pool_sz_exp[dir]; + req.act_pool_sz_exp[dir] = parms->act_pool_sz_exp[dir]; + req.act_rec_cnt[dir] = parms->act_rec_cnt[dir]; + req.lkup_rec_start_offset[dir] = parms->lkup_rec_start_offset[dir]; + } + + rc = tfc_vf2pf_mem_alloc(tfcp, &req, &resp); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_vf2pf_mem_alloc failed\n", __func__); + goto cleanup; + } + + netdev_dbg(bp->dev, "%s: tsid: %d, status %d\n", __func__, + resp.tsid, resp.status); + } + + /* Save off info for later use */ + for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) { + lkup_mem_cfg[dir].rec_cnt = parms->lkup_rec_cnt[dir]; + lkup_mem_cfg[dir].lkup_rec_start_offset = + 1 << parms->static_bucket_cnt_exp[dir]; + lkup_mem_cfg[dir].entry_size = RECORD_SIZE; + + act_mem_cfg[dir].rec_cnt = parms->act_rec_cnt[dir]; + act_mem_cfg[dir].entry_size = RECORD_SIZE; + + rc = tfo_ts_set_mem_cfg(tfcp->tfo, + tsid, + dir, + CFA_REGION_TYPE_LKUP, + true, + &lkup_mem_cfg[dir]); + if (rc) + goto cleanup; + + rc = tfo_ts_set_mem_cfg(tfcp->tfo, + tsid, + dir, + CFA_REGION_TYPE_ACT, + true, + &act_mem_cfg[dir]); + if (rc) + goto cleanup; + + /* Set shared and valid in local state */ + valid = true; + rc = tfo_ts_set(tfcp->tfo, tsid, shared, CFA_APP_TYPE_TF, + valid, parms->max_pools); + } + } + return rc; + +cleanup: + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + unlink_and_free(bp, &lkup_mem_cfg[dir], parms->pbl_page_sz_in_bytes); + unlink_and_free(bp, &act_mem_cfg[dir], parms->pbl_page_sz_in_bytes); + } + + memset(lkup_mem_cfg, 0, sizeof(lkup_mem_cfg)); + memset(act_mem_cfg, 0, sizeof(act_mem_cfg)); + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, + CFA_REGION_TYPE_LKUP, + parms->local, + &lkup_mem_cfg[dir]); + (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, + CFA_REGION_TYPE_ACT, + parms->local, + &act_mem_cfg[dir]); + } + return rc; +} + +int 
tfc_tbl_scope_mem_free(struct tfc *tfcp, u16 fid, u8 tsid) +{ + bool local, shared, is_pf = false; + struct tfc_ts_mem_cfg mem_cfg; + enum cfa_region_type region; + struct bnxt *bp = tfcp->bp; + int dir, rc, lrc; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfo_ts_get(tfcp->tfo, tsid, &shared, NULL, NULL, NULL); + if (rc) + return rc; + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) + return rc; + + /* Lookup any memory config to get local */ + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP, + &local, &mem_cfg); + if (rc) + return rc; + + if (!is_pf) { + struct tfc_vf2pf_tbl_scope_mem_free_cmd req = {{ 0 }}; + struct tfc_vf2pf_tbl_scope_mem_free_resp resp = {{ 0 }}; + u16 fid; + + rc = tfc_get_fid(tfcp, &fid); + if (rc) + return rc; + + req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD; + req.hdr.fid = fid; + req.tsid = tsid; + + rc = tfc_vf2pf_mem_free(tfcp, &req, &resp); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_vf2pf_mem_free failed\n", __func__); + /* continue cleanup regardless */ + } + netdev_dbg(bp->dev, "%s: tsid: %d, status %d\n", __func__, resp.tsid, resp.status); + } + if (shared && is_pf) { + u16 pool_cnt; + u16 max_vf; + + rc = tfc_bp_vf_max(tfcp, &max_vf); + if (rc) + return rc; + + if (fid > max_vf) { + netdev_dbg(bp->dev, "%s: invalid fid 0x%x\n", __func__, fid); + return -EINVAL; + } + rc = tbl_scope_tpm_fid_rem(tfcp, fid, tsid, &pool_cnt); + if (rc) { + netdev_dbg(bp->dev, "%s: error getting tsid(%d) pools status %d\n", + __func__, tsid, rc); + return rc; + } + /* Then if there are still fids present, return */ + if (pool_cnt) { + netdev_dbg(bp->dev, "%s: tsid(%d) fids still present pool_cnt(%d)\n", + __func__, tsid, pool_cnt); + return 0; + } + } + /* Send Deconfig HWRM before freeing memory */ + rc = tfc_msg_tbl_scope_deconfig(tfcp, tsid); + if (rc) { + netdev_dbg(bp->dev, "%s: deconfig failure: %d\n", __func__, 
rc); + return rc; + } + + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + lrc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, region, &local, + &mem_cfg); + if (lrc) { + rc = lrc; + continue; + } + /* memory only allocated on PF */ + if (is_pf) + unlink_and_free(bp, &mem_cfg, mem_cfg.pg_tbl[0].pg_size); + + memset(&mem_cfg, 0, sizeof(mem_cfg)); + + /* memory freed, set local to false */ + local = false; + (void)tfo_ts_set_mem_cfg(tfcp->tfo, tsid, dir, region, local, + &mem_cfg); + } + } + if (rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) db err(%d), continuing\n", + __func__, tsid, rc); + } + if (is_pf) { + rc = tbl_scope_pools_destroy(tfcp, tsid); + if (rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) pool err(%d) continuing\n", + __func__, tsid, rc); + } + } + /* cleanup state */ + rc = tfo_ts_set(tfcp->tfo, tsid, false, CFA_APP_TYPE_INVALID, false, 0); + + return rc; +} + +int tfc_tbl_scope_fid_add(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt) +{ + struct bnxt *bp = tfcp->bp; + int rc; + + if (bp->pf.fw_fid != fid) { + netdev_dbg(bp->dev, "%s: Invalid fid\n", __func__); + return -EINVAL; + } + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfc_msg_tbl_scope_fid_add(tfcp, fid, tsid, fid_cnt); + if (rc) + netdev_dbg(bp->dev, "%s: table scope fid add message failed, rc:%d\n", + __func__, rc); + + return rc; +} + +int tfc_tbl_scope_fid_rem(struct tfc *tfcp, u16 fid, u8 tsid, u16 *fid_cnt) +{ + struct tfc_ts_mem_cfg mem_cfg; + struct bnxt *bp = tfcp->bp; + struct tfc_cpm *cpm_lkup; + struct tfc_cpm *cpm_act; + bool local; + int rc; + + if (bp->pf.fw_fid != fid) { + netdev_dbg(bp->dev, "%s: Invalid fid\n", __func__); + return -EINVAL; + } + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfc_msg_tbl_scope_fid_rem(tfcp, fid, 
tsid, fid_cnt); + if (rc) + netdev_dbg(bp->dev, "%s: table scope fid rem message failed, rc:%d\n", + __func__, rc); + + /* Check if any direction has a CPM instance and, if so, free it. */ + rc = tfo_ts_get_cpm_inst(tfcp->tfo, tsid, CFA_DIR_RX, &cpm_lkup, &cpm_act); + if (!rc && (cpm_lkup || cpm_act)) + (void)tfc_tbl_scope_cpm_free(tfcp, tsid); + + /* Check if any table has memory configured and, if so, free it. */ + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, CFA_DIR_RX, CFA_REGION_TYPE_LKUP, + &local, &mem_cfg); + /* If mem already freed, then local is set to zero (false). */ + if (!rc && local) + (void)tfc_tbl_scope_mem_free(tfcp, fid, tsid); + + rc = tfo_ts_set(tfcp->tfo, tsid, false, CFA_APP_TYPE_INVALID, false, 0); + + return rc; +} + +int tfc_tbl_scope_cpm_alloc(struct tfc *tfcp, u8 tsid, + struct tfc_tbl_scope_cpm_alloc_parms *parms) +{ + struct tfc_cmm *cmm_lkup = NULL; + struct tfc_cmm *cmm_act = NULL; + struct tfc_ts_pool_info pi; + struct bnxt *bp = tfcp->bp; + bool is_shared; + int dir, rc; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + if (tfo_ts_get(tfcp->tfo, tsid, &is_shared, NULL, NULL, NULL)) { + netdev_dbg(bp->dev, "%s: tsid(%d) info get failed\n", __func__, tsid); + return -EINVAL; + } + + /* Create 4 CPM instances and set the pool_sz_exp and max_pools for each */ + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get pool info for tsid:%d\n", + __func__, tsid); + return -EINVAL; + } + pi.lkup_max_contig_rec = parms->lkup_max_contig_rec[dir]; + pi.act_max_contig_rec = parms->act_max_contig_rec[dir]; + tfc_cpm_open(&pi.lkup_cpm, parms->max_pools); + tfc_cpm_set_pool_size(pi.lkup_cpm, (1 << pi.lkup_pool_sz_exp)); + tfc_cpm_open(&pi.act_cpm, parms->max_pools); + tfc_cpm_set_pool_size(pi.act_cpm, (1 << pi.act_pool_sz_exp)); + tfo_ts_set_cpm_inst(tfcp->tfo, 
tsid, dir, pi.lkup_cpm, pi.act_cpm); + tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi); + + /* If not shared create CMM instance for and populate CPM with pool_id 0 + * If shared, a pool_id will be allocated during tfc_act_alloc() or + * tfc_em_insert() and the CMM instance will be created on the first + * call. + */ + if (!is_shared) { + struct cfa_mm_query_parms qparms; + struct cfa_mm_open_parms oparms; + struct tfc_ts_mem_cfg mem_cfg; + u32 pool_id = 0; + + /* ACTION */ + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_ACT, + NULL, &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %d\n", + __func__, rc); + return -EINVAL; + } + /* override the record size since a single pool because + * pool_sz_exp is 0 in this case + */ + tfc_cpm_set_pool_size(pi.act_cpm, mem_cfg.rec_cnt); + + /*create CMM instance */ + qparms.max_records = mem_cfg.rec_cnt; + qparms.max_contig_records = roundup_pow_of_two(pi.act_max_contig_rec); + rc = cfa_mm_query(&qparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_query() failed: %d\n", + __func__, rc); + return -EINVAL; + } + + cmm_act = vzalloc(qparms.db_size); + if (!cmm_act) { + rc = -ENOMEM; + goto cleanup; + } + oparms.db_mem_size = qparms.db_size; + oparms.max_contig_records = qparms.max_contig_records; + oparms.max_records = qparms.max_records; + rc = cfa_mm_open(cmm_act, &oparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_open() failed: %d\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + /* Store CMM instance in the CPM for pool_id 0 */ + rc = tfc_cpm_set_cmm_inst(pi.act_cpm, pool_id, cmm_act); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_set_cmm_inst() act failed: %d\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + /* LOOKUP */ + rc = tfo_ts_get_mem_cfg(tfcp->tfo, tsid, dir, CFA_REGION_TYPE_LKUP, + NULL, &mem_cfg); + if (rc) { + netdev_dbg(bp->dev, "%s: tfo_ts_get_mem_cfg() failed: %c\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + /* Create lkup 
pool CMM instance */ + qparms.max_records = mem_cfg.rec_cnt; + qparms.max_contig_records = roundup_pow_of_two(pi.lkup_max_contig_rec); + rc = cfa_mm_query(&qparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_query() failed: %d\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + cmm_lkup = vzalloc(qparms.db_size); + if (!cmm_lkup) { + rc = -ENOMEM; + goto cleanup; + } + oparms.db_mem_size = qparms.db_size; + oparms.max_contig_records = qparms.max_contig_records; + oparms.max_records = qparms.max_records; + rc = cfa_mm_open(cmm_lkup, &oparms); + if (rc) { + netdev_dbg(bp->dev, "%s: cfa_mm_open() failed: %d\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + /* override the record size since a single pool because + * pool_sz_exp is 0 in this case + */ + tfc_cpm_set_pool_size(pi.lkup_cpm, mem_cfg.rec_cnt); + + /* Store CMM instance in the CPM for pool_id 0 */ + rc = tfc_cpm_set_cmm_inst(pi.lkup_cpm, pool_id, cmm_lkup); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_cpm_set_cmm_inst() lkup failed: %d\n", + __func__, rc); + rc = -EINVAL; + goto cleanup; + } + } + } + + return 0; +cleanup: + vfree(cmm_act); + vfree(cmm_lkup); + + return rc; +} + +int tfc_tbl_scope_cpm_free(struct tfc *tfcp, u8 tsid) +{ + struct bnxt *bp = tfcp->bp; + struct tfc_ts_pool_info pi; + int dir, rc = 0; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + uint16_t pool_id; + struct tfc_cmm *cmm; + enum cfa_srch_mode srch_mode; + + rc = tfo_ts_get_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) + netdev_dbg(bp->dev, "%s: pool info error(%d)\n", __func__, rc); + + /* Clean up lkup cpm/cmm instances */ + srch_mode = CFA_SRCH_MODE_FIRST; + do { + rc = tfc_cpm_srchm_by_configured_pool(pi.lkup_cpm, srch_mode, + &pool_id, &cmm); + srch_mode = CFA_SRCH_MODE_NEXT; + + if (rc == 0 && cmm) { + netdev_dbg(bp->dev, "%s: free lkup_%s CMM for pool(%d)\n", + 
__func__, dir == CFA_DIR_RX ? "rx" : "tx", + pool_id); + cfa_mm_close(cmm); + vfree(cmm); + } + + } while (!rc); + + tfc_cpm_close(pi.lkup_cpm); + + /* Clean up action cpm/cmm instances */ + srch_mode = CFA_SRCH_MODE_FIRST; + do { + uint16_t pool_id; + struct tfc_cmm *cmm; + + rc = tfc_cpm_srchm_by_configured_pool(pi.act_cpm, srch_mode, + &pool_id, &cmm); + srch_mode = CFA_SRCH_MODE_NEXT; + + if (rc == 0 && cmm) { + netdev_dbg(bp->dev, "%s: free act_%s CMM for pool(%d)\n", + __func__, dir == CFA_DIR_RX ? "rx" : "tx", + pool_id); + cfa_mm_close(cmm); + vfree(cmm); + } + + } while (!rc); + + tfc_cpm_close(pi.act_cpm); + + rc = tfo_ts_set_cpm_inst(tfcp->tfo, tsid, dir, NULL, NULL); + if (rc) + netdev_dbg(bp->dev, "%s: cpm inst error(%d)\n", __func__, rc); + + pi.lkup_cpm = NULL; + pi.act_cpm = NULL; + rc = tfo_ts_set_pool_info(tfcp->tfo, tsid, dir, &pi); + if (rc) + netdev_dbg(bp->dev, "%s: pool info error(%d)\n", __func__, rc); + } + + return rc; +} + +int tfc_tbl_scope_pool_alloc(struct tfc *tfcp, u16 fid, u8 tsid, enum cfa_region_type region, + enum cfa_dir dir, u8 *pool_sz_exp, u16 *pool_id) +{ + struct bnxt *bp = tfcp->bp; + void *tim, *tpm; + bool is_pf; + int rc; + + if (!pool_id) { + netdev_dbg(bp->dev, "%s: Invalid pool_id pointer\n", __func__); + return -EINVAL; + } + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get PF status\n", __func__); + return -EINVAL; + } + + if (is_pf) { + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TIM\n", __func__); + return -EINVAL; + } + + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TPM for tsid:%d region:%d dir:%d\n", + __func__, tsid, region, dir); + return -EINVAL; + } + + rc = cfa_tpm_alloc(tpm, pool_id); + if (rc) { + netdev_dbg(bp->dev, 
"%s: Failed allocate pool_id %d\n", __func__, rc); + return -EINVAL; + } + + if (pool_sz_exp) { + rc = cfa_tpm_pool_size_get(tpm, pool_sz_exp); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed get pool size exp\n", __func__); + return -EINVAL; + } + } + rc = cfa_tpm_fid_add(tpm, *pool_id, fid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to set pool_id %d fid 0x%x %d\n", + __func__, *pool_id, fid, rc); + return rc; + } + } else { /* !PF */ + struct tfc_vf2pf_tbl_scope_pool_alloc_cmd req = { { 0 } }; + struct tfc_vf2pf_tbl_scope_pool_alloc_resp resp = { { 0 } }; + uint16_t fid; + + rc = tfc_get_fid(tfcp, &fid); + if (rc) + return rc; + + req.hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD; + req.hdr.fid = fid; + req.tsid = tsid; + req.dir = dir; + req.region = region; + + /* Send message to PF to allocate pool */ + rc = tfc_vf2pf_pool_alloc(tfcp, &req, &resp); + if (rc) { + netdev_dbg(bp->dev, "%s: tfc_vf2pf_pool_alloc failed\n", __func__); + return rc; + } + *pool_id = resp.pool_id; + if (pool_sz_exp) + *pool_sz_exp = resp.pool_sz_exp; + } + return rc; +} + +int tfc_tbl_scope_pool_free(struct tfc *tfcp, u16 fid, u8 tsid, + enum cfa_region_type region, enum cfa_dir dir, + u16 pool_id) +{ + struct bnxt *bp = tfcp->bp; + void *tim, *tpm; + bool is_pf; + int rc; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get PF status\n", __func__); + return -EINVAL; + } + + if (is_pf) { + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) + return -EINVAL; + + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) + return -EINVAL; + + rc = cfa_tpm_fid_rem(tpm, pool_id, fid); + if (rc) + return -EINVAL; + + rc = cfa_tpm_free(tpm, pool_id); + return rc; + + } else { + /* Pools are currently only deleted on the VF when the + * VF calls tfc_tbl_scope_mem_free() if shared. 
+ */ + } + + return rc; +} + +int tfc_tbl_scope_config_state_get(struct tfc *tfcp, u8 tsid, bool *configured) +{ + struct bnxt *bp = tfcp->bp; + int rc; + + if (tfo_ts_validate(tfcp->tfo, tsid, NULL) != 0) { + netdev_dbg(bp->dev, "%s: tsid(%d) invalid\n", __func__, tsid); + return -EINVAL; + } + + rc = tfc_msg_tbl_scope_config_get(tfcp, tsid, configured); + if (rc) { + netdev_dbg(bp->dev, "%s: message failed %d\n", __func__, rc); + return rc; + } + + return rc; +} + +int tfc_tbl_scope_func_reset(struct tfc *tfcp, u16 fid) + +{ + void *tim = NULL, *tpm = NULL; + enum cfa_region_type region; + struct bnxt *bp = tfcp->bp; + u16 pool_id, found_cnt = 0; + bool shared, valid, is_pf; + enum cfa_app_type app; + enum cfa_dir dir; + u8 tsid, *data; + int rc; + + rc = tfc_bp_is_pf(tfcp, &is_pf); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get PF status\n", __func__); + return -EINVAL; + } + if (!is_pf) { + netdev_dbg(bp->dev, "%s: only valid for PF\n", __func__); + return -EINVAL; + } + rc = tfo_tim_get(tfcp->tfo, &tim); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to get TIM\n", __func__); + return -EINVAL; + } + + data = kzalloc(32 * TFC_MPC_BYTES_PER_WORD, GFP_KERNEL); + + for (tsid = 1; tsid < TFC_TBL_SCOPE_MAX; tsid++) { + rc = tfo_ts_get(tfcp->tfo, tsid, &shared, &app, &valid, NULL); + if (rc) + continue; /* TS is not used, move on to the next */ + + if (!shared || !valid) + continue; /* TS invalid or not shared, move on */ + + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + /* Get the TPM and then check to see if the fid is associated + * with any of the pools + */ + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (rc) { + netdev_dbg(bp->dev, + "%s: Failed to get TPM for tsid:%d dir:%d\n", + __func__, tsid, dir); + kfree(data); + return -EINVAL; + } + + rc = cfa_tpm_srchm_by_fid(tpm, CFA_SRCH_MODE_FIRST, fid, &pool_id); + if (rc) /* FID not used */ + continue; + + do { + found_cnt++; + + /* 
Flush EM entries associated with this TS. */ + if (region == CFA_REGION_TYPE_LKUP) + rc = tfc_em_delete_entries_by_pool_id(tfcp, + tsid, + dir, + pool_id, + 0, + data); + if (region == CFA_REGION_TYPE_LKUP && rc) + netdev_dbg(bp->dev, + "%s: failed for TS:%d Dir:%d pool:%d\n", + __func__, tsid, dir, pool_id); + + /* Remove fid from pool */ + rc = cfa_tpm_fid_rem(tpm, pool_id, fid); + if (rc) + netdev_dbg(bp->dev, + "%s: cfa_tpm_fid_rem() failed for fid:%d pool:%d\n", + __func__, fid, pool_id); + + /* Next! */ + rc = cfa_tpm_srchm_by_fid(tpm, + CFA_SRCH_MODE_NEXT, + fid, + &pool_id); + } while (!rc); + } + } + } + kfree(data); + + if (found_cnt == 0) { + netdev_dbg(bp->dev, "%s: FID:%d is not associated with any pool\n", __func__, fid); + return -EINVAL; + } + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tcam.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tcam.c new file mode 100644 index 000000000000..5b5d2b568653 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_tcam.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2019-2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include "tfc.h" +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "tfc.h" +#include "tfc_msg.h" +#include "tfc_util.h" + +int tfc_tcam_alloc(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, u8 priority, + u8 key_sz_in_bytes, struct tfc_tcam_info *tcam_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tcam_info) { + netdev_dbg(bp->dev, "%s: tcam_info is NULL\n", __func__); + return -EINVAL; + } + + if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) { + netdev_dbg(bp->dev, "%s: Invalid tcam subtype: %d\n", __func__, + tcam_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", __func__, rc); + return rc; + } + + rc = tfc_msg_tcam_alloc(tfcp, fid, sid, tcam_info->dir, + tcam_info->rsubtype, tt, priority, + key_sz_in_bytes, &tcam_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: alloc failed %s:%s rc:%d\n", __func__, + tfc_dir_2_str(tcam_info->dir), + tfc_tcam_2_str(tcam_info->rsubtype), rc); + + return rc; +} + +int tfc_tcam_alloc_set(struct tfc *tfcp, u16 fid, enum cfa_track_type tt, + u8 priority, struct tfc_tcam_info *tcam_info, + const struct tfc_tcam_data *tcam_data) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tcam_info) { + netdev_dbg(bp->dev, "%s: tcam_info is NULL\n", __func__); + return -EINVAL; + } + + if (!tcam_data) { + netdev_dbg(bp->dev, "%s: tcam_data is NULL\n", __func__); + return -EINVAL; + } + + if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) { + netdev_dbg(bp->dev, "%s: Invalid tcam subtype: %d\n", __func__, + tcam_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if 
(rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", + __func__, rc); + return rc; + } + + rc = tfc_msg_tcam_alloc_set(tfcp, fid, sid, tcam_info->dir, + tcam_info->rsubtype, tt, &tcam_info->id, + priority, tcam_data->key, + tcam_data->key_sz_in_bytes, + tcam_data->mask, + tcam_data->remap, + tcam_data->remap_sz_in_bytes); + if (rc) + netdev_dbg(bp->dev, "%s: alloc_set failed: %s:%s rc:%d\n", __func__, + tfc_dir_2_str(tcam_info->dir), + tfc_tcam_2_str(tcam_info->rsubtype), rc); + + return rc; +} + +int tfc_tcam_set(struct tfc *tfcp, u16 fid, const struct tfc_tcam_info *tcam_info, + const struct tfc_tcam_data *tcam_data) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tcam_info) { + netdev_dbg(bp->dev, "%s: tcam_info is NULL\n", __func__); + return -EINVAL; + } + + if (!tcam_data) { + netdev_dbg(bp->dev, "%s: tcam_data is NULL\n", __func__); + return -EINVAL; + } + + if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) { + netdev_dbg(bp->dev, "%s: Invalid tcam subtype: %d\n", __func__, + tcam_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", __func__, rc); + return rc; + } + + rc = tfc_msg_tcam_set(tfcp, fid, sid, tcam_info->dir, + tcam_info->rsubtype, tcam_info->id, + tcam_data->key, + tcam_data->key_sz_in_bytes, + tcam_data->mask, tcam_data->remap, + tcam_data->remap_sz_in_bytes); + if (rc) + netdev_dbg(bp->dev, "%s: set failed: %s:%s %d rc:%d\n", __func__, + tfc_dir_2_str(tcam_info->dir), + tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id, rc); + + return rc; +} + +int tfc_tcam_get(struct tfc *tfcp, u16 fid, const struct tfc_tcam_info *tcam_info, + struct tfc_tcam_data *tcam_data) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tcam_info) { + netdev_dbg(bp->dev, "%s: tcam_info is 
NULL\n", __func__); + return -EINVAL; + } + + if (!tcam_data) { + netdev_dbg(bp->dev, "%s: tcam_data is NULL\n", __func__); + return -EINVAL; + } + + if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) { + netdev_dbg(bp->dev, "%s: Invalid tcam subtype: %d\n", __func__, + tcam_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", __func__, rc); + return rc; + } + + rc = tfc_msg_tcam_get(tfcp, fid, sid, tcam_info->dir, + tcam_info->rsubtype, tcam_info->id, + tcam_data->key, &tcam_data->key_sz_in_bytes, + tcam_data->mask, tcam_data->remap, + &tcam_data->remap_sz_in_bytes); + if (rc) + netdev_dbg(bp->dev, "%s: get failed: %s:%s %d rc:%d\n", __func__, + tfc_dir_2_str(tcam_info->dir), + tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id, rc); + + return rc; +} + +int tfc_tcam_free(struct tfc *tfcp, u16 fid, const struct tfc_tcam_info *tcam_info) +{ + struct bnxt *bp = tfcp->bp; + u16 sid; + int rc; + + if (!tcam_info) { + netdev_dbg(bp->dev, "%s: tcam_info is NULL\n", __func__); + return -EINVAL; + } + + if (tcam_info->rsubtype >= CFA_RSUBTYPE_TCAM_MAX) { + netdev_dbg(bp->dev, "%s: Invalid tcam subtype: %d\n", __func__, + tcam_info->rsubtype); + return -EINVAL; + } + + if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { + netdev_dbg(bp->dev, "%s: bp not PF or trusted VF\n", __func__); + return -EINVAL; + } + + rc = tfo_sid_get(tfcp->tfo, &sid); + if (rc) { + netdev_dbg(bp->dev, "%s: Failed to retrieve SID, rc:%d\n", __func__, rc); + return rc; + } + + rc = tfc_msg_tcam_free(tfcp, fid, sid, tcam_info->dir, + tcam_info->rsubtype, tcam_info->id); + if (rc) + netdev_dbg(bp->dev, "%s: free failed: %s:%s:%d rc:%d\n", __func__, + tfc_dir_2_str(tcam_info->dir), + tfc_tcam_2_str(tcam_info->rsubtype), tcam_info->id, rc); + return rc; +} diff --git 
a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.c new file mode 100644 index 000000000000..93faa2b92ef0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2014-2023 Broadcom + * All rights reserved. + */ + +#include +#include "tfc.h" +#include "tfo.h" +#include "tfc_util.h" + +const char * +tfc_dir_2_str(enum cfa_dir dir) +{ + switch (dir) { + case CFA_DIR_RX: + return "RX"; + case CFA_DIR_TX: + return "TX"; + default: + return "Invalid direction"; + } +} + +const char * +tfc_ident_2_str(enum cfa_resource_subtype_ident id_stype) +{ + switch (id_stype) { + case CFA_RSUBTYPE_IDENT_L2CTX: + return "ident_l2_ctx"; + case CFA_RSUBTYPE_IDENT_PROF_FUNC: + return "ident_prof_func"; + case CFA_RSUBTYPE_IDENT_WC_PROF: + return "ident_wc_prof"; + case CFA_RSUBTYPE_IDENT_EM_PROF: + return "ident_em_prof"; + case CFA_RSUBTYPE_IDENT_L2_FUNC: + return "ident_l2_func"; + default: + return "Invalid identifier subtype"; + } +} + +const char * +tfc_tcam_2_str(enum cfa_resource_subtype_tcam tcam_stype) +{ + switch (tcam_stype) { + case CFA_RSUBTYPE_TCAM_L2CTX: + return "tcam_l2_ctx"; + case CFA_RSUBTYPE_TCAM_PROF_TCAM: + return "tcam_prof_tcam"; + case CFA_RSUBTYPE_TCAM_WC: + return "tcam_wc"; + case CFA_RSUBTYPE_TCAM_CT_RULE: + return "tcam_ct_rule"; + case CFA_RSUBTYPE_TCAM_VEB: + return "tcam_veb"; + case CFA_RSUBTYPE_TCAM_FEATURE_CHAIN: + return "tcam_fc"; + default: + return "Invalid tcam subtype"; + } +} + +const char * +tfc_idx_tbl_2_str(enum cfa_resource_subtype_idx_tbl tbl_stype) +{ + switch (tbl_stype) { + case CFA_RSUBTYPE_IDX_TBL_STAT64: + return "idx_tbl_64b_statistics"; + case CFA_RSUBTYPE_IDX_TBL_METER_PROF: + return "idx_tbl_meter_prof"; + case CFA_RSUBTYPE_IDX_TBL_METER_INST: + return "idx_tbl_meter_inst"; + case CFA_RSUBTYPE_IDX_TBL_MIRROR: + return "idx_tbl_mirror"; + case 
CFA_RSUBTYPE_IDX_TBL_METADATA_PROF: + return "idx_tbl_metadata_prof"; + case CFA_RSUBTYPE_IDX_TBL_METADATA_LKUP: + return "idx_tbl_metadata_lkup"; + case CFA_RSUBTYPE_IDX_TBL_METADATA_ACT: + return "idx_tbl_metadata_act"; + case CFA_RSUBTYPE_IDX_TBL_EM_FKB: + return "idx_tbl_em_fkb"; + case CFA_RSUBTYPE_IDX_TBL_WC_FKB: + return "idx_tbl_wc_fkb"; + case CFA_RSUBTYPE_IDX_TBL_EM_FKB_MASK: + return "idx_tbl_em_fkb_mask"; + case CFA_RSUBTYPE_IDX_TBL_CT_STATE: + return "idx_tbl_ct_state"; + case CFA_RSUBTYPE_IDX_TBL_RANGE_PROF: + return "idx_tbl_range_prof"; + case CFA_RSUBTYPE_IDX_TBL_RANGE_ENTRY: + return "idx_tbl_range_entry"; + default: + return "Invalid idx tbl subtype"; + } +} + +const char * +tfc_if_tbl_2_str(enum cfa_resource_subtype_if_tbl tbl_stype) +{ + switch (tbl_stype) { + case CFA_RSUBTYPE_IF_TBL_ILT: + return "if_tbl_ilt"; + case CFA_RSUBTYPE_IF_TBL_VSPT: + return "if_tbl_vspt"; + case CFA_RSUBTYPE_IF_TBL_PROF_PARIF_DFLT_ACT_PTR: + return "if_tbl_parif_dflt_act_ptr"; + case CFA_RSUBTYPE_IF_TBL_PROF_PARIF_ERR_ACT_PTR: + return "if_tbl_parif_err_act_ptr"; + case CFA_RSUBTYPE_IF_TBL_EPOCH0: + return "if_tbl_epoch0"; + case CFA_RSUBTYPE_IF_TBL_EPOCH1: + return "if_tbl_epoch1"; + case CFA_RSUBTYPE_IF_TBL_LAG: + return "if_tbl_lag"; + default: + return "Invalid if tbl subtype"; + } +} + +const char * +tfc_ts_region_2_str(enum cfa_region_type region, enum cfa_dir dir) +{ + switch (region) { + case CFA_REGION_TYPE_LKUP: + if (dir == CFA_DIR_RX) + return "ts_lookup_rx"; + else if (dir == CFA_DIR_TX) + return "lookup_tx"; + else + return "ts_lookup_invalid_dir"; + case CFA_REGION_TYPE_ACT: + if (dir == CFA_DIR_RX) + return "ts_action_rx"; + else if (dir == CFA_DIR_TX) + return "ts_action_tx"; + else + return "ts_action_invalid_dir"; + default: + return "Invalid ts region"; + } +} + +u32 +tfc_getbits(u32 *data, int offset, int blen) +{ + int end = (offset + blen - 1) >> 5; + int start = offset >> 5; + u32 val; + + val = data[start] >> (offset & 0x1f); + if (start != 
end) + val |= (data[start + 1] << (32 - (offset & 0x1f))); + return (blen == 32) ? val : (val & ((1 << blen) - 1)); +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.h new file mode 100644 index 000000000000..73b699d8cdec --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_util.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2023 Broadcom + * All rights reserved. + */ + +#ifndef _TFC_UTIL_H_ +#define _TFC_UTIL_H_ + +#include "tfc.h" +#include "tfo.h" + +/** + * Helper function converting direction to text string + * + * @dir: Receive or transmit direction identifier + * + * Returns: + * Pointer to a char string holding the string for the direction + */ +const char *tfc_dir_2_str(enum cfa_dir dir); + +/** + * Helper function converting identifier subtype to text string + * + * @id_stype: Identifier subtype + * + * Returns: + * Pointer to a char string holding the string for the identifier + */ +const char *tfc_ident_2_str(enum cfa_resource_subtype_ident id_stype); + +/** + * Helper function converting tcam subtype to text string + * + * @tcam_stype: TCAM subtype + * + * Returns: + * Pointer to a char string holding the string for the tcam + */ +const char *tfc_tcam_2_str(enum cfa_resource_subtype_tcam tcam_stype); + +/** + * Helper function converting index tbl subtype to text string + * + * @idx_tbl_stype: Index table subtype + * + * Returns: + * Pointer to a char string holding the string for the table subtype + */ +const char *tfc_idx_tbl_2_str(enum cfa_resource_subtype_idx_tbl idx_tbl_stype); + +/** + * Helper function converting table scope lkup/act type and direction (region) + * to string + * + * @region_type: Region type + * @dir: Direction + * + * Returns: + * Pointer to a char string holding the string for the table subtype + */ +const char * +tfc_ts_region_2_str(enum cfa_region_type region, enum cfa_dir dir); + +/** + * 
Helper function converting if tbl subtype to text string + * + * @if_tbl_stype: If table subtype + * + * Returns: + * Pointer to a char string holding the string for the table subtype + */ +const char *tfc_if_tbl_2_str(enum cfa_resource_subtype_if_tbl if_tbl_stype); + +/** + * Helper function retrieving field value from the buffer + * + * @data: buffer + * @offset: field start bit position in the buffer + * @blen: field length in bit + * + * Returns: + * field value + */ +u32 tfc_getbits(u32 *data, int offset, int blen); + +#endif /* _TFC_UTIL_H_ */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.c new file mode 100644 index 000000000000..6efdf400fabe --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.c @@ -0,0 +1,322 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#include +#include "bnxt_compat.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" +#include "bnxt_sriov.h" +#include "bnxt_debugfs.h" +#include "tfc_vf2pf_msg.h" +#include "tfc_util.h" + +/* Logging defines */ +#define TFC_VF2PF_MSG_DEBUG 0 + +int +tfc_vf2pf_mem_alloc(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req, + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp) +{ + struct bnxt *bp = tfcp->bp; + int rc; + + if (!req) { + netdev_dbg(bp->dev, "%s: Invalid req pointer\n", __func__); + return -EINVAL; + } + + if (!resp) { + netdev_dbg(bp->dev, "%s: Invalid resp pointer\n", __func__); + return -EINVAL; + } + + rc = bnxt_hwrm_tf_oem_cmd(bp, (u32 *)req, sizeof(*req), (u32 *)resp, sizeof(*resp)); + return rc; +} + +int +tfc_vf2pf_mem_free(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_mem_free_cmd *req, + struct tfc_vf2pf_tbl_scope_mem_free_resp *resp) +{ + struct bnxt *bp = tfcp->bp; + int rc; + + if (!req) { + netdev_dbg(bp->dev, "%s: Invalid req pointer\n", __func__); + return -EINVAL; + } + + if (!resp) 
{ + netdev_dbg(bp->dev, "%s: Invalid resp pointer\n", __func__); + return -EINVAL; + } + + rc = bnxt_hwrm_tf_oem_cmd(bp, (u32 *)req, sizeof(*req), (u32 *)resp, sizeof(*resp)); + return rc; +} + +int +tfc_vf2pf_pool_alloc(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req, + struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp) +{ + struct bnxt *bp = tfcp->bp; + + if (!req) { + netdev_dbg(bp->dev, "%s: Invalid req pointer\n", __func__); + return -EINVAL; + } + + if (!resp) { + netdev_dbg(bp->dev, "%s: Invalid resp pointer\n", __func__); + return -EINVAL; + } + + return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req), + (uint32_t *)resp, sizeof(*resp)); +} + +int +tfc_vf2pf_pool_free(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_pool_free_cmd *req, + struct tfc_vf2pf_tbl_scope_pool_free_resp *resp) +{ + struct bnxt *bp = tfcp->bp; + + if (!req) { + netdev_dbg(bp->dev, "%s: Invalid req pointer\n", __func__); + return -EINVAL; + } + if (!resp) { + netdev_dbg(bp->dev, "%s: Invalid resp pointer\n", __func__); + return -EINVAL; + } + + return bnxt_hwrm_tf_oem_cmd(bp, (uint32_t *)req, sizeof(*req), + (uint32_t *)resp, sizeof(*resp)); +} + +static int +tfc_vf2pf_mem_alloc_process(struct tfc *tfcp, + u32 *oem_data, + u32 *resp_data, + u16 *resp_len) +{ + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp = + (struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *)resp_data; + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req = + (struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *)oem_data; + struct tfc_tbl_scope_mem_alloc_parms ma_parms; + u16 data_len = sizeof(*resp); + struct bnxt *bp = tfcp->bp; + int dir, rc; + + if (*resp_len < data_len) { + netdev_dbg(bp->dev, "%s: resp_data buffer is too small\n", __func__); + return -EINVAL; + } + + /* This block of code is for testing purpose. 
Will be removed later */ + netdev_dbg(bp->dev, "%s: Table scope mem alloc cfg cmd:\n", __func__); + netdev_dbg(bp->dev, "\ttsid: 0x%x, max_pools: 0x%x\n", req->tsid, req->max_pools); + for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) { + netdev_dbg(bp->dev, "\tsbuckt_cnt_exp: 0x%x, dbucket_cnt: 0x%x\n", + req->static_bucket_cnt_exp[dir], req->dynamic_bucket_cnt[dir]); + netdev_dbg(bp->dev, "\tlkup_rec_cnt: 0x%x, lkup_pool_sz_exp: 0x%x\n", + req->lkup_rec_cnt[dir], req->lkup_pool_sz_exp[dir]); + netdev_dbg(bp->dev, "\tact_pool_sz_exp: 0x%x, lkup_rec_start_offset: 0x%x\n", + req->act_pool_sz_exp[dir], req->lkup_rec_start_offset[dir]); + } + + memset(&ma_parms, 0, sizeof(struct tfc_tbl_scope_mem_alloc_parms)); + + for (dir = CFA_DIR_RX; dir < CFA_DIR_MAX; dir++) { + ma_parms.static_bucket_cnt_exp[dir] = req->static_bucket_cnt_exp[dir]; + ma_parms.dynamic_bucket_cnt[dir] = req->dynamic_bucket_cnt[dir]; + ma_parms.lkup_rec_cnt[dir] = req->lkup_rec_cnt[dir]; + ma_parms.act_rec_cnt[dir] = req->act_rec_cnt[dir]; + ma_parms.act_pool_sz_exp[dir] = req->act_pool_sz_exp[dir]; + ma_parms.lkup_pool_sz_exp[dir] = req->lkup_pool_sz_exp[dir]; + ma_parms.lkup_rec_start_offset[dir] = req->lkup_rec_start_offset[dir]; + } + /* Obtain from driver page definition (4k for DPDK) */ + ma_parms.pbl_page_sz_in_bytes = BNXT_PAGE_SIZE; + /* First is meaningless on the PF, set to 0 */ + ma_parms.first = 0; + + /* This is not for local use if we are getting a message from the VF */ + ma_parms.local = false; + ma_parms.max_pools = req->max_pools; + rc = tfc_tbl_scope_mem_alloc(tfcp, req->hdr.fid, req->tsid, &ma_parms); + if (!rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) PF allocation succeeds\n", __func__, req->tsid); + } else { + netdev_dbg(bp->dev, "%s: tsid(%d) PF allocation fails (%d)\n", __func__, req->tsid, + rc); + } + + rc = bnxt_debug_tf_create(bp, req->tsid); + if (rc) + netdev_dbg(bp->dev, "%s: port(%d) tsid(%d) Failed to create debugfs entry\n", + __func__, bp->pf.port_id, req->tsid); + + 
*resp_len = cpu_to_le16(data_len); + resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD; + resp->tsid = req->tsid; + resp->status = rc; + return rc; +} + +static int +tfc_vf2pf_mem_free_process(struct tfc *tfcp, u32 *oem_data, u32 *resp_data, u16 *resp_len) +{ + struct tfc_vf2pf_tbl_scope_mem_free_resp *resp = + (struct tfc_vf2pf_tbl_scope_mem_free_resp *)resp_data; + struct tfc_vf2pf_tbl_scope_mem_free_cmd *req = + (struct tfc_vf2pf_tbl_scope_mem_free_cmd *)oem_data; + u16 data_len = sizeof(*resp); + struct bnxt *bp = tfcp->bp; + int rc; + + if (*resp_len < data_len) { + netdev_dbg(bp->dev, "%s: resp_data buffer is too small\n", __func__); + return -EINVAL; + } + + /* This block of code is for testing purpose. Will be removed later */ + netdev_dbg(bp->dev, "%s: Table scope mem free cfg cmd:\n", __func__); + netdev_dbg(bp->dev, "\ttsid: 0x%x\n", req->tsid); + + rc = tfc_tbl_scope_mem_free(tfcp, req->hdr.fid, req->tsid); + if (!rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) PF free succeeds\n", + __func__, req->tsid); + } else { + netdev_dbg(bp->dev, "%s: tsid(%d) PF free fails (%d)\n", + __func__, req->tsid, rc); + } + + bnxt_debug_tf_delete(bp); + + *resp_len = cpu_to_le16(data_len); + resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD; + resp->tsid = req->tsid; + resp->status = rc; + return rc; +} + +static int +tfc_vf2pf_pool_alloc_process(struct tfc *tfcp, + u32 *oem_data, + u32 *resp_data, + u16 *resp_len) +{ + struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp = + (struct tfc_vf2pf_tbl_scope_pool_alloc_resp *)resp_data; + struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req = + (struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *)oem_data; + u16 data_len = sizeof(*resp); + struct bnxt *bp = tfcp->bp; + u8 pool_sz_exp = 0; + u16 pool_id = 0; + int rc; + + if (*resp_len < data_len) { + netdev_dbg(bp->dev, "%s: resp_data buffer is too small\n", __func__); + return -EINVAL; + } + + /* This block of code is for testing purpose. 
Will be removed later */ + netdev_dbg(bp->dev, "%s: Table scope pool alloc cmd:\n", __func__); + netdev_dbg(bp->dev, "\ttsid: 0x%x, region:%s fid(%d)\n", req->tsid, + tfc_ts_region_2_str(req->region, req->dir), req->hdr.fid); + + rc = tfc_tbl_scope_pool_alloc(tfcp, req->hdr.fid, req->tsid, req->region, + req->dir, &pool_sz_exp, &pool_id); + if (!rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) PF pool_alloc(%d) succeeds\n", + __func__, req->tsid, pool_id); + } else { + netdev_dbg(bp->dev, "%s: tsid(%d) PF pool_alloc fails (%d)\n", + __func__, req->tsid, rc); + } + *resp_len = cpu_to_le16(data_len); + resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD; + resp->tsid = req->tsid; + resp->pool_sz_exp = pool_sz_exp; + resp->pool_id = pool_id; + resp->status = rc; + return rc; +} + +static int +tfc_vf2pf_pool_free_process(struct tfc *tfcp, + u32 *oem_data, + u32 *resp_data, + u16 *resp_len) +{ + struct tfc_vf2pf_tbl_scope_pool_free_resp *resp = + (struct tfc_vf2pf_tbl_scope_pool_free_resp *)resp_data; + struct tfc_vf2pf_tbl_scope_pool_free_cmd *req = + (struct tfc_vf2pf_tbl_scope_pool_free_cmd *)oem_data; + u16 data_len = sizeof(*resp); + struct bnxt *bp = tfcp->bp; + int rc; + + if (*resp_len < data_len) { + netdev_dbg(bp->dev, "%s: resp_data buffer is too small\n", __func__); + return -EINVAL; + } + + /* This block of code is for testing purpose. 
Will be removed later */ + netdev_dbg(bp->dev, "%s: Table scope pool free cfg cmd:\n", __func__); + netdev_dbg(bp->dev, "\ttsid: 0x%x\n", req->tsid); + + rc = tfc_tbl_scope_pool_free(tfcp, req->hdr.fid, req->tsid, req->region, + req->dir, req->pool_id); + if (!rc) { + netdev_dbg(bp->dev, "%s: tsid(%d) PF free succeeds\n", __func__, req->tsid); + } else { + netdev_dbg(bp->dev, "%s: tsid(%d) PF free fails (%d)\n", __func__, req->tsid, + rc); + } + *resp_len = cpu_to_le16(data_len); + resp->hdr.type = TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD; + resp->tsid = req->tsid; + resp->status = rc; + return rc; +} + +int +tfc_oem_cmd_process(struct tfc *tfcp, u32 *oem_data, u32 *resp, u16 *resp_len) +{ + struct tfc_vf2pf_hdr *oem = (struct tfc_vf2pf_hdr *)oem_data; + int rc; + + switch (oem->type) { + case TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD: + rc = tfc_vf2pf_mem_alloc_process(tfcp, oem_data, resp, resp_len); + break; + case TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD: + rc = tfc_vf2pf_mem_free_process(tfcp, oem_data, resp, resp_len); + break; + + case TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD: + rc = tfc_vf2pf_pool_alloc_process(tfcp, oem_data, resp, resp_len); + break; + + case TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD: + rc = tfc_vf2pf_pool_free_process(tfcp, oem_data, resp, resp_len); + break; + case TFC_VF2PF_TYPE_TBL_SCOPE_PFID_QUERY_CMD: + default: + rc = -EPERM; + break; + } + + return rc; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.h new file mode 100644 index 000000000000..e4ef349eb1d9 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfc_vf2pf_msg.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Broadcom + * All rights reserved. + */ +#ifndef _TFC_VF2PF_MSG_H_ +#define _TFC_VF2PF_MSG_H_ + +#include "cfa_types.h" +#include "tfc.h" + +/* HWRM_OEM_CMD is used to transport the vf2pf commands and responses. 
+ * All commands will have a naming_authority set to PCI_SIG, oem_id set to + * 0x14e4 and message_family set to TRUFLOW. The maximum size of the oem_data + * is 104 bytes. The response maximum size is 88 bytes. + */ + +/* Truflow VF2PF message types */ +enum tfc_vf2pf_type { + TFC_VF2PF_TYPE_TBL_SCOPE_MEM_ALLOC_CFG_CMD = 1, + TFC_VF2PF_TYPE_TBL_SCOPE_MEM_FREE_CMD, + TFC_VF2PF_TYPE_TBL_SCOPE_PFID_QUERY_CMD, + TFC_VF2PF_TYPE_TBL_SCOPE_POOL_ALLOC_CMD, + TFC_VF2PF_TYPE_TBL_SCOPE_POOL_FREE_CMD, +}; + +/* Truflow VF2PF response status */ +enum tfc_vf2pf_status { + TFC_VF2PF_STATUS_OK = 0, + TFC_VF2PF_STATUS_TSID_CFG_ERR = 1, + TFC_VF2PF_STATUS_TSID_MEM_ALLOC_ERR = 2, + TFC_VF2PF_STATUS_TSID_INVALID = 3, + TFC_VF2PF_STATUS_TSID_NOT_CONFIGURED = 4, + TFC_VF2PF_STATUS_NO_POOLS_AVAIL = 5, + TFC_VF2PF_STATUS_FID_ERR = 6, +}; + +/* Truflow VF2PF header used for all Truflow VF2PF cmds/responses */ +struct tfc_vf2pf_hdr { + u16 type; /* use enum tfc_vf2pf_type */ + u16 fid; /* VF fid */ +}; + +/* Truflow VF2PF Table Scope Memory allocate/config command */ +struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd { + struct tfc_vf2pf_hdr hdr; /* vf2pf header */ + u8 tsid; /* table scope identifier */ + u8 static_bucket_cnt_exp[CFA_DIR_MAX]; /* lkup static bucket count */ + u16 max_pools; /* maximum number of pools requested - 1 for non-shared */ + u32 dynamic_bucket_cnt[CFA_DIR_MAX]; /* dynamic bucket count */ + u32 lkup_rec_cnt[CFA_DIR_MAX]; /* lkup record count */ + u32 act_rec_cnt[CFA_DIR_MAX]; /* action record count */ + u8 lkup_pool_sz_exp[CFA_DIR_MAX]; /* lkup pool sz expressed as log2(max_recs/max_pools) */ + u8 act_pool_sz_exp[CFA_DIR_MAX]; /* action pool sz expressed as log2(max_recs/max_pools) */ + u32 lkup_rec_start_offset[CFA_DIR_MAX]; /* start offset in 32B records of the lkup recs */ + /* (after buckets) */ +}; + +/* Truflow VF2PF Table Scope Memory allocate/config response */ +struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp { + struct tfc_vf2pf_hdr hdr; /* vf2pf header copied 
from cmd */ + enum tfc_vf2pf_status status; /* status of request */ + u8 tsid; /* tsid allocated */ +}; + +/* Truflow VF2PF Table Scope Memory free command */ +struct tfc_vf2pf_tbl_scope_mem_free_cmd { + struct tfc_vf2pf_hdr hdr; /* vf2pf header */ + uint8_t tsid; /* table scope identifier */ +}; + +/* Truflow VF2PF Table Scope Memory free response */ +struct tfc_vf2pf_tbl_scope_mem_free_resp { + struct tfc_vf2pf_hdr hdr; /* vf2pf header copied from cmd */ + enum tfc_vf2pf_status status; /* status of request */ + uint8_t tsid; /* tsid memory freed */ +}; + +/* Truflow VF2PF Table Scope PFID query command */ +struct tfc_vf2pf_tbl_scope_pfid_query_cmd { + struct tfc_vf2pf_hdr hdr; /* vf2pf header */ +}; + +/* Truflow VF2PF Table Scope PFID query response */ +struct tfc_vf2pf_pfid_query_resp { + struct tfc_vf2pf_hdr hdr; /* vf2pf header copied from cmd */ + enum tfc_vf2pf_status status; /* status of AFM/NIC flow tbl scope */ + u8 tsid; /* tsid used for AFM/NIC flow tbl scope */ + u8 lkup_pool_sz_exp[CFA_DIR_MAX]; /* lookup tbl pool size = log2(max_recs/max_pools) */ + u8 act_pool_sz_exp[CFA_DIR_MAX]; /* action tbl pool size = log2(max_recs/max_pools) */ + u32 lkup_rec_start_offset[CFA_DIR_MAX]; /* lkup record start offset in 32B records */ + u16 max_pools; /* maximum number of pools */ +}; + +/* Truflow VF2PF Table Scope pool alloc command */ +struct tfc_vf2pf_tbl_scope_pool_alloc_cmd { + struct tfc_vf2pf_hdr hdr; /* vf2pf header */ + u8 tsid; /* table scope identifier */ + enum cfa_dir dir; /* direction RX or TX */ + enum cfa_region_type region; /* region lkup or action */ +}; + +/* Truflow VF2PF Table Scope pool alloc response */ +struct tfc_vf2pf_tbl_scope_pool_alloc_resp { + struct tfc_vf2pf_hdr hdr; /* vf2pf header copied from cmd */ + enum tfc_vf2pf_status status; /* status of pool allocation */ + u8 tsid; /* tbl scope identifier */ + u8 pool_sz_exp; /* pool size expressed as log2(max_recs/max_pools) */ + u16 pool_id; /* pool_id allocated */ +}; + +/* Truflow 
VF2PF Table Scope pool free command */ +struct tfc_vf2pf_tbl_scope_pool_free_cmd { + struct tfc_vf2pf_hdr hdr; /* vf2pf header */ + enum cfa_dir dir; /* direction RX or TX */ + enum cfa_region_type region; /* region lkup or action */ + u8 tsid; /* table scope id */ + u16 pool_id; /* pool id */ +}; + +/* Truflow VF2PF Table Scope pool free response */ +struct tfc_vf2pf_tbl_scope_pool_free_resp { + struct tfc_vf2pf_hdr hdr; /* vf2pf header copied from cmd */ + enum tfc_vf2pf_status status; /* status of pool allocation */ + u8 tsid; /* table scope id */ +}; + +int +tfc_vf2pf_mem_alloc(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_cmd *req, + struct tfc_vf2pf_tbl_scope_mem_alloc_cfg_resp *resp); + +int +tfc_vf2pf_mem_free(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_mem_free_cmd *req, + struct tfc_vf2pf_tbl_scope_mem_free_resp *resp); + +int +tfc_vf2pf_pool_alloc(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_pool_alloc_cmd *req, + struct tfc_vf2pf_tbl_scope_pool_alloc_resp *resp); + +int +tfc_vf2pf_pool_free(struct tfc *tfcp, + struct tfc_vf2pf_tbl_scope_pool_free_cmd *req, + struct tfc_vf2pf_tbl_scope_pool_free_resp *resp); + +int +tfc_oem_cmd_process(struct tfc *tfcp, u32 *oem_data, u32 *resp, u16 *resp_len); +#endif /* _TFC_VF2PF_MSG_H */ diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.c b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.c new file mode 100644 index 000000000000..dd5e425ea9d2 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright(c) 2023 Broadcom + * All rights reserved. 
+ */ + +#include +#include "bnxt_compat.h" +#include "tfo.h" +#include "cfa_types.h" +#include "cfa_tim.h" +#include "bnxt.h" + +/* Table scope stored configuration */ +struct tfc_tsid_db { + bool ts_valid; /* Table scope is valid */ + bool ts_is_shared; /* Table scope is shared */ + bool ts_is_bs_owner; /* Backing store alloced by this instance (PF) */ + uint16_t ts_max_pools; /* maximum pools per CPM instance */ + enum cfa_app_type ts_app; /* application type TF/AFM */ + struct tfc_ts_mem_cfg ts_mem[CFA_REGION_TYPE_MAX][CFA_DIR_MAX]; /* backing store mem cfg */ + struct tfc_ts_pool_info ts_pool[CFA_DIR_MAX]; /* pool info config */ +}; + +/* TFC Object Signature + * This signature identifies the tfc object database and + * is used for pointer validation + */ +#define TFC_OBJ_SIGNATURE 0xABACABAF + +/* TFC Object + * This data structure contains all data stored per bnxt port + * Access is restricted through set/get APIs. + * + * If a module (e.g. tbl_scope needs to store data, it should + * be added here and accessor functions created. + */ +struct tfc_object { + u32 signature; /* TF object signature */ + u16 sid; /* Session ID */ + bool is_pf; /* port is a PF */ + struct cfa_bld_mpcinfo mpc_info; /* MPC ops handle */ + struct tfc_tsid_db tsid_db[TFC_TBL_SCOPE_MAX]; /* tsid database */ + /* TIM instance pointer (PF) - this is where the 4 instances + * of the TPM (rx/tx_lkup, rx/tx_act) will be stored per shared + * table scope. Only valid on a PF. 
+ */ + void *ts_tim; +}; + +void tfo_open(void **tfo, bool is_pf) +{ + struct tfc_object *tfco = NULL; + u32 tim_db_size; + int rc; + + tfco = kzalloc(sizeof(*tfco), GFP_KERNEL); + if (!tfco) + return; + + tfco->signature = TFC_OBJ_SIGNATURE; + tfco->is_pf = is_pf; + tfco->sid = INVALID_SID; + tfco->ts_tim = NULL; + + /* Bind to the MPC builder */ + rc = cfa_bld_mpc_bind(CFA_P70, &tfco->mpc_info); + if (rc) { + netdev_dbg(NULL, "%s: MPC bind failed\n", __func__); + goto cleanup; + } + if (is_pf) { + /* Allocate TIM */ + rc = cfa_tim_query(TFC_TBL_SCOPE_MAX, CFA_REGION_TYPE_MAX, + &tim_db_size); + if (rc) + goto cleanup; + + tfco->ts_tim = kzalloc(tim_db_size, GFP_KERNEL); + if (!tfco->ts_tim) + goto cleanup; + + rc = cfa_tim_open(tfco->ts_tim, + tim_db_size, + TFC_TBL_SCOPE_MAX, + CFA_REGION_TYPE_MAX); + if (rc) { + kfree(tfco->ts_tim); + tfco->ts_tim = NULL; + goto cleanup; + } + } + + *tfo = tfco; + return; + +cleanup: + kfree(tfco); + *tfo = NULL; +} + +void tfo_close(void **tfo) +{ + struct tfc_object *tfco = (struct tfc_object *)(*tfo); + void *tim = NULL, *tpm = NULL; + enum cfa_region_type region; + int dir, rc, tsid; + + if (*tfo && tfco->signature == TFC_OBJ_SIGNATURE) { + /* If TIM is setup free it and any TPMs */ + if (tfo_tim_get(*tfo, &tim)) + goto done; + + if (!tim) + goto done; + + for (tsid = 0; tsid < TFC_TBL_SCOPE_MAX; tsid++) { + for (region = 0; region < CFA_REGION_TYPE_MAX; region++) { + for (dir = 0; dir < CFA_DIR_MAX; dir++) { + tpm = NULL; + rc = cfa_tim_tpm_inst_get(tim, tsid, region, dir, &tpm); + if (!rc && tpm) { + kfree(tpm); + cfa_tim_tpm_inst_set(tim, tsid, region, dir, NULL); + } + } + } + } + kfree(tfco->ts_tim); + tfco->ts_tim = NULL; +done: + kfree(*tfo); + *tfo = NULL; + } +} + +int tfo_mpcinfo_get(void *tfo, struct cfa_bld_mpcinfo **mpc_info) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + + if (!tfo) + return -EINVAL; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", 
__func__); + return -EINVAL; + } + + *mpc_info = &tfco->mpc_info; + + return 0; +} + +int tfo_ts_validate(void *tfo, u8 ts_tsid, bool *ts_valid) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + tsid_db = &tfco->tsid_db[ts_tsid]; + + if (ts_valid) + *ts_valid = tsid_db->ts_valid; + + return 0; +} + +int tfo_ts_set(void *tfo, u8 ts_tsid, bool ts_is_shared, + enum cfa_app_type ts_app, bool ts_valid, u16 ts_max_pools) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + + tsid_db = &tfco->tsid_db[ts_tsid]; + + tsid_db->ts_valid = ts_valid; + tsid_db->ts_is_shared = ts_is_shared; + tsid_db->ts_app = ts_app; + tsid_db->ts_max_pools = ts_max_pools; + + return 0; +} + +int tfo_ts_get(void *tfo, u8 ts_tsid, bool *ts_is_shared, + enum cfa_app_type *ts_app, bool *ts_valid, + u16 *ts_max_pools) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + + tsid_db = &tfco->tsid_db[ts_tsid]; + + if (ts_valid) + *ts_valid = tsid_db->ts_valid; + + if (ts_is_shared) + *ts_is_shared = tsid_db->ts_is_shared; + + if (ts_app) + *ts_app = tsid_db->ts_app; + + if (ts_max_pools) + *ts_max_pools = tsid_db->ts_max_pools; + + return 0; +} 
+ +/* Set the table scope memory configuration for this direction */ +int tfo_ts_set_mem_cfg(void *tfo, uint8_t ts_tsid, enum cfa_dir dir, + enum cfa_region_type region, bool is_bs_owner, + struct tfc_ts_mem_cfg *mem_cfg) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!mem_cfg) { + netdev_dbg(NULL, "%s: Invalid mem_cfg pointer\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + + tsid_db = &tfco->tsid_db[ts_tsid]; + + tsid_db->ts_mem[region][dir] = *mem_cfg; + tsid_db->ts_is_bs_owner = is_bs_owner; + + return 0; +} + +/* Get the table scope memory configuration for this direction */ +int tfo_ts_get_mem_cfg(void *tfo, u8 ts_tsid, enum cfa_dir dir, + enum cfa_region_type region, bool *is_bs_owner, + struct tfc_ts_mem_cfg *mem_cfg) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!mem_cfg) { + netdev_dbg(NULL, "%s: Invalid mem_cfg pointer\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + + tsid_db = &tfco->tsid_db[ts_tsid]; + + *mem_cfg = tsid_db->ts_mem[region][dir]; + if (is_bs_owner) + *is_bs_owner = tsid_db->ts_is_bs_owner; + + return 0; +} + +/* Get the Pool Manager instance */ +int tfo_ts_get_cpm_inst(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_cpm **cpm_lkup, struct tfc_cpm **cpm_act) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + 
return -EINVAL; + } + + if (!cpm_lkup) { + netdev_dbg(NULL, "%s: Invalid cpm_lkup pointer\n", __func__); + return -EINVAL; + } + + if (!cpm_act) { + netdev_dbg(NULL, "%s: Invalid cpm_act pointer\n", __func__); + return -EINVAL; + } + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + + tsid_db = &tfco->tsid_db[ts_tsid]; + + *cpm_lkup = tsid_db->ts_pool[dir].lkup_cpm; + *cpm_act = tsid_db->ts_pool[dir].act_cpm; + + return 0; +} + +/* Set the Pool Manager instance */ +int tfo_ts_set_cpm_inst(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_cpm *cpm_lkup, struct tfc_cpm *cpm_act) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + tsid_db = &tfco->tsid_db[ts_tsid]; + + tsid_db->ts_pool[dir].lkup_cpm = cpm_lkup; + tsid_db->ts_pool[dir].act_cpm = cpm_act; + + return 0; +} + +/* Set the table scope pool memory configuration for this direction */ +int tfo_ts_set_pool_info(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_ts_pool_info *ts_pool) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!ts_pool) { + netdev_dbg(NULL, "%s: Invalid ts_pool pointer\n", __func__); + return -EINVAL; + } + + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + tsid_db = &tfco->tsid_db[ts_tsid]; + + tsid_db->ts_pool[dir] = *ts_pool; + + return 0; +} + +/* Get the table scope pool memory configuration for this direction */ +int tfo_ts_get_pool_info(void *tfo, u8 ts_tsid, enum cfa_dir 
dir, + struct tfc_ts_pool_info *ts_pool) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + struct tfc_tsid_db *tsid_db; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!ts_pool) { + netdev_dbg(NULL, "%s: Invalid ts_pool pointer\n", __func__); + return -EINVAL; + } + if (ts_tsid >= TFC_TBL_SCOPE_MAX) { + netdev_dbg(NULL, "%s: Invalid tsid %d\n", __func__, ts_tsid); + return -EINVAL; + } + tsid_db = &tfco->tsid_db[ts_tsid]; + + *ts_pool = tsid_db->ts_pool[dir]; + + return 0; +} + +int tfo_sid_set(void *tfo, uint16_t sid) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (tfco->sid != INVALID_SID && sid != INVALID_SID && + tfco->sid != sid) { + netdev_dbg(NULL, "%s: Cannot set SID %u, current session is %u.\n", + __func__, sid, tfco->sid); + return -EINVAL; + } + + tfco->sid = sid; + + return 0; +} + +int tfo_sid_get(void *tfo, uint16_t *sid) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!sid) { + netdev_dbg(NULL, "%s: Invalid sid pointer\n", __func__); + return -EINVAL; + } + + if (tfco->sid == INVALID_SID) { + /* Session has not been created */ + return -ENODATA; + } + + *sid = tfco->sid; + + return 0; +} + +int tfo_tim_set(void *tfo, void *tim) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!tim) { + netdev_dbg(NULL, "%s: Invalid tim pointer\n", __func__); + return -EINVAL; + } + + if (tfco->ts_tim && tfco->ts_tim != tim) { + netdev_dbg(NULL, "%s: Cannot set TS TIM, TIM is already set\n", __func__); + return -EINVAL; + } + + tfco->ts_tim = 
tim; + + return 0; +} + +int tfo_tim_get(void *tfo, void **tim) +{ + struct tfc_object *tfco = (struct tfc_object *)tfo; + + if (tfco->signature != TFC_OBJ_SIGNATURE) { + netdev_dbg(NULL, "%s: Invalid tfo object\n", __func__); + return -EINVAL; + } + + if (!tim) { + netdev_dbg(NULL, "%s: Invalid tim pointer to pointer\n", __func__); + return -EINVAL; + } + + if (!tfco->ts_tim) + /* ts tim could be null, no need to log error message */ + return -ENODATA; + + *tim = tfco->ts_tim; + + return 0; +} diff --git a/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.h b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.h new file mode 100644 index 000000000000..268c2f11f151 --- /dev/null +++ b/drivers/thirdparty/release-drivers/bnxt/tfc_v3/tfo.h @@ -0,0 +1,271 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2023 Broadcom + * All rights reserved. + */ + +#ifndef _TFO_H_ +#define _TFO_H_ + +#include "cfa_types.h" +#include "cfa_bld_mpcops.h" +#include "tfc.h" +#include "tfc_cpm.h" + +/* Invalid Table Scope ID */ +#define INVALID_TSID 0xff + +/* Invalid session ID */ +#define INVALID_SID 0xffff + +/* Maximum number of table scopes */ +#define TFC_TBL_SCOPE_MAX 32 + +/* Backing store/memory page levels */ +enum tfc_ts_pg_tbl_lvl { + TFC_TS_PT_LVL_0 = 0, + TFC_TS_PT_LVL_1, + TFC_TS_PT_LVL_2, + TFC_TS_PT_LVL_MAX +}; + +/* Backing store/memory page table level config structure */ +struct tfc_ts_page_tbl { + dma_addr_t *pg_pa_tbl; /* Array of pointers to physical addresses */ + void **pg_va_tbl; /* Array of pointers to virtual addresses */ + u32 pg_count; /* Number of pages in this level */ + u32 pg_size; /* Size of each page in bytes */ +}; + +/* Backing store/memory config structure */ +struct tfc_ts_mem_cfg { + struct tfc_ts_page_tbl pg_tbl[TFC_TS_PT_LVL_MAX]; /* page table configuration */ + u64 num_data_pages; /* Total number of pages */ + u64 l0_dma_addr; /* Physical base memory address */ + void *l0_addr; /* Virtual base memory address */ + int num_lvl; /* 
Number of page levels */ + u32 page_cnt[TFC_TS_PT_LVL_MAX]; /* Page count per level */ + u32 rec_cnt; /* Total number of records in memory */ + u32 lkup_rec_start_offset; /* Offset of lkup rec start (in recs) */ + u32 entry_size; /* Size of record in bytes */ +}; + +/* Backing store pool info */ +struct tfc_ts_pool_info { + u16 lkup_max_contig_rec; /* max contig records */ + u16 act_max_contig_rec; /* max contig records */ + u8 lkup_pool_sz_exp; /* lookup pool size exp */ + u8 act_pool_sz_exp; /* action pool size exp */ + struct tfc_cpm *lkup_cpm; /* CPM lookup pool manager pointer */ + struct tfc_cpm *act_cpm; /* CPM action pool manager pointer */ +}; + +/* TFO APIs */ + +/** + * Allocate a TFC object for this DPDK port/function. + * + * @tfo: Pointer to TFC object + * @is_pf: Indicates whether the port is a PF. + */ +void tfo_open(void **tfo, bool is_pf); + +/** + * Free the TFC object for this DPDK port/function. + * + * @tfo: Pointer to TFC object + */ +void tfo_close(void **tfo); + +/** + * Validate table scope id + * + * @tfo: Pointer to TFC object + * @ts_tsid: Table scope ID + * @ts_valid True if the table scope is valid + * + * Return 0 for tsid within range + */ +int tfo_ts_validate(void *tfo, uint8_t ts_tsid, bool *ts_valid); + +/** + * Set the table scope configuration. + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @ts_is_shared: True if the table scope is shared + * @ts_app: Application type TF/AFM + * @ts_valid: True if the table scope is valid + * @ts_max_pools: Maximum number of pools if shared. + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_set(void *tfo, u8 ts_tsid, bool ts_is_shared, + enum cfa_app_type ts_app, bool ts_valid, + u16 ts_max_pools); + +/** + * Get the table scope configuration. 
+ * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @ts_is_shared: True if the table scope is shared + * @ts_app: Application type TF/AFM + * @ts_valid: True if the table scope is valid + * @ts_max_pools: Maximum number of pools Returned if shared + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_get(void *tfo, u8 ts_tsid, bool *ts_is_shared, + enum cfa_app_type *ts_app, bool *ts_valid, + u16 *ts_max_pools); + +/** + * Set the table scope memory configuration for this direction. + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @region: The memory region type (lookup/action) + * @is_bs_owner: True if the caller is the owner of the backing store + * @mem_cfg: Backing store/memory config structure + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_set_mem_cfg(void *tfo, u8 ts_tsid, enum cfa_dir dir, + enum cfa_region_type region, bool is_bs_owner, + struct tfc_ts_mem_cfg *mem_cfg); + +/** + * Get the table scope memory configuration for this direction. + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @region: The memory region type (lookup/action) + * @is_bs_owner: True if the caller is the owner of the backing store + * @mem_cfg: Backing store/memory config structure + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_get_mem_cfg(void *tfo, u8 ts_tsid, enum cfa_dir dir, + enum cfa_region_type region, bool *is_bs_owner, + struct tfc_ts_mem_cfg *mem_cfg); + +/** + * Get the pool memory configuration for this direction. 
+ * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @ts_pool: Table scope pool info + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_get_pool_info(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_ts_pool_info *ts_pool); + +/** + * Set the pool memory configuration for this direction. + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @ts_pool: Table scope pool info + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_set_pool_info(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_ts_pool_info *ts_pool); + +/** + * Get the Pool Manager instance + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @cpm_lkup: Lookup CPM instance + * @cpm_act: Action CPM instance + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_get_cpm_inst(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_cpm **cpm_lkup, struct tfc_cpm **cpm_act); + +/** + * Set the Pool Manager instance + * + * @tfo: Pointer to TFC object + * @ts_tsid: The table scope ID + * @dir: The direction (RX/TX) + * @cpm_lkup: Lookup CPM instance + * @cpm_act: Action CPM instance + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_ts_set_cpm_inst(void *tfo, u8 ts_tsid, enum cfa_dir dir, + struct tfc_cpm *cpm_lkup, struct tfc_cpm *cpm_act); + +/** + * Get the MPC info reference + * + * @tfo: Pointer to TFC object + * @mpc_info: MPC reference + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_mpcinfo_get(void *tfo, struct cfa_bld_mpcinfo **mpc_info); + +/** + * Set the session ID. 
 + * + * @tfo: Pointer to TFC object + * @sid: The session ID + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_sid_set(void *tfo, u16 sid); + +/** + * Get the session ID. + * + * @tfo: Pointer to TFC object + * @sid: The session ID + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_sid_get(void *tfo, u16 *sid); + +/** + * Set the table scope instance manager. + * + * @tfo: Pointer to TFC object + * @tim: Pointer to the table scope instance manager + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_tim_set(void *tfo, void *tim); + +/** + * Get the table scope instance manager. + * + * @tfo: Pointer to TFC object + * @tim: Pointer to a pointer to the table scope instance manager + * + * Return + * 0 for SUCCESS, negative error value for FAILURE (errno.h) + */ +int tfo_tim_get(void *tfo, void **tim); + +#endif /* _TFO_H_ */ diff --git a/drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh b/drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh new file mode 100755 index 000000000000..591e597072e0 --- /dev/null +++ b/drivers/thirdparty/release-drivers/mlnx/get_mlnx_info.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +mlnx_version="23.10-3.2.2.0" + +mlnx_tgz_name="MLNX_OFED_LINUX-$mlnx_version-rhel9.4-x86_64.tgz" + +if [[ $1 == mlnx_url ]]; then + mlnx_url="https://content.mellanox.com/ofed/MLNX_OFED-$mlnx_version/$mlnx_tgz_name" +elif [[ $1 == mlnx_version ]]; then + echo $mlnx_version + exit 0 +elif [[ $1 == mlnx_tgz_name ]]; then + echo $mlnx_tgz_name + exit 0 +else + echo "Error: wrong parameter for release-drivers/mlnx/get_mlnx_info.sh!" + exit 1 +fi + +echo "$mlnx_url"